repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
cells
list
types
list
ES-DOC/esdoc-jupyterhub
notebooks/ipsl/cmip6/models/ipsl-cm6a-lr/ocnbgchem.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Ocnbgchem\nMIP Era: CMIP6\nInstitute: IPSL\nSource ID: IPSL-CM6A-LR\nTopic: Ocnbgchem\nSub-Topics: Tracers. \nProperties: 65 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-20 15:02:45\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'ipsl', 'ipsl-cm6a-lr', 'ocnbgchem')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport\n3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks\n4. Key Properties --> Transport Scheme\n5. Key Properties --> Boundary Forcing\n6. Key Properties --> Gas Exchange\n7. Key Properties --> Carbon Chemistry\n8. Tracers\n9. Tracers --> Ecosystem\n10. Tracers --> Ecosystem --> Phytoplankton\n11. Tracers --> Ecosystem --> Zooplankton\n12. Tracers --> Disolved Organic Matter\n13. Tracers --> Particules\n14. Tracers --> Dic Alkalinity \n1. Key Properties\nOcean Biogeochemistry key properties\n1.1. Model Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview of ocean biogeochemistry model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. 
Model Name\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nName of ocean biogeochemistry model code (PISCES 2.0,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Type\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nType of ocean biogeochemistry model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.model_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Geochemical\" \n# \"NPZD\" \n# \"PFT\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Elemental Stoichiometry\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nDescribe elemental stoichiometry (fixed, variable, mix of the two)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Fixed\" \n# \"Variable\" \n# \"Mix of both\" \n# TODO - please enter value(s)\n", "1.5. Elemental Stoichiometry Details\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nDescribe which elements have fixed/variable stoichiometry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. Prognostic Variables\nIs Required: TRUE    Type: STRING    Cardinality: 1.N\nList of all prognostic tracer variables in the ocean biogeochemistry component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.7. 
Diagnostic Variables\nIs Required: TRUE    Type: STRING    Cardinality: 1.N\nList of all diagnotic tracer variables in the ocean biogeochemistry component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.8. Damping\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nDescribe any tracer damping used (such as artificial correction or relaxation to climatology,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.damping') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport\nTime stepping method for passive tracers transport in ocean biogeochemistry\n2.1. Method\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nTime stepping framework for passive tracers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"use ocean model transport time step\" \n# \"use specific time step\" \n# TODO - please enter value(s)\n", "2.2. Timestep If Not From Ocean\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nTime step for passive tracers (if different from ocean)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks\nTime stepping framework for biology sources and sinks in ocean biogeochemistry\n3.1. 
Method\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nTime stepping framework for biology sources and sinks", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"use ocean model transport time step\" \n# \"use specific time step\" \n# TODO - please enter value(s)\n", "3.2. Timestep If Not From Ocean\nIs Required: FALSE    Type: INTEGER    Cardinality: 0.1\nTime step for biology sources and sinks (if different from ocean)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --> Transport Scheme\nTransport scheme in ocean biogeochemistry\n4.1. Type\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nType of transport scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Offline\" \n# \"Online\" \n# TODO - please enter value(s)\n", "4.2. Scheme\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nTransport scheme used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Use that of ocean model\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4.3. Use Different Scheme\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nDecribe transport scheme if different than that of ocean model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Key Properties --> Boundary Forcing\nProperties of biogeochemistry boundary forcing\n5.1. Atmospheric Deposition\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nDescribe how atmospheric deposition is modeled", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"from file (climatology)\" \n# \"from file (interannual variations)\" \n# \"from Atmospheric Chemistry model\" \n# TODO - please enter value(s)\n", "5.2. River Input\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nDescribe how river input is modeled", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"from file (climatology)\" \n# \"from file (interannual variations)\" \n# \"from Land Surface model\" \n# TODO - please enter value(s)\n", "5.3. Sediments From Boundary Conditions\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList which sediments are speficied from boundary condition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Sediments From Explicit Model\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nList which sediments are speficied from explicit sediment model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. 
Key Properties --> Gas Exchange\n*Properties of gas exchange in ocean biogeochemistry *\n6.1. CO2 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs CO2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.2. CO2 Exchange Type\nIs Required: FALSE    Type: ENUM    Cardinality: 0.1\nDescribe CO2 gas exchange", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.3. O2 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs O2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.4. O2 Exchange Type\nIs Required: FALSE    Type: ENUM    Cardinality: 0.1\nDescribe O2 gas exchange", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.5. DMS Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs DMS gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.6. 
DMS Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify DMS gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.7. N2 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs N2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.8. N2 Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify N2 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.9. N2O Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs N2O gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.10. N2O Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify N2O gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.11. CFC11 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs CFC11 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.12. CFC11 Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify CFC11 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.13. CFC12 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs CFC12 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.14. CFC12 Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify CFC12 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.15. SF6 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs SF6 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.16. SF6 Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify SF6 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.17. 
13CO2 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs 13CO2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.18. 13CO2 Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify 13CO2 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.19. 14CO2 Exchange Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs 14CO2 gas exchange modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.20. 14CO2 Exchange Type\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify 14CO2 gas exchange scheme type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.21. Other Gases\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nSpecify any other gas exchange", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --> Carbon Chemistry\nProperties of carbon chemistry biogeochemistry\n7.1. Type\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nDescribe how carbon chemistry is modeled", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OMIP protocol\" \n# \"Other protocol\" \n# TODO - please enter value(s)\n", "7.2. PH Scale\nIs Required: FALSE    Type: ENUM    Cardinality: 0.1\nIf NOT OMIP protocol, describe pH scale.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea water\" \n# \"Free\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.3. Constants If Not OMIP\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nIf NOT OMIP protocol, list carbon chemistry constants.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Tracers\nOcean biogeochemistry tracers\n8.1. Overview\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nOverview of tracers in ocean biogeochemistry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Sulfur Cycle Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs sulfur cycle modeled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.3. Nutrients Present\nIs Required: TRUE    Type: ENUM    Cardinality: 1.N\nList nutrient species present in ocean biogeochemistry model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Nitrogen (N)\" \n# \"Phosphorous (P)\" \n# \"Silicium (S)\" \n# \"Iron (Fe)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Nitrous Species If N\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nIf nitrogen present, list nitrous species.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Nitrates (NO3)\" \n# \"Amonium (NH4)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.5. Nitrous Processes If N\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nIf nitrogen present, list nitrous processes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dentrification\" \n# \"N fixation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9. Tracers --> Ecosystem\nEcosystem properties in ocean biogeochemistry\n9.1. Upper Trophic Levels Definition\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nDefinition of upper trophic level (e.g. based on size) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Upper Trophic Levels Treatment\nIs Required: TRUE    Type: STRING    Cardinality: 1.1\nDefine how upper trophic level are treated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. 
Tracers --> Ecosystem --> Phytoplankton\nPhytoplankton properties in ocean biogeochemistry\n10.1. Type\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nType of phytoplankton", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Generic\" \n# \"PFT including size based (specify both below)\" \n# \"Size based only (specify below)\" \n# \"PFT only (specify below)\" \n# TODO - please enter value(s)\n", "10.2. Pft\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nPhytoplankton functional types (PFT) (if applicable)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diatoms\" \n# \"Nfixers\" \n# \"Calcifiers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.3. Size Classes\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nPhytoplankton size classes (if applicable)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Microphytoplankton\" \n# \"Nanophytoplankton\" \n# \"Picophytoplankton\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11. Tracers --> Ecosystem --> Zooplankton\nZooplankton properties in ocean biogeochemistry\n11.1. Type\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nType of zooplankton", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Generic\" \n# \"Size based (specify below)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.2. 
Size Classes\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nZooplankton size classes (if applicable)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Microzooplankton\" \n# \"Mesozooplankton\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Tracers --> Disolved Organic Matter\nDisolved organic matter properties in ocean biogeochemistry\n12.1. Bacteria Present\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs there bacteria representation ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Lability\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nDescribe treatment of lability in dissolved organic matter", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Labile\" \n# \"Semi-labile\" \n# \"Refractory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Tracers --> Particules\nParticulate carbon properties in ocean biogeochemistry\n13.1. Method\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nHow is particulate carbon represented in ocean biogeochemistry?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diagnostic\" \n# \"Diagnostic (Martin profile)\" \n# \"Diagnostic (Balast)\" \n# \"Prognostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. 
Types If Prognostic\nIs Required: FALSE    Type: ENUM    Cardinality: 0.N\nIf prognostic, type(s) of particulate matter taken into account", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"POC\" \n# \"PIC (calcite)\" \n# \"PIC (aragonite\" \n# \"BSi\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Size If Prognostic\nIs Required: FALSE    Type: ENUM    Cardinality: 0.1\nIf prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"No size spectrum used\" \n# \"Full size spectrum\" \n# \"Discrete size classes (specify which below)\" \n# TODO - please enter value(s)\n", "13.4. Size If Discrete\nIs Required: FALSE    Type: STRING    Cardinality: 0.1\nIf prognostic and discrete size, describe which size classes are used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13.5. Sinking Speed If Prognostic\nIs Required: FALSE    Type: ENUM    Cardinality: 0.1\nIf prognostic, method for calculation of sinking speed of particules", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Function of particule size\" \n# \"Function of particule type (balast)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Tracers --> Dic Alkalinity\nDIC and alkalinity properties in ocean biogeochemistry\n14.1. 
Carbon Isotopes\nIs Required: TRUE    Type: ENUM    Cardinality: 1.N\nWhich carbon isotopes are modelled (C13, C14)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"C13\" \n# \"C14)\" \n# TODO - please enter value(s)\n", "14.2. Abiotic Carbon\nIs Required: TRUE    Type: BOOLEAN    Cardinality: 1.1\nIs abiotic carbon modelled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14.3. Alkalinity\nIs Required: TRUE    Type: ENUM    Cardinality: 1.1\nHow is alkalinity modelled ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Prognostic\" \n# \"Diagnostic)\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
iurilarosa/thesis
codici/Archiviati/numpy/.ipynb_checkpoints/Prove numpy-checkpoint.ipynb
gpl-3.0
[ "import numpy\nfrom scipy import sparse", "Prove manipolazioni array", "unimatr = numpy.ones((10,10))\n#unimatr\nduimatr = unimatr*2\n#duimatr\n\nuniarray = numpy.ones((10,1))\n#uniarray\n\ntriarray = uniarray*3\n\nscalarray = numpy.arange(10)\nscalarray = scalarray.reshape(10,1)\n\n#NB fare il reshape da orizzontale a verticale è come se aggiungesse\n#una dimensione all'array facendolo diventare un ndarray\n#(prima era un array semplice, poi diventa un array (x,1), quindi puoi fare trasposto)\n#NB NUMPY NON FA TRASPOSTO DI ARRAY SEMPLICE!\n#scalarray\nscalarray.T\n\nramatricia = numpy.random.randint(2, size=36).reshape((6,6))\nramatricia2 = numpy.random.randint(2, size=36).reshape((6,6))\n\n#WARNING questa operazione moltiplica elemento per elemento\n#se l'oggetto è di dimensione inferiore moltiplica ogni riga/colonna\n# o matrice verticale/orizzontale a seconda della forma dell'oggetto\n\nduimatr*scalarray\n#duimatr*scalarray.T\n#duimatr*duimatr\nramatricia*ramatricia2\n\n#numpy dot invece fa prodotto matriciale righe per colonne\n\nnumpy.dot(duimatr,scalarray)\n#numpy.dot(duimatr,duimatr)\nnumpy.dot(ramatricia2,ramatricia)\n\nunimatricia = numpy.ones((3,3))\nrangematricia = numpy.arange(9).reshape((3,3))\nnumpy.dot(rangematricia, rangematricia)\n\nduimatr + scalarray", "Prove creazione matrice 3D con prodotti esterni", "scalarray = numpy.arange(4)\nuniarray = numpy.ones(4)\n\nmatricia = numpy.outer(scalarray, uniarray)\nmatricia\n\ntensorio = numpy.outer(matricia,scalarray).reshape(4,4,4)\ntensorio\n# metodo di creazione array nd (numpy.ndarray)", "Prove manipolazione matrici 3D numpy", "tensorio = numpy.ones(1000).reshape(10,10,10)\ntensorio\n# metodo di creazione array nd (numpy.ndarray)\n#altro metodo è con comando diretto\n#tensorio = numpy.ndarray((3,3,3), dtype = int, buffer=numpy.arange(30))\n#potrebbe essere utile con la matrice sparsa della peakmap, anche se difficilmente è maneggiabile come matrice densa\n#oppure\n\n# HO FINALMENTE SCOPERTO COME SI 
METTE IL DTYPE COME SI DEVE!! con \"numpy.float32\"!\n#tensorio = numpy.zeros((3,3,3), dtype = numpy.float32)\n#tensorio.dtype\n#tensorio\n\n\nscalarray = numpy.arange(10)\nuniarray = numpy.ones(10)\nscalamatricia = numpy.outer(scalarray,scalarray)\n#scalamatricia\n\n\ntensorio * 2\ntensorio + 2\ntensorio + scalamatricia\n%time tensorio + scalarray\n%time tensorio.__add__(scalarray)\n#danno stesso risultato con tempi paragonabili\n", "Prove matrici sparse", "from scipy import sparse\n\n\nramatricia = numpy.random.randint(2, size=25).reshape((5,5))\nramatricia\n\n#efficiente per colonne\n#sparsamatricia = sparse.csc_matrix(ramatricia)\n#print(sparsamatricia)\n\n#per righe\nsparsamatricia = sparse.csr_matrix(ramatricia)\nprint(sparsamatricia)\n\nsparsamatricia.toarray()\n\nrighe = numpy.array([0,0,0,1,2,3,3,4])\ncolonne = numpy.array([0,0,4,2,1,4,3,0])\nvalori = numpy.ones(righe.size)\nsparsamatricia = sparse.coo_matrix((valori, (righe,colonne)))\n\nprint(sparsamatricia)\n\nsparsamatricia.toarray()", "Prodotto di matrici\nProdotti interni\nConsidera di avere 2 matrici, a e b, in forma numpy array:\n\na*b fa il prodotto elemento per elemento (solo se a e b hanno stessa dimensione)\nnumpy.dot(a,b) fa il prodotto matriciale righe per colonne\n\nOra considera di avere 2 matrici, a e b, in forma di scipy.sparse:\n\na*b fa il prodotto matriciale righe per colonne\nnumpy.dot(a,b) non funziona per nulla\na.dot(b) fa il prodotto matriciale righe per colonne", "#vari modi per fare prodotti di matrici (con somma con operatore + è lo stesso)\ndensamatricia = sparsamatricia.toarray()\n\n#densa-densa\nprodottoPerElementiDD = densamatricia*densamatricia\nprodottoMatricialeDD = numpy.dot(densamatricia, densamatricia)\n\n#sparsa-densa\nprodottoMatricialeSD = sparsamatricia*densamatricia\nprodottoMatricialeSD2 = sparsamatricia.dot(densamatricia)\n\n#sparsa-sparsa\nprodottoMatricialeSS = sparsamatricia*sparsamatricia\nprodottoMatricialeSS2 = sparsamatricia.dot(sparsamatricia)\n\n# 
\"SPARSA\".dot(\"SPARSA O DENSA\") FA PRODOTTO MATRICIALE\n# \"SPARSA * SPARSA\" FA PRODOTTO MATRICIALE\n\n\nprodottoMatricialeDD - prodottoMatricialeSS\n#nb somme e sottrazioni tra matrici sparse e dense sono ok\n# prodotto matriciale tra densa e sparsa funziona come sparsa e sparsa", "Prodotti esterni", "densarray = numpy.array([\"a\",\"b\"],dtype = object)\ndensarray2 = numpy.array([\"c\",\"d\"],dtype = object)\n\nnumpy.outer(densarray,[1,2])\n\ndensamatricia = numpy.array([[1,2],[3,4]])\ndensamatricia2 = numpy.array([[\"a\",\"b\"],[\"c\",\"d\"]], dtype = object)\nnumpy.outer(densamatricia2,densamatricia).reshape(4,2,2)\n\ndensarray1 = numpy.array([0,2])\ndensarray2 = numpy.array([5,0])\ndensamatricia = numpy.array([[1,2],[3,4]])\ndensamatricia2 = numpy.array([[0,2],[5,0]])\n\nnrighe = 2\nncolonne = 2\nnpiani = 4\nprodottoEstDD = numpy.outer(densamatricia,densamatricia2).reshape(npiani,ncolonne,nrighe)\n#prodottoEstDD\n#prodottoEstDD = numpy.dstack((prodottoEstDD[0,:],prodottoEstDD[1,:]))\n\nprodottoEstDD\n\n\nsparsarray1 = sparse.csr_matrix(densarray1)\nsparsarray2 = sparse.csr_matrix(densarray2)\nsparsamatricia = sparse.csr_matrix(densamatricia)\nsparsamatricia2 = sparse.csr_matrix(densamatricia2)\n\nprodottoEstSS = sparse.kron(sparsamatricia,sparsamatricia2).toarray()\n\nprodottoEstSD = sparse.kron(sparsamatricia,densamatricia2).toarray()\nprodottoEstSD\n\n\n\n\n#prove prodotti esterni\n# numpy.outer\n# scipy.sparse.kron\n\n#densa-densa\nprodottoEsternoDD = numpy.outer(densamatricia,densamatricia)\n\n#sparsa-densa\nprodottoEsternoSD = sparse.kron(sparsamatricia,densamatricia)\n\n#sparsa-sparsa\nprodottoEsternoSS = sparse.kron(sparsamatricia,sparsamatricia)\n\n\nprodottoEsternoDD-prodottoEsternoSS\n\n# altre prove di prodotti esterni\nrarray1 = numpy.random.randint(2, size=4)\nrarray2 = numpy.random.randint(2, size=4)\nprint(rarray1,rarray2)\nramatricia = numpy.outer(rarray1,rarray2)\nunimatricia = numpy.ones((4,4)).astype(int)\n#ramatricia2 = rarray1 * 
rarray2.T\nprint(ramatricia,unimatricia)\n#print(ramatricia)\n#print(\"eppoi\")\n#print(ramatricia2)\n\n#sparsarray = sparse.csr_matrix(rarray1)\n#print(sparsarray)\n\n#ramatricia2 = \n\n#il mio caso problematico è che ho una matrice di cui so tutti gli elementi non zero,\n#so quante righe ho (i tempi), ma non so quante colonne di freq ho\nrandomcolonne = numpy.random.randint(10)+1\nramatricia = numpy.random.randint(2, size=10*randomcolonne).reshape((10,randomcolonne))\nprint(ramatricia.shape)\n#ramatricia\nnonzeri = numpy.nonzero(ramatricia)\nndati = len(nonzeri[0])\nndati\nramatricia\n\n#ora cerco di fare la matrice sparsa\nprint(ndati)\ndati = numpy.ones(2*ndati).reshape(ndati,2)\ndati\ncoordinateRighe = nonzeri[0]\ncoordinateColonne = nonzeri[1]\nsparsamatricia = sparse.coo_matrix((dati,(coordinateRighe,coordinateColonne)))\ndensamatricia = sparsamatricia.toarray()\ndensamatricia", "Provo a passare operazioni a array con array di coordinate", "matrice = numpy.arange(30).reshape(10,3)\nmatrice\n\nrighe = numpy.array([1,0,1,1])\ncolonne = numpy.array([2,0,2,2])\npesi = numpy.array([100,200,300,10])\nprint(righe,colonne)\n\nmatrice[righe,colonne]\n\n\nmatrice[righe,colonne] = (matrice[righe,colonne] + numpy.array([100,200,300,10]))\nmatrice\n\n%matplotlib inline\na = pyplot.imshow(matrice)\n\nnumpy.add.at(matrice, [righe,colonne],pesi)\nmatrice\n\n%matplotlib inline\na = pyplot.imshow(matrice)\n\nmatr", "Prove plots", "from matplotlib import pyplot\n%matplotlib inline\n\n\n##AL MOMENTO INUTILE, NON COMPILARE\nx = numpy.random.randint(10,size = 10)\ny = numpy.random.randint(10,size = 10)\npyplot.scatter(x,y, s = 5)\n#nb imshow si può fare solo con un 2d array\n\n#visualizzazione di una matrice, solo matrici dense a quanto pare\na = pyplot.imshow(densamatricia)\n#a = pyplot.imshow(sparsamatricia)\n#c = pyplot.matshow(densamatricia)\n\n\n#spy invece funziona anche per le sparse!\npyplot.spy(sparsamatricia,precision=0.01, marker = \".\", markersize=10)\n\n#in 
alternativa, scatterplot delle coordinate dal dataframe\nb = pyplot.scatter(coordinateColonne,coordinateRighe, s = 2)\n\nimport seaborn\n%matplotlib inline\n\n\nsbRegplot = seaborn.regplot(x=coordinateRighe, y=coordinateColonne, color=\"g\", fit_reg=False)\n\nimport pandas\n\ncoordinateRighe = coordinateRighe.reshape(len(coordinateRighe),1)\ncoordinateColonne = coordinateColonne.reshape(len(coordinateColonne),1)\n#print([coordinateRighe,coordinateColonne])\ncoordinate = numpy.concatenate((coordinateRighe,coordinateColonne),axis = 1)\ncoordinate\n\n\ntabella = pandas.DataFrame(coordinate)\ntabella.columns = [\"righe\", \"colonne\"]\n\n\nsbPlmplot = seaborn.lmplot(x = \"righe\", y = \"colonne\", data = tabella, fit_reg=False)\n\n", "Un esempio semplice del mio problema", "import numpy\nfrom scipy import sparse\nimport multiprocessing\nfrom matplotlib import pyplot\n\n#first i build a matrix of some x positions vs time datas in a sparse format\nmatrix = numpy.random.randint(2, size = 100).astype(float).reshape(10,10)\nx = numpy.nonzero(matrix)[0]\ntimes = numpy.nonzero(matrix)[1]\nweights = numpy.random.rand(x.size)\n\n\n\nimport scipy.io\n\nmint = numpy.amin(times)\nmaxt = numpy.amax(times)\n\nscipy.io.savemat('debugExamples/numpy.mat',{\n 'matrix':matrix, \n 'x':x, \n 'times':times, \n 'weights':weights,\n 'mint':mint,\n 'maxt':maxt,\n \n})\n\ntimes\n\n#then i define an array of y positions\nnStepsY = 5\ny = numpy.arange(1,nStepsY+1)\n\n# provo a iterare\n# VERSIONE CON HACK CON SPARSE verificato viene uguale a tutti gli altri metodi più semplici che ho provato\n# ma ha problemi con parallelizzazione\n\nnRows = nStepsY\nnColumns = 80\ny = numpy.arange(1,nStepsY+1)\nimage = numpy.zeros((nRows, nColumns))\ndef itermatrix(ithStep):\n yTimed = y[ithStep]*times\n positions = (numpy.round(x-yTimed)+50).astype(int)\n\n fakeRow = numpy.zeros(positions.size)\n matrix = sparse.coo_matrix((weights, (fakeRow, positions))).todense()\n matrix = numpy.ravel(matrix)\n missColumns = 
(nColumns-matrix.size)\n zeros = numpy.zeros(missColumns)\n matrix = numpy.concatenate((matrix, zeros))\n return matrix\n\n#for i in numpy.arange(nStepsY):\n# image[i] = itermatrix(i)\n\n#or\nimageSparsed = list(map(itermatrix, range(nStepsY)))\nimageSparsed = numpy.array(imageSparsed)\nscipy.io.savemat('debugExamples/numpyResult.mat', {'imageSparsed':imageSparsed}) \na = pyplot.imshow(imageSparsed, aspect = 10)\npyplot.show()\n\nimport numpy\nfrom scipy import sparse\nimport multiprocessing\nfrom matplotlib import pyplot\n\n#first i build a matrix of some x positions vs time datas in a sparse format\nmatrix = numpy.random.randint(2, size = 100).astype(float).reshape(10,10)\ntimes = numpy.nonzero(matrix)[0]\nfreqs = numpy.nonzero(matrix)[1]\nweights = numpy.random.rand(times.size)\n\n#then i define an array of y positions\nnStepsSpindowns = 5\nspindowns = numpy.arange(1,nStepsSpindowns+1)\n\n\n#PROVA CON BINCOUNT\n\ndef mapIt(ithStep):\n ncolumns = 80\n image = numpy.zeros(ncolumns)\n\n sdTimed = spindowns[ithStep]*times\n positions = (numpy.round(freqs-sdTimed)+50).astype(int)\n\n values = numpy.bincount(positions,weights)\n values = values[numpy.nonzero(values)]\n positions = numpy.unique(positions)\n image[positions] = values\n return image\n\n\n%time imageMapped = list(map(mapIt, range(nStepsSpindowns)))\nimageMapped = numpy.array(imageMapped)\n\n%matplotlib inline\na = pyplot.imshow(imageMapped, aspect = 10)\n\n# qui provo fully vectorial\ndef fullmatrix(nRows, nColumns):\n spindowns = numpy.arange(1,nStepsSpindowns+1)\n image = numpy.zeros((nRows, nColumns))\n\n sdTimed = numpy.outer(spindowns,times)\n freqs3d = numpy.outer(numpy.ones(nStepsSpindowns),freqs)\n weights3d = numpy.outer(numpy.ones(nStepsSpindowns),weights)\n spindowns3d = numpy.outer(spindowns,numpy.ones(times.size))\n positions = (numpy.round(freqs3d-sdTimed)+50).astype(int)\n\n matrix = sparse.coo_matrix((numpy.ravel(weights3d), (numpy.ravel(spindowns3d), numpy.ravel(positions)))).todense()\n 
return matrix\n\n%time image = fullmatrix(nStepsSpindowns, 80)\na = pyplot.imshow(image, aspect = 10)\npyplot.show()", "Confronti Debug!", "#confronto con codice ORIGINALE in matlab\nimmagineOrig = scipy.io.loadmat('debugExamples/dbOrigResult.mat')['binh_df0']\na = pyplot.imshow(immagineOrig[:,0:80], aspect = 10)\npyplot.show()\n\n#PROVA CON BINCOUNT\n\ndef mapIt(ithStep):\n ncolumns = 80\n image = numpy.zeros(ncolumns)\n\n yTimed = y[ithStep]*times\n positions = (numpy.round(x-yTimed)+50).astype(int)\n\n values = numpy.bincount(positions,weights)\n where = tf.not_equal(values, zero)\n values = values[numpy.nonzero(values)]\n positions = numpy.unique(positions)\n image[positions] = values\n return image\n\n\n%time imageMapped = list(map(mapIt, range(nStepsY)))\nimageMapped = numpy.array(imageMapped)\n\n%matplotlib inline\na = pyplot.imshow(imageMapped, aspect = 10)\n\n# qui provo con vettorializzazione di numpy (apply along axis)\nnrows = nStepsY\nncolumns = 80\nmatrix = numpy.zeros(nrows*ncolumns).reshape(nrows,ncolumns)\n\ndef applyIt(image):\n ithStep = 1\n image = numpy.zeros(ncolumns)\n\n yTimed = y[ithStep]*times\n positions = (numpy.round(x-yTimed)+50).astype(int)\n #print(positions)\n values = numpy.bincount(positions,weights)\n values = values[numpy.nonzero(values)]\n positions = numpy.unique(positions)\n image[positions] = values\n \n return image\n\n\nimageApplied = numpy.apply_along_axis(applyIt,1,matrix)\na = pyplot.imshow(imageApplied, aspect = 10)\n\n# qui provo fully vectorial\ndef fullmatrix(nRows, nColumns):\n y = numpy.arange(1,nStepsY+1)\n image = numpy.zeros((nRows, nColumns))\n\n yTimed = numpy.outer(y,times)\n x3d = numpy.outer(numpy.ones(nStepsY),x)\n weights3d = numpy.outer(numpy.ones(nStepsY),weights)\n y3d = numpy.outer(y,numpy.ones(x.size))\n positions = (numpy.round(x3d-yTimed)+50).astype(int)\n\n matrix = sparse.coo_matrix((numpy.ravel(weights3d), (numpy.ravel(y3d), numpy.ravel(positions)))).todense()\n return matrix\n\n%time image = 
fullmatrix(nStepsY, 80)\na = pyplot.imshow(image, aspect = 10)\npyplot.show()\n\nimageMapped = list(map(itermatrix, range(nStepsY)))\nimageMapped = numpy.array(imageMapped)\na = pyplot.imshow(imageMapped, aspect = 10)\npyplot.show()\n\n# prova con numpy.put\n\nnStepsY = 5\n\ndef mapIt(ithStep):\n ncolumns = 80\n image = numpy.zeros(ncolumns)\n\n yTimed = y[ithStep]*times\n positions = (numpy.round(x-yTimed)+50).astype(int)\n\n values = numpy.bincount(positions,weights)\n values = values[numpy.nonzero(values)]\n positions = numpy.unique(positions)\n image[positions] = values\n return image\n\n\n%time imagePutted = list(map(mapIt, range(nStepsY)))\nimagePutted = numpy.array(imagePutted)\n\n%matplotlib inline\na = pyplot.imshow(image, aspect = 10)\npyplot.show()", "Documentazione\n\nRoba di array vari di numpy\n\n\nDomanda interessante su creazione matrici (stackoverflow)\nCreazione array ND\nOperatore add equivalente ad a+b per array ND\nData types\nProdotto tensore (da vedere ancora)\nGenerazione array ND random\nGenerazione array 1D random intero (eg binario)\nDà le coordinate di tutti gli elementi nonzero\nConcatenate: unisce due array in un solo array (mette il secondo dopo il primo nello stesso array, poi eventualmenete va reshapato se si vuole fare una matrice da più arrays)\nStack: unisce due array, forse migliore di concatenate, forse li aggiunge facendo una matrice\n\n\nRoba di matrici sparse\n\n\nCreazione sparse (nb vedi esempio finale per mio caso)\nCreazione sparsa random\nForma in cui fa prodotto esterno\n\n\nRoba scatterplot et similia\n\n\nScatterplot (nb attenti alle coordinate)\nPlot di matrici (imshow)\nTutorial per imshow\nSpy FA PLOT DI MATRICI SPARSE!\nPlots con seaborn: regplot) (più semplice, come pyplot vuole solo due array delle coordinate),lmplot (vuole dataframe),pairplot (non mi dovrebbe servire)\nEsempio scatterplot con lmplot (v anche siscomp)", "ramatricia = numpy.random.randint(10, 
size=120).reshape((5,4,3,2))\nprint(ramatricia[0,0,:,:])\n#print(ramatricia)\n\n\nprint(ramatricia.reshape(20,3,3))" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ProfHoekstra/bluesky
docs/python_demo.ipynb
gpl-3.0
[ "Bluesky interfacing demo\nThis Jupyter Notebook demonstrates the use of Bluesky as a Python package and how it can be used to generate aircraft trajectories.\nNote: Make sure that you have Bluesky installed as a package in your Python environment.\nLet's start by importing the relevant modules.", "import numpy as np\n\nimport bluesky as bs\nfrom bluesky.simulation import ScreenIO\n", "We create a dummy class that acts as the screen of BlueSky. Since we don't want to actually load the screen from BlueSky here, a simple and small class is used instead to avoid errors when something within BlueSky is calling the echo function.", "class ScreenDummy(ScreenIO):\n \"\"\"\n Dummy class for the screen. Inherits from ScreenIO to make sure all the\n necessary methods are there. This class is there to reimplement the echo\n method so that console messages are printed.\n \"\"\"\n def echo(self, text='', flags=0):\n \"\"\"Just print echo messages\"\"\"\n print(\"BlueSky console:\", text)", "The first step to take is to initialise bluesky (here imported as bs) as a disconnected, single simulation.\nNext we replace the screen object with our derived variant so that bluesky console messages are printed.", "# initialize bluesky as non-networked simulation node\nbs.init('sim-detached')\n\n# initialize dummy screen\nbs.scr = ScreenDummy()", "Now that everything is initialized, we can get started and and generate some traffic. Here, we generate 3 aircraft of the type A320.", "# generate some trajectories\nn = 3\n\n# create n aircraft with random positions, altitudes, speeds\nbs.traf.mcre(n, actype=\"A320\")\n\n# alternative: individually initialize each aircraft by passing the initial\n# position, heading, altitude, and speed.\n# bs.traf.cre(acid=acids, actype=actypes, aclat=aclats, aclon=aclons,\n# achdg=achdgs, acalt=acalts, acspd=acspds)", "The traffic in this example are not given any initial conditions such as initial position and velocity. 
This is not needed when the traffic departs at an airport. You can also pass the initial conditions as additional parameters to the funciton if you want to initialize the traffic in flight.\nNext we want to assign some waypoints to the traffic. In this example we assign the same route to all the flights. This is just for the sake of simplicity and it obviously wouldn't make a lot of sense in a practical application, but it should highlight how waypoints are added.", "# iterate over traffic and add the same waypoints\n# Note that preferably, all simulation commands are initiated through the stack\n# however, if you wish, you can also call the functions directly, such as the\n# mcre command in the above cell.\nfor acid in bs.traf.id:\n # set the origin (not needed if initialized in flight),\n # and add some waypoints, here only the altitude (in m) is passed to the\n # function, but you can additionally pass a speed as well\n # finally turn on VNAV for each flight\n bs.stack.stack(f'ORIG {acid} EGLL;'\n f'ADDWPT {acid} BPK FL60;'\n f'ADDWPT {acid} TOTRI FL107;'\n f'ADDWPT {acid} MATCH FL115;'\n f'ADDWPT {acid} BRAIN FL164;'\n f'VNAV {acid} ON')\n\n # you can also set the way the waypoint should be flown\n # bs.stack.stack(f'ADDWPT {acid} FLYOVER')\n\n # you can also set a destination\n # bs.stack.stack(f'DEST {acid} EHAM')", "Now that all our traffic has a route to fly, it's time to start the simulation.", "# set simulation time step, and enable fast-time running\nbs.stack.stack('DT 1;FF')\n\n# we'll run the simulation for up to 4000 seconds\nt_max = 4000\n\nntraf = bs.traf.ntraf\nn_steps = int(t_max + 1)\nt = np.linspace(0, t_max, n_steps)\n\n# allocate some empty arrays for the results\nres = np.zeros((n_steps, 4, ntraf))\n\n# iteratively simulate the traffic\nfor i in range(n_steps):\n # Perform one step of the simulation\n bs.sim.step()\n\n # save the results from the simulator in the results array,\n # here we keep the latitude, longitude, altitude and TAS\n 
res[i] = [bs.traf.lat,\n bs.traf.lon,\n bs.traf.alt,\n bs.traf.tas]", "Finally, we do a bit of plotting to visualize the results. Again, the three trajectories are the same since we passed the same route to them, but this, of course, can be easily changed.", "# plot\nimport matplotlib.pyplot as plt\n\nfor idx, acid in enumerate(bs.traf.id):\n fig = plt.figure(figsize=(10, 15))\n ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((4, 1), (2, 0))\n ax3 = plt.subplot2grid((4, 1), (3, 0))\n\n ax1.plot(res[:, 1, idx], res[:, 0, idx])\n\n ax2.plot(t, res[:, 2, idx])\n ax2.set_xlabel('t [s]')\n ax2.set_ylabel('alt [m]')\n\n ax3.plot(t, res[:, 3, idx])\n ax3.set_xlabel('t [s]')\n ax3.set_ylabel('TAS [m/s]')\n \n fig.suptitle(f'Trajectory {acid}')\n\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
georgetown-analytics/nba
NBA Player Statistics Workshop.ipynb
mit
[ "NBA Player Statistics Workshop\nGiven a dataset of NBA players performance and salary in 2014, use Python to load the dataset and compute the summary statistics for the SALARY field:\n\nmean\nmedian\nmode\nminimum\nmaximum\n\nYou will need to make use of the csv module to load the data and interact with it. Computations should require only simple arithmetic. (For the purposes of this exercise, attempt to use pure Python and no third party dependencies like Pandas - you can then compare and contrast the use of Pandas for this task later). \nBonus:\nDetermine the relationship of PER (Player Efficiency Rating) to Salary via a visualization of the data.\nNBA 2014 Players Dataset: http://bit.ly/gtnbads", "# Imports - you'll need some of these later, but it's traditional to put them all at the beginning.\n\nimport os\nimport csv\nimport json\nimport urllib2\n\nfrom collections import Counter\nfrom operator import itemgetter", "Fetching the Data\nYou have a couple of options of fetching the data set to begin your analysis:\n\nClick on the link above and Download the file. \nWrite a Python function that automatically downloads the data as a comma-separated value file (CSV) and writes it to disk. \n\nIn either case, you'll have to be cognizant of where the CSV file lands. Here is a quick implementation of a function to download a URL at a file and write it to disk. Note the many approaches to do this as outlined here: How do I download a file over HTTP using Python?.", "def download(url, path):\n \"\"\"\n Downloads a URL and writes it to the specified path. 
The \"path\" \n is like the mailing address for the file - it tells the function \n where on your computer to send it!\n \n Also note the use of \"with\" to automatically close files - this \n is a good standard practice to follow.\n \"\"\"\n response = urllib2.urlopen(url)\n with open(path, 'w') as f:\n f.write(response.read())\n \n response.close()", "Your turn: use the above function to download the data!", "## Write the Python to execute the function and download the file here:", "Loading the Data\nNow that we have the CSV file that we're looking for, we need to be able to open the file and read it into memory. The trick is that we want to read only a single line at a time - consider really large CSV files. Python provides memory efficient iteration in the form of generators and the csv.reader module exposes one such generator, that reads the data from the CSV one row at a time. Moreover, we also want to parse our data so that we have specific access to the fields we're looking for. The csv.DictReader class will give you each row as a dictionary, where the keys are derived from the first, header line of the file. \nHere is a function that reads data from disk one line at a time and yields it to the user.", "def read_csv(path):\n # First open the file\n with open(path, 'r') as f:\n # Create a DictReader to parse the CSV\n reader = csv.DictReader(f)\n for row in reader:\n # HINT: Convert SALARY column values into integers & PER column into floats.\n # Otherwise CSVs can turn ints into strs! You'll thank me later :D\n row['SALARY'] = int(row['SALARY'])\n row['PER'] = float(row['PER'])\n # Now yield each row one at a time.\n yield row", "Your turn: use the above function to open the file and print out the first row of the CSV!\nTo do this, you'll need to do three things:\nFirst, remember where you told the download function to store your file? 
Pass that same path into read_csv:", "## Write the Python to execute our read_csv function.", "Next step: The read_csv function \"returns\" a generator. How can we access just the first row? Remember how to access the next row of a generator?", "## Now write the Python to print the first row of the CSV here.", "Are there different ways to print the first n rows of something? Sure! Try using break, which will stop a for loop from running. E.g. the code:\npython\nfor idx in xrange(100):\n if idx > 10:\n break\n...will stop the for loop after 10 iterations.\nNext, write a for loop that can access and print every row.", "## Write the Python to print *every* row of the CSV here.", "Summary Statistics\nIn this section, you'll use the CSV data to write computations for mean, median, mode, minimum, and maximum. Use Python to access the values in the SALARY column.", "data = list(read_csv('fixtures/nba_players.csv')) #Put in your own path here.\ndata = sorted(data, key=itemgetter('SALARY'))\n\ntotal = 0\ncount = 0\n\nfor row in data:\n count += 1\n total += row['SALARY']\n\n# Total Count\nprint \"There are %d total players.\" % count\n\n# Write the Python to get the median\nmedian = \nprint \"The median salary is %d.\" % median\n\n# Write the Python to get the minimum\nminimum = \nprint \"The minimum salary is %d.\" % minimum\n\n# Write the Python to get the maximum\nmaximum = \nprint \"The maximum salary is %d.\" % maximum\n\n# Write the Python to get the mean\nmean = \nprint \"The mean salary is %d.\" % mean", "Nice work! Now... calculating the mode is a bit different. Remember about the Decorate-Sort-Undecorate pattern that we learned about in ThinkPython? That will work here!", "## Write the Python to get the mode of the salaries.", "The \"DSU\" approach is a little inefficient. Instead of using a dictionary as our data type to solve the mode problem, we could use counter() from the Collections module. 
Read more about counter() and try it out here:", "## Experiment with using counter() here.", "Putting the pieces together\nThe above summary statistics can actually be computed inside of a single (and elegant!) function. Give it a try!", "def statistics(path):\n \"\"\"\n Takes as input a path to `read_csv` and the field to\n compute the summary statistics upon.\n \"\"\"\n \n # Uncomment below to load the CSV into a list\n # data = list(read_csv(path))\n \n # Fill in the function here\n\n\n stats = {\n 'maximum': data[-1]['SALARY'],\n 'minimum': data[0]['SALARY'],\n 'median': data[count / 2]['SALARY'], # Any potential problems here?\n 'mode': freqs.most_common(2),\n 'mean': total / count,\n }\n\n return stats", "Keep playing with the above function to get it to work more efficiently or to reduce bad data in the computation - e.g. what are all those zero salaries? \nVisualization\nCongratulations if you've made it this far! It's time for the bonus round!\nYou've now had some summary statistics about the salaries of NBA players, but what we're really interested in is the relationship between SALARY and the rest of the fields in the data set. The PER - Player Efficiency Rating, is an aggregate score of all performance statistics; therefore if we determine the relationship of PER to SALARY, we might learn a lot about how to model NBA salaries. \nIn order to explore this, let's create a scatter plot of SALARY to PER, where each point is an NBA player.\nVisualization is going to require a third party library. You probably already have matplotlib, so that might be the simplest if you're having trouble with installation. If you don't, pip install it now! 
Follow the documentation to create the scatter plot inline in the notebook in the following cells.", "# Insert your Python to create the visualization here\nimport os\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n%matplotlib inline # Makes the plot appear inline in your iPython Notebook.\n\ndef read_data(path):\n # Pandas is an efficient way to wrangle the data quickly\n return pd.DataFrame(pd.read_csv(path))\n\ndef graph_data(path, xkey='PER', ykey='SALARY'):\n data = read_data(path)\n ## Fill this in yourself!\n plt.show()\n\ngraph_data('fixtures/nba_players.csv') # Or whatever your path is", "Nice work!! Matplotlib is pretty useful, but also kind of bare bones. Once you're ready to experiment with other libraries and take your visualizations to the next level, check out the following:\n\nSeaborn\nBokeh\nPandas\n\nOur favorite is Bokeh - it's interactive!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
RafaelNH/Reproduce-Hoy-et-al-2014
notebook/run_simulations_2.ipynb
bsd-3-clause
[ "This notebook contains all the code to access the optimal pair of b-values for the fwDTI model (article's Fig. 2).\nA python file version of this notebook can be found in the code folder.\nImporting relevant modules:", "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport time\nimport sys\nimport os \n%matplotlib inline\n\n# Change directory to the code folder\nos.chdir('..//code')\n\n# Functions to sample the diffusion-weighted gradient directions\nfrom dipy.core.sphere import disperse_charges, HemiSphere\n\n# Function to reconstruct the tables with the acquisition information\nfrom dipy.core.gradients import gradient_table\n\n# Functions to perform simulations based on multi-compartment models\nfrom dipy.sims.voxel import multi_tensor\n\n# Import Dipy's procedures to process diffusion tensor\nimport dipy.reconst.dti as dti\n\n# Importing procedures to fit the free water elimination DTI model\nfrom functions import nls_fit_tensor", "Defining the acquisition parameters...", "# Sample the spherical cordinates of 32 random diffusion-weighted\n# directions.\nn_pts = 32\ntheta = np.pi * np.random.rand(n_pts)\nphi = 2 * np.pi * np.random.rand(n_pts)\n\n# Convert direction to cartesian coordinates. For this, Dipy's\n# class object HemiSphere is used. 
Since diffusion possess central\n# symmetric, this class object also projects the direction to an\n# Hemisphere.\nhsph_initial = HemiSphere(theta=theta, phi=phi)\n\n# By using a electrostatic potential energy algorithm, the directions\n# of the HemiSphere class object are moved util them are evenly\n# distributed in the Hemi-sphere\nhsph_updated, potential = disperse_charges(hsph_initial, 5000)\ndirections = hsph_updated.vertices\n\n# Define a matrix that contains the directions for all b-values\nbvecs = np.vstack((np.zeros((6, 3)), directions, directions))\n\n# Define the minimun and maximum b-values that will be tested\nbmin = np.linspace(200, 800, num=7)\nbmax = np.linspace(300, 1500, num=13)\n\n# The SNR is defined according to Hoy et al, 2014\nSNR = 40\n\n# According to Hoy et al., the nominal SNR (SNR at b-value=0)\n# is adjusted according to the maximum b-value from its\n# associated TE values and assuming white matter T2 values of\n# 70 ms.\nT2 = 70.0\n\n# Since TE values acquired for the original article were\n# not provided, the TE values recorded from a 3T Siemmens\n# Prisma scanner was used instead. These TE values are\n# saved in the following vector:\nTE = np.array([64.0, 65.0, 66.0, 68.0, 69.0, 70.0, 71.0,\n 71.0, 72.0, 73.0, 74.0, 75.0, 75.0])\n\n# Correction factor is set such that the acquisition\n# with maximum b-value=1000 had a scaling factor of 1\nsf = 1. 
/ np.exp(-TE[7]/T2)", "Defining the ground truth values for all simulations repetitions...", "# Similarly to the original article, this analysis is performed\n# for a volume fraction of 0.50\nF = 0.50\n\n# The value of free water diffusion is set to its known value\nDwater = 3e-3\n\n# Similarly to the original article, this analysis is performed\n# for a tissue diffusion tensor with high fractional anisotropy\nFA = 0.712\nL1 = 1.6e-3\nL2 = 0.5e-3\nL3 = 0.3e-3\nTRACE = 2.4e-3\n\n# According to Hoy et al., simulations are repeated for 120 different\n# diffusion tensor directions (and each direction repeated 100 times).\nnDTdirs = 120\nnrep = 100\n\n# These directions are sampled using the same procedure used\n# to evenly sample the diffusion gradient directions\ntheta = np.pi * np.random.rand(nDTdirs)\nphi = 2 * np.pi * np.random.rand(nDTdirs)\nhsph_initial = HemiSphere(theta=theta, phi=phi)\nhsph_updated, potential = disperse_charges(hsph_initial, 5000)\nDTdirs = hsph_updated.vertices\n\n# Prepare parameters for simulations\nmevals = np.array([[L1, L2, L3], [Dwater, Dwater, Dwater]])\nfractions = [100 - F*100, F*100]", "Generating simulations and fwDTI fitting...", "# Initializing a matrix to save all free-water DTI parameters.\n# The first dimension of this matrix corresponds to the simulation\n# repetitions for different minimum b-values tested, maximum\n# b-value tested, diffusion tensor directions. 
The last dimension\n# corresponds to the number of model parameters.\nfw_params = np.zeros((bmin.size, bmax.size,\n nDTdirs * nrep, 13))\n\n\n# Lopping the simulations for the given maxima b-values\nfor bmax_i in range(bmax.size):\n\n # Adjust SNR according to a given maximum b-value\n SNRa = SNR * sf * np.exp(-TE[bmax_i]/T2)\n\n # Lopping the simulations for the given minima b-values\n for bmin_i in range(bmin.size):\n\n # Perform only simulations that make sense (i.e bmax > bmin )\n if bmax[bmax_i] > bmin[bmin_i]:\n\n # Generate Dipy's acquisition table for the given minimum and\n # maximum b-values\n bvals = np.hstack((np.zeros(6), bmin[bmin_i]*np.ones(n_pts),\n bmax[bmax_i]*np.ones(n_pts)))\n gtab = gradient_table(bvals, bvecs)\n\n # Initialize a matrix that will save the data of all\n # repetitions for the given pair of minimum-maximun\n # b-value\n rep_simulates = np.zeros((nDTdirs * nrep, bvecs.shape[0]))\n\n # Repeat simulations for the 120 diffusion gradient directions\n for di in range(nDTdirs):\n d = DTdirs[di]\n\n # Repeat each direction 100 times\n for s_i in np.arange(di * nrep, (di+1) * nrep):\n\n # Multi-compartmental simulations are done using\n # Dipy's function multi_tensor\n signal, sticks = multi_tensor(gtab, mevals, S0=100,\n angles=[d, (1, 0, 0)],\n fractions=fractions,\n snr=SNRa)\n rep_simulates[s_i, :] = signal\n\n # Process NLS fitting for all simulation repetitions of\n # a given pair of minimum-maximun b-value\n fw_params[bmin_i, bmax_i, :, :] = nls_fit_tensor(gtab,\n rep_simulates)\n\n # Process computing progress\n prog = (bmax_i*1.0)/bmax.size + (bmin_i+1.0)/(bmax.size*bmin.size)\n prog = prog * 100\n time.sleep(1)\n sys.stdout.write(\"\\r%f%%\" % prog)\n sys.stdout.flush()", "Compute FA and f-value statistics and save results", "# select computed diffusion eigenvalues\nevals = fw_params[..., :3]\n\n# Compute the tissue's diffusion tensor fractional anisotropy\n# using function fractional_anisotropy of Dipy's module dti\nfa = 
dti.fractional_anisotropy(evals)\n\n# select computed water volume fraction\nf = fw_params[..., 12]\n\n# Diffusion tensor trace is equal to the sum of eigenvalues\ntrace = np.sum(evals, axis=3)\n\n# Compute the squared difference of each diffusion measurement\ndf2 = np.square(f - F)\ndfa2 = np.square(fa - FA)\ndtrace2 = np.square(trace - TRACE)\n\n# Compute the mean squared error of each diffusion measuremen\nmsef = np.sum(df2, axis=2) / (nrep*nDTdirs)\nmsefa = np.sum(dfa2, axis=2) / (nrep*nDTdirs)\nmsetrace = np.sum(dtrace2, axis=2) / (nrep*nDTdirs)\n\n# Compute the reciprocal scaled mean squared error\nBmax, Bmin = np.meshgrid(bmax, bmin)\n\nmsefmin = np.min(msef[Bmax > Bmin]) # MSE F minimun\nmsefamin = np.min(msefa[Bmax > Bmin]) # MSE FA minimun\nmsetracemax = np.max(msetrace[Bmax > Bmin]) # MSE Trace minimun\nmsefmax = np.max(msef[Bmax > Bmin]) # MSE F minimun\nmsefamax = np.max(msefa[Bmax > Bmin]) # MSE FA minimun\nmsetracemin = np.min(msetrace[Bmax > Bmin]) # MSE Trace minimun\n\nrmsef = 1 - (msef-msefmin) / (msefmax-msefmin)\nrmsefa = 1 - (msefa-msefamin) / (msefamax-msefamin)\nrmsetrace = 1 - (msetrace-msetracemin) / (msetracemax-msetracemin)\n\n# Figure setting\nfig = plt.figure(figsize=(15, 3.5))\nfig.subplots_adjust(wspace=0.2)\n\n# Define the plot's grid edges\nbmin_edge = np.linspace(150, 850, num=8)\nbmax_edge = np.linspace(250, 1550, num=14)\nBmax_ed, Bmin_ed = np.meshgrid(bmax_edge, bmin_edge)\n\n# Plotting he reciprocal scaled mean squared error for the\n# volume fraction\nplt.subplot(1, 3, 1)\nplt.pcolormesh(Bmax_ed, Bmin_ed, rmsef,\n vmin=0, vmax=1, cmap='Reds')\nplt.axis([250, 1550, 850, 150])\nplt.colorbar()\nplt.xlabel('Maxinum b-value (s/mm2)')\nplt.ylabel('Mininum b-value (s/mm2)')\n\n# Plotting he reciprocal scaled mean squared error for FA\nplt.subplot(1, 3, 2)\nplt.pcolormesh(Bmax_ed, Bmin_ed, rmsefa,\n vmin=0, vmax=1, cmap='Reds')\nplt.axis([250, 1550, 850, 150])\nplt.colorbar()\nplt.xlabel('Maxinum b-value 
(s/mm2)')\nplt.ylabel('Mininum b-value (s/mm2)')\n\n# Plotting he reciprocal scaled mean squared error for diffusion\n# trace\nplt.subplot(1, 3, 3)\nplt.pcolormesh(Bmax_ed, Bmin_ed, rmsetrace,\n vmin=0, vmax=1, cmap='Reds')\nplt.axis([250, 1550, 850, 150])\nplt.colorbar()\nplt.xlabel('Maxinum b-value (s/mm2)')\nplt.ylabel('Mininum b-value (s/mm2)')\n\nplt.show()\n\nfig.savefig('fwdti_simulations_2.png', bbox_inches='tight')", "Panels above show that our fwDTI fitting procedures are less dependent on the b-values than the original articles. For a better visualization of the optimal pair of b-values, the figures is ploted again with adjusted color-scale range.", "# Figure setting\nfig = plt.figure(figsize=(15, 3.5))\nfig.subplots_adjust(wspace=0.2)\n\n# Define the plot's grid edges\nbmin_edge = np.linspace(150, 850, num=8)\nbmax_edge = np.linspace(250, 1550, num=14)\nBmax_ed, Bmin_ed = np.meshgrid(bmax_edge, bmin_edge)\n\n# Plotting he reciprocal scaled mean squared error for the\n# volume fraction\nplt.subplot(1, 3, 1)\nplt.pcolormesh(Bmax_ed, Bmin_ed, rmsef,\n vmin=0.9, vmax=1, cmap='Reds')\nplt.axis([250, 1550, 850, 150])\nplt.colorbar()\nplt.xlabel('Maxinum b-value (s/mm2)')\nplt.ylabel('Mininum b-value (s/mm2)')\n\n# Plotting he reciprocal scaled mean squared error for FA\nplt.subplot(1, 3, 2)\nplt.pcolormesh(Bmax_ed, Bmin_ed, rmsefa,\n vmin=0.9, vmax=1, cmap='Reds')\nplt.axis([250, 1550, 850, 150])\nplt.colorbar()\nplt.xlabel('Maxinum b-value (s/mm2)')\nplt.ylabel('Mininum b-value (s/mm2)')\n\n# Plotting he reciprocal scaled mean squared error for diffusion\n# trace\nplt.subplot(1, 3, 3)\nplt.pcolormesh(Bmax_ed, Bmin_ed, rmsetrace,\n vmin=0.9, vmax=1, cmap='Reds')\nplt.axis([250, 1550, 850, 150])\nplt.colorbar()\nplt.xlabel('Maxinum b-value (s/mm2)')\nplt.ylabel('Mininum b-value (s/mm2)')\n\nplt.show()\n\nfig.savefig('fwdti_simulations_2_rescaled.png', bbox_inches='tight')", "In analogous to Hoy et al. 
(2014), the b-value pair $500-1500 s.mm^{-2}$ seems to be optimal for fwDTI fitting." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
watsonyanghx/CS231n
assignment2/BatchNormalization.ipynb
mit
[ "Batch Normalization\nOne way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].\nThe idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.\nThe authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. 
A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.\nIt is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.\n[3] Sergey Ioffe and Christian Szegedy, \"Batch Normalization: Accelerating Deep Network Training by Reducing\nInternal Covariate Shift\", ICML 2015.", "# As usual, a bit of setup\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.fc_net import *\nfrom cs231n.data_utils import get_CIFAR10_data\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.solver import Solver\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\n\n# Load the (preprocessed) CIFAR10 data.\n\ndata = get_CIFAR10_data()\nfor k, v in data.iteritems():\n print '%s: ' % k, v.shape", "Batch normalization: Forward\nIn the file cs231n/layers.py, implement the batch normalization forward pass in the function batchnorm_forward. 
Once you have done so, run the following to test your implementation.", "# Check the training-time forward pass by checking means and variances\n# of features both before and after batch normalization\n\n# Simulate the forward pass for a two-layer network\nN, D1, D2, D3 = 200, 50, 60, 3\nX = np.random.randn(N, D1)\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\na = np.maximum(0, X.dot(W1)).dot(W2)\n\nprint 'Before batch normalization:'\nprint ' means: ', a.mean(axis=0)\nprint ' stds: ', a.std(axis=0)\n\n# Means should be close to zero and stds close to one\nprint 'After batch normalization (gamma=1, beta=0)'\na_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})\nprint ' mean: ', a_norm.mean(axis=0)\nprint ' std: ', a_norm.std(axis=0)\n\n# Now means should be close to beta and stds close to gamma\ngamma = np.asarray([1.0, 2.0, 3.0])\nbeta = np.asarray([11.0, 12.0, 13.0])\na_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})\nprint 'After batch normalization (nontrivial gamma, beta)'\nprint ' means: ', a_norm.mean(axis=0)\nprint ' stds: ', a_norm.std(axis=0)\n\n# Check the test-time forward pass by running the training-time\n# forward pass many times to warm up the running averages, and then\n# checking the means and variances of activations after a test-time\n# forward pass.\n\nN, D1, D2, D3 = 200, 50, 60, 3\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\n\nbn_param = {'mode': 'train'}\ngamma = np.ones(D3)\nbeta = np.zeros(D3)\nfor t in xrange(50):\n X = np.random.randn(N, D1)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n batchnorm_forward(a, gamma, beta, bn_param)\nbn_param['mode'] = 'test'\nX = np.random.randn(N, D1)\na = np.maximum(0, X.dot(W1)).dot(W2)\na_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)\n\n# Means should be close to zero and stds close to one, but will be\n# noisier than training-time forward passes.\nprint 'After batch normalization (test-time):'\nprint ' means: ', 
a_norm.mean(axis=0)\nprint ' stds: ', a_norm.std(axis=0)", "Batch Normalization: backward\nNow implement the backward pass for batch normalization in the function batchnorm_backward.\nTo derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.\nOnce you have finished, run the following to numerically check your backward pass.", "# Gradient check batchnorm backward pass\n\nN, D = 4, 5\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nfx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dout)\nda_num = eval_numerical_gradient_array(fg, gamma, dout)\ndb_num = eval_numerical_gradient_array(fb, beta, dout)\n\n_, cache = batchnorm_forward(x, gamma, beta, bn_param)\ndx, dgamma, dbeta = batchnorm_backward(dout, cache)\nprint 'dx error: ', rel_error(dx_num, dx)\nprint 'dgamma error: ', rel_error(da_num, dgamma)\nprint 'dbeta error: ', rel_error(db_num, dbeta)", "Batch Normalization: alternative backward\nIn class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.\nSurprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. 
After doing so, implement the simplified batch normalization backward pass in the function batchnorm_backward_alt and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.\nNOTE: You can still complete the rest of the assignment if you don't figure this part out, so don't worry too much if you can't get it.", "N, D = 100, 500\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nout, cache = batchnorm_forward(x, gamma, beta, bn_param)\n\nt1 = time.time()\ndx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)\nt2 = time.time()\ndx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)\nt3 = time.time()\n\nprint 'dx difference: ', rel_error(dx1, dx2)\nprint 'dgamma difference: ', rel_error(dgamma1, dgamma2)\nprint 'dbeta difference: ', rel_error(dbeta1, dbeta2)\nprint 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2))", "Fully Connected Nets with Batch Normalization\nNow that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs2312n/classifiers/fc_net.py. Modify your implementation to add batch normalization.\nConcretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.\nHINT: You might find it useful to define an additional helper layer similar to those in the file cs231n/layer_utils.py. 
If you decide to do so, do it in the file cs231n/classifiers/fc_net.py.", "N, D, H1, H2, C = 2, 15, 20, 30, 10\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=(N,))\n\nfor reg in [0, 3.14]:\n print 'Running check with reg = ', reg\n model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,\n reg=reg, weight_scale=5e-2, dtype=np.float64,\n use_batchnorm=True)\n\n loss, grads = model.loss(X, y)\n print 'Initial loss: ', loss\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))\n if reg == 0: print", "Batchnorm for deep networks\nRun the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.", "# Try training a very deep net with batchnorm\nhidden_dims = [100, 100, 100, 100, 100]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nweight_scale = 2e-2\nbn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\nmodel = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\nbn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nbn_solver.train()\n\nsolver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nsolver.train()", "Run the following to visualize the results from two networks trained above. 
You should find that using batch normalization helps the network to converge much faster.", "plt.subplot(3, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\n\nplt.subplot(3, 1, 2)\nplt.title('Training accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 3)\nplt.title('Validation accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 1)\nplt.plot(solver.loss_history, 'o', label='baseline')\nplt.plot(bn_solver.loss_history, 'o', label='batchnorm')\n\nplt.subplot(3, 1, 2)\nplt.plot(solver.train_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')\n\nplt.subplot(3, 1, 3)\nplt.plot(solver.val_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')\n \nfor i in [1, 2, 3]:\n plt.subplot(3, 1, i)\n plt.legend(loc='upper center', ncol=4)\nplt.gcf().set_size_inches(15, 15)\nplt.show()", "Batch normalization and initialization\nWe will now run a small experiment to study the interaction of batch normalization and weight initialization.\nThe first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. 
The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.", "# Try training a very deep net with batchnorm\nhidden_dims = [50, 50, 50, 50, 50, 50, 50]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nbn_solvers = {}\nsolvers = {}\nweight_scales = np.logspace(-4, 0, num=20)\nfor i, weight_scale in enumerate(weight_scales):\n print 'Running weight scale %d / %d' % (i + 1, len(weight_scales))\n bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\n model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\n bn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n bn_solver.train()\n bn_solvers[weight_scale] = bn_solver\n\n solver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n solver.train()\n solvers[weight_scale] = solver\n\n# Plot results of weight scale experiment\nbest_train_accs, bn_best_train_accs = [], []\nbest_val_accs, bn_best_val_accs = [], []\nfinal_train_loss, bn_final_train_loss = [], []\n\nfor ws in weight_scales:\n best_train_accs.append(max(solvers[ws].train_acc_history))\n bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))\n \n best_val_accs.append(max(solvers[ws].val_acc_history))\n bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))\n \n final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))\n bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))\n \nplt.subplot(3, 1, 1)\nplt.title('Best val accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best 
val accuracy')\nplt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')\nplt.legend(ncol=2, loc='lower right')\n\nplt.subplot(3, 1, 2)\nplt.title('Best train accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best training accuracy')\nplt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')\nplt.legend()\n\nplt.subplot(3, 1, 3)\nplt.title('Final training loss vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Final training loss')\nplt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')\nplt.legend()\n\nplt.gcf().set_size_inches(10, 15)\nplt.show()", "Question:\nDescribe the results of this experiment, and try to give a reason why the experiment gave the results that it did.\nAnswer:" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jessicaowensby/We-Rise-Keras
notebooks/05_Transfer_Learning.ipynb
apache-2.0
[ "Fine Tuning Example\nTransfer learning example:\n1- Train a simple convnet on the MNIST dataset the first 5 digits [0..4].\n2- Freeze convolutional layers and fine-tune dense layers\n for the classification of digits [5..9].\nGet to 99.8% test accuracy after 5 epochs\nfor the first five digits classifier\nand 99.2% for the last five digits after transfer + fine-tuning.", "from __future__ import print_function\n\nimport datetime\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.preprocessing import image\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras import backend as K\nimport numpy as np\n\nnow = datetime.datetime.now\n\nbatch_size = 128\nnum_classes = 5\nepochs = 5\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n# number of convolutional filters to use\nfilters = 32\n# size of pooling area for max pooling\npool_size = 2\n# convolution kernel size\nkernel_size = 3", "Keras Configs\n<code>~/.keras/keras.json</code>\nSpecify whether you will use Theano or TensorFlow, Optmization options, Channel first and more.", "if K.image_data_format() == 'channels_first':\n input_shape = (1, img_rows, img_cols)\nelse:\n input_shape = (img_rows, img_cols, 1)\n\ndef train_model(model, train, test, num_classes):\n x_train = train[0].reshape((train[0].shape[0],) + input_shape)\n x_test = test[0].reshape((test[0].shape[0],) + input_shape)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(train[1], num_classes)\n y_test = keras.utils.to_categorical(test[1], num_classes)\n\n model.compile(loss='categorical_crossentropy',\n 
optimizer='adadelta',\n metrics=['accuracy'])\n\n t = now()\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n print('Training time: %s' % (now() - t))\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test score:', score[0])\n print('Test accuracy:', score[1])\n\n", "the data, shuffled and split between train and test sets", "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# create two datasets one with digits below 5 and one with 5 and above\nx_train_lt5 = x_train[y_train < 5]\ny_train_lt5 = y_train[y_train < 5]\nx_test_lt5 = x_test[y_test < 5]\ny_test_lt5 = y_test[y_test < 5]\n\nx_train_gte5 = x_train[y_train >= 5]\ny_train_gte5 = y_train[y_train >= 5] - 5\nx_test_gte5 = x_test[y_test >= 5]\ny_test_gte5 = y_test[y_test >= 5] - 5", "define two groups of layers: feature (convolutions) and classification (dense)", "feature_layers = [\n Conv2D(filters, kernel_size,\n padding='valid',\n input_shape=input_shape),\n Activation('relu'),\n Conv2D(filters, kernel_size),\n Activation('relu'),\n MaxPooling2D(pool_size=pool_size),\n Dropout(0.25),\n Flatten(),\n]\n\nclassification_layers = [\n Dense(128),\n Activation('relu'),\n Dropout(0.5),\n Dense(num_classes),\n Activation('softmax')\n]\n\n# create complete model\nmodel = Sequential(feature_layers + classification_layers)\n\n# train model for 5-digit classification [0..4]\ntrain_model(model,\n (x_train_lt5, y_train_lt5),\n (x_test_lt5, y_test_lt5), num_classes)\n\n# freeze feature layers and rebuild model\nfor l in feature_layers:\n l.trainable = False\n\n# transfer: train dense layers for new classification task [5..9]\ntrain_model(model,\n (x_train_gte5, y_train_gte5),\n (x_test_gte5, y_test_gte5), num_classes)", "Test it out on your own handwriting!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
google/starthinker
colabs/sheets_copy.ipynb
apache-2.0
[ "Sheet Copy\nCopy tab from a sheet to a sheet.\nLicense\nCopyright 2020 Google LLC,\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttps://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\nDisclaimer\nThis is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.\nThis code generated (see starthinker/scripts for possible source):\n - Command: \"python starthinker_ui/manage.py colab\"\n - Command: \"python starthinker/tools/colab.py [JSON RECIPE]\"\n1. Install Dependencies\nFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.", "!pip install git+https://github.com/google/starthinker\n", "2. Set Configuration\nThis code is required to initialize the project. 
Fill in required fields and press play.\n\nIf the recipe uses a Google Cloud Project:\n\nSet the configuration project value to the project identifier from these instructions.\n\n\nIf the recipe has auth set to user:\n\nIf you have user credentials:\nSet the configuration user value to your user credentials JSON.\n\n\n\nIf you DO NOT have user credentials:\n\nSet the configuration client value to downloaded client credentials.\n\n\n\nIf the recipe has auth set to service:\n\nSet the configuration service value to downloaded service credentials.", "from starthinker.util.configuration import Configuration\n\n\nCONFIG = Configuration(\n project=\"\",\n client={},\n service={},\n user=\"/content/user.json\",\n verbose=True\n)\n\n", "3. Enter Sheet Copy Recipe Parameters\n\nProvide the full edit URL for both sheets.\nProvide the tab name for both sheets.\nThe tab will only be copied if it does not already exist.\nModify the values below for your use case, can be done multiple times, then click play.", "FIELDS = {\n 'auth_read':'user', # Credentials used for reading data.\n 'from_sheet':'',\n 'from_tab':'',\n 'to_sheet':'',\n 'to_tab':'',\n}\n\nprint(\"Parameters Set To: %s\" % FIELDS)\n", "4. 
Execute Sheet Copy\nThis does NOT need to be modified unless you are changing the recipe, click play.", "from starthinker.util.configuration import execute\nfrom starthinker.util.recipe import json_set_fields\n\nTASKS = [\n {\n 'sheets':{\n 'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},\n 'template':{\n 'sheet':{'field':{'name':'from_sheet','kind':'string','order':1,'default':''}},\n 'tab':{'field':{'name':'from_tab','kind':'string','order':2,'default':''}}\n },\n 'sheet':{'field':{'name':'to_sheet','kind':'string','order':3,'default':''}},\n 'tab':{'field':{'name':'to_tab','kind':'string','order':4,'default':''}}\n }\n }\n]\n\njson_set_fields(TASKS, FIELDS)\n\nexecute(CONFIG, TASKS, force=True)\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sripaladugu/sripaladugu.github.io
ipynb/Pandas.ipynb
mit
[ "Series", "import pandas as pd", "A Series is like a cross between a list and a dictionary. The items are stored in an order and there are labels \nwith which you can retrieve them. A Series object also has a name attribute.", "animals = [\"Lion\", \"Tiger\", \"Monkey\", None]\ns = pd.Series(animals)\nprint(s)\nprint(\"The name of this Series: \", s.name)\n\nnumbers = [1, 2, 3, None]\npd.Series(numbers)\n\nimport numpy as np\nnp.NaN == None\n\nnp.NaN == np.NaN\n\nnp.isnan(np.NaN)\n\nsports = {'Cricket': 'India', 'Football': 'America', 'Soccer': 'Brazil'}\ns = pd.Series(sports)\ns\n\ns.index\n\ns = pd.Series(['Cricket', 'Football', 'Soccer'], index = [ 'India', 'America', 'Brazil'])\ns", "Querying a Series\nA pandas Series can be queried either by the index position or the index label. As we saw if you don't give \nan index to the series, the position and the label are effectively the same values. To query by numeric location, \nstarting at zero, use the iloc attribute. To query by the index label, you can use the loc attribute.", "s.iloc[0]\n\ns.loc['America']", "iloc and loc are not methods, they are attributes.\nOkay, so now we know how to get data out of the series. Let's talk about working with the data. A common task is to want to consider all of the values inside of a series and want to do some sort of operation. This could be trying to find a certain number, summarizing data or transforming the data in some way. A typical programmatic approach to this would be to iterate over all the items in the series, and invoke the operation one is interested in. For instance, we could create a data frame of floating point values. Let's think of these as prices for different products. We could write a little routine which iterates over all of the items in the series and adds them together to get a total. \nThis works, but it's slow. Modern computers can do many tasks simultaneously, especially, but not only, tasks involving mathematics. 
Pandas and the underlying NumPy libraries support a method of computation called vectorization. Vectorization works with most of the functions in the NumPy library, including the sum function.", "s = pd.Series(np.random.randint(0,1000,10000))\ns.head()", "Magic functions begin with a percentage sign. If we type % sign and then hit the Tab key, we can see a list of the available magic functions. You could write your own magic functions too, but that's a little bit outside of the scope of this course. We're actually going to use what's called a cellular magic function. These start with two percentage signs and modify a raptor code in the current Jupyter cell. The function we're going to use is called timeit. And as you may have guessed from the name, this function will run our code a few times to determine, on average, how long it takes.", "%%timeit -n 100\nsummary = 0\nfor item in s:\n summary += item\n\n%%timeit -n 100\nnp.sum(s)", "Related feature in Pandas and NumPy is called broadcasting. With broadcasting, you can apply an operation to every value in the series, changing the series. For instance, if we wanted to increase every random variable by 2, we could do so quickly using the += operator directly on the series object.", "%%timeit -n 10\ns = pd.Series(np.random.randint(0,1000,10000))\nfor label, value in s.iteritems():\n s.set_value(label, value + 2)\n\n%%timeit -n 10\ns = pd.Series(np.random.randint(0,1000,10000))\nfor label, value in s.iteritems():\n s.loc[label] = value + 2", "But if you find yourself iterating through a series, you should question whether you're doing things in the best possible way. Here's how we would do this using the series set value method.", "%%timeit -n 10\ns = pd.Series(np.random.randint(0,1000,10000))\ns += 2", "Amazing. Not only is it significantly faster, but it's more concise and maybe even easier to read too. 
The typical mathematical operations you would expect are vectorized, and the NumPy documentation outlines what it takes to create vectorized functions of your own. One last note on using the indexing operators to access series data. The .loc attribute lets you not only modify data in place, but also add new data as well. If the value you pass in as the index doesn't exist, then a new entry is added. And keep in mind, indices can have mixed types. While it's important to be aware of the typing going on underneath, Pandas will automatically change the underlying NumPy types as appropriate. \nMixed types are also possible", "s = pd.Series([2,1,2])\ns.loc['Animal'] = 'Bear'\ns\n\noriginal_sports = pd.Series({'Archery':'Bhutan',\n 'Golf': 'Scotland',\n 'Sumo': 'Japan'})\ncricket_loving_countries = pd.Series(['Australia', 'India', 'England'], index=['Cricket','Cricket','Cricket'])\nall_countries = original_sports.append(cricket_loving_countries)\nall_countries\n\noriginal_sports", "There are a couple of important considerations when using append. First, Pandas is going to take your series and try to infer the best data types to use. In this example, everything is a string, so there's no problems here. Second, the append method doesn't actually change the underlying series. It instead returns a new series which is made up of the two appended together. We can see this by going back and printing the original series of values and seeing that they haven't changed. This is actually a significant issue for new Pandas users who are used to objects being changed in place. So watch out for it, not just with append but with other Pandas functions as well.", "all_countries['Cricket']", "Finally, we see that when we query the appended series for those who have cricket as their national sport, we don't get a single value, but a series itself. 
This is actually very common, and if you have a relational database background, this is very similar to every table query resulting in a return set which itself is a table. \nThe DataFrame Data Structure\nYou can create a DataFrame in many different ways, some of which you might expect. For instance, you can use a group of series, where each series represents a row of data. Or you could use a group of dictionaries, where each dictionary represents a row of data.", "purchase_1 = pd.Series({'Name':'Kasi',\n 'Item purchased': 'Dog Food',\n 'Cost': 22.50})\npurchase_2 = pd.Series({'Name':'Pradeep',\n 'Item purchased': 'Cat Food',\n 'Cost': 21.50})\npurchase_3 = pd.Series({'Name':'Sri',\n 'Item purchased': 'Bird Food',\n 'Cost': 5.50})\ndf = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store1','Store1','Store2'])\ndf\n\nprint(df.loc['Store2'])\ntype(df.loc['Store2'])\n\nprint(df.loc['Store1'])\ntype(df.loc['Store1'])", "What if we want to do column, for example we want to get a list of all the costs?", "df.T # This essential turns your column names into indicies\n\ndf.T.loc['Cost'] # We can then use the loc method", "Since iloc and loc are used for row selection, the Panda's developers reserved indexing operator directly on the DataFrame for column selection. In a Panda's DataFrame, columns always have a name. So this selection is always label based, not as confusing as it was when using the square bracket operator on the series objects. For those familiar with relational databases, this operator is analogous to column projection.", "print(df['Item purchased'])\ntype(df['Item purchased'])", "Finally, since the result of using the indexing operator is the DataFrame or series, you can chain operations together. For instance, we could have rewritten the query for all Store 1 costs as", "df.loc['Store1']['Cost']", "This looks pretty reasonable and gets us the result we wanted. 
But chaining can come with some costs and is best avoided if you can use another approach. In particular, chaining tends to cause Pandas to return a copy of the DataFrame instead of a view on the DataFrame. For selecting data, this is not a big deal, though it might be slower than necessary. If you are changing data though, this is an important distinction and can be a source of error. \nHere's another method. As we saw, .loc does row selection, and it can take two parameters, the row index and the list of column names. .loc also supports slicing. If we wanted to select all rows, we can use a colon to indicate a full slice from beginning to end. And then add the column name as the second parameter as a string. In fact, if we wanted to include multiple columns, we could do so in a list. And Pandas will bring back only the columns we have asked for.", "df.loc[:, ['Name','Cost']]", "So that's selecting and projecting data from a DataFrame based on row and column labels. The key concepts to remember are that the rows and columns are really just for our benefit. Underneath this is just a two axis labeled array, and transposing the columns is easy. Also, consider the issue of chaining carefully, and try to avoid it, it can cause unpredictable results. Where your intent was to obtain a view of the data, but instead Pandas returns to you a copy. In the Panda's world, friends don't let friends chain calls. So if you see it, point it out, and share a less ambiguous solution.", "df.drop('Store1')", "It's easy to delete data in series and DataFrames, and we can use the drop function to do so. This function takes a single parameter, which is the index or row label, to drop. This is another tricky place for new users of pandas. The drop function doesn't change the DataFrame by default. And instead, returns to you a copy of the DataFrame with the given rows removed. We can see that our original DataFrame is still intact. 
This is a very typical pattern in Pandas, where in place changes to a DataFrame are only done if need be, usually on changes involving indices. So it's important to be aware of. Drop has two interesting optional parameters. The first is called in place, and if it's set to true, the DataFrame will be updated in place, instead of a copy being returned. The second parameter is the axis, which should be dropped. By default, this value is 0, indicating the row axis. But you could change it to 1 if you want to drop a column.", "df.drop('Cost',axis=1)", "There is a second way to drop a column, however. And that's directly through the use of the indexing operator, using the del keyword. This way of dropping data, however, takes immediate effect on the DataFrame and does not return a view.", "del df['Item purchased']\ndf", "Finally, adding a new column to the DataFrame is as easy as assigning it to some value. For instance, if we wanted to add a new location as a column with default value of none, we could do so by using the assignment operator after the square brackets. This broadcasts the default value to the new column immediately.", "df['Location'] = None\ndf", "The common work flow is to read your data into a DataFrame then reduce this DataFrame to the particular columns or rows that you're interested in working with. As you've seen, the Panda's toolkit tries to give you views on a DataFrame. This is much faster than copying data and much more memory efficient too. But it does mean that if you're manipulating the data you have to be aware that any changes to the DataFrame you're working on may have an impact on the base data frame you used originally. Here's an example using our same purchasing DataFrame from earlier. 
We can create a series based on just the cost category using the square brackets.", "costs = df['Cost']\ncosts", "Then we can increase the cost in this series using broadcasting.", "costs += 2\ncosts", "Now if we look at our original DataFrame, we see those costs have risen as well. This is an important consideration to watch out for. If you want to explicitly use a copy, then you should consider calling the copy method on the DataFrame for it first.", "df", "A common workflow is to read the dataset in, usually from some external file. We saw previously how you can do this using Python, and lists, and dictionaries. You can imagine how you might use those dictionaries to create a Pandas DataFrame. Thankfully, Pandas has built-in support for delimited files such as CSV files as well as a variety of other data formats including relational databases, Excel, and HTML tables. I've saved a CSV file called olympics.csv, which has data from Wikipedia that contains a summary list of the medal various countries have won at the Olympics. We can take a look at this file using the shell command cat. Which we can invoke directly using the exclamation point. What happens here is that when the Jupyter notebook sees a line beginning with an exclamation mark, it sends the rest of the line to the operating system shell for evaluation. So cat works on Linux and Macs.", "!cat olympics.csv", "We see from the cat output that there seems to be a numeric list of columns followed by a bunch of column identifiers. The column identifiers have some odd looking characters in them. This is the unicode numero sign, which means number of. Then we have rows of data, all columns separated. We can read this into a DataFrame by calling the read_csv function of the module. 
When we look at the DataFrame we see that the first cell has an NaN in it since it's an empty value, and the rows have been automatically indexed for us.", "df = pd.read_csv('olympics.csv')\ndf.head()", "It seems pretty clear that the first row of data in the DataFrame is what we really want to see as the column names. It also seems like the first column in the data is the country name, which we would like to make an index. Read csv has a number of parameters that we can use to indicate to Pandas how rows and columns should be labeled. For instance, we can use the index call to indicate which column should be the index and we can also use the header parameter to indicate which row from the data file should be used as the header.", "df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)\ndf.head()", "Now this data came from the all time Olympic games medal table on Wikipedia. If we head to the page we could see that instead of running gold, silver and bronze in the pages, these nice little icons with a one, a two, and a three in them In our csv file these were represented with the strings 01 !, 02 !, and so on. We see that the column values are repeated which really isn't good practice. Panda's recognize this in a panda.1 and .2 to make things more unique. But this labeling isn't really as clear as it could be, so we should clean up the data file. We can of course do this just by going and editing the CSV file directly, but we can also set the column names using the Pandas name property. Panda stores a list of all of the columns in the .columns attribute.", "df.columns", "We can change the values of the column names by iterating over this list and calling the rename method of the data frame. Here we just iterate through all of the columns looking to see if they start with a 01, 02, 03 or numeric character. 
If they do, we can call rename and set the column parameters to a dictionary with the keys being the column we want to replace and the value being the new value we want. Here we'll slice some of the old values in two, since we don't want to lose the unique appended values. We'll also set the ever-important in place parameter to true so Pandas knows to update this data frame directly.", "df.rename?\n\nfor col in df.columns:\n if col[:2]=='01': # if the first two letters are '01'\n df.rename(columns={col:'Gold'+col[4:]}, inplace=True) #mapping changes labels\n if col[:2]=='02':\n df.rename(columns={col:'Silver'+col[4:]}, inplace=True)\n if col[:2]=='03':\n df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)\n if col[:1]=='№':\n df.rename(columns={col:'#'+col[1:]}, inplace=True)\ndf.head()", "Querying a DataFrame\nBoolean masking is the heart of fast and efficient querying in NumPy. It's analogous a bit to masking used in other computational areas. A Boolean mask is an array which can be of one dimension like a series, or two dimensions like a DataFrame, where each of the values in the array are either true or false. This array is essentially overlaid on top of the data structure that we're querying. And any cell aligned with the true value will be admitted into our final result, and any sign aligned with a false value will not. Boolean masking is powerful conceptually and is the cornerstone of efficient NumPy and pandas querying. This technique is well used in other areas of computer science, for instance, in graphics. But it doesn't really have an analogue in other traditional relational databases, so I think it's worth pointing out here. Boolean masks are created by applying operators directly to the pandas series or DataFrame objects. For instance, in our Olympics data set, you might be interested in seeing only those countries who have achieved a gold medal at the summer Olympics. 
To build a Boolean mask for this query, we project the gold column using the indexing operator and apply the greater than operator with a comparison value of zero. This is essentially broadcasting a comparison operator, greater than, with the results being returned as a Boolean series.", "df['Gold']>0", "The resultant series is indexed where the value of each cell is either true or false depending on whether a country has won at least one gold medal, and the index is the country name. \nSo this builds us the Boolean mask, which is half the battle. What we want to do next is overlay that mask on the DataFrame. We can do this using the where function. The where function takes a Boolean mask as a condition, applies it to the DataFrame or series, and returns a new DataFrame or series of the same shape. Let's apply this Boolean mask to our Olympics data and create a DataFrame of only those countries who have won a gold at a summer games.", "only_gold = df.where(df['Gold']>0)\nonly_gold.head()", "We see that the resulting DataFrame keeps the original indexed values, and only data from countries that met the condition are retained. All of the countries which did not meet the condition have NaN data instead. This is okay. Most statistical functions built into the DataFrame object ignore values of NaN.", "df['Gold'].count()\n\nonly_gold['Gold'].count()\n\nonly_gold = only_gold.dropna()\nonly_gold.head()", "Often we want to drop those rows which have no data. To do this, we can use the drop NA function. You can optionally provide drop NA the axis it should be considering. Remember that the axis is just an indicator for the columns or rows and that the default is zero, which means rows. \nWhen you find yourself talking about pandas and saying phrases like, often I want to, it's quite likely the developers have included a shortcut for this common operation. For instance, in this example, we don't actually have to use the where function explicitly. 
The pandas developers allow the indexing operator to take a Boolean mask as a value instead of just a list of column names.", "only_gold = df[df['Gold']>0]\nonly_gold.head()\n\n#To get the no of countries who received at least one gold in Summer or Winter Olympics\nlen(df[(df['Gold']>0) | (df['Gold.1']>0)])\n\n#Are there any countries which won a gold in winter olympics but never in summer olympics\ndf[(df['Gold']==0) & (df['Gold.1']>0)]", "Extremely important, and often an issue for new users, is to remember that each Boolean mask needs to be encased in parentheses because of the order of operations. This can cause no end of frustration if you're not used to it, so be careful. \nIndexing DataFrames\nThe index is essentially a row level label, and we know that rows correspond to axis zero. In our Olympics data, we indexed the data frame by the name of the country. Indices can either be inferred, such as when we create a new series without an index, in which case we get numeric values, or they can be set explicitly, like when we use the dictionary object to create the series, or when we loaded data from the CSV file and specified the header. Another option for setting an index is to use the set_index function. This function takes a list of columns and promotes those columns to an index. Set index is a destructive process, it doesn't keep the current index. If you want to keep the current index, you need to manually create a new column and copy into it values from the index attribute. Let's go back to our Olympics DataFrame. Let's say that we don't want to index the DataFrame by countries, but instead want to index by the number of gold medals that were won at summer games. First we need to preserve the country information into a new column. We can do this using the indexing operator or the string that has the column label. 
Then we can use the set_index to set index of the column to summer gold medal wins.", "df['country'] = df.index\ndf = df.set_index('Gold')\ndf.head()", "You'll see that when we create a new index from an existing column it appears that a new first row has been added with empty values. This isn't quite what's happening. And we know this in part because an empty value is actually rendered either as a none or an NaN if the data type of the column is numeric. What's actually happened is that the index has a name. Whatever the column name was in the Jupiter notebook has just provided this in the output. We can get rid of the index completely by calling the function reset_index. This promotes the index into a column and creates a default numbered index.", "df = df.reset_index()\ndf.head()", "One nice feature of pandas is that it has the option to do multi-level indexing. This is similar to composite keys in relational database systems. To create a multi-level index, we simply call set index and give it a list of columns that we're interested in promoting to an index. \nPandas will search through these in order, finding the distinct data and forming composite indices. A good example of this is often found when dealing with geographical data which is sorted by regions or demographics. Let's change data sets and look at some census data for a better example. This data is stored in the file census.csv and comes from the United States Census Bureau. In particular, this is a breakdown of the population level data at the US county level. It's a great example of how different kinds of data sets might be formatted when you're trying to clean them. For instance, in this data set there are two summarized levels, one that contains summary data for the whole country. 
And one that contains summary data for each state, and one that contains summary data for each county.", "df = pd.read_csv('census.csv')\ndf.head()", "I often find that I want to see a list of all the unique values in a given column. In this DataFrame, we see that the possible values for the sum level are using the unique function on the DataFrame. This is similar to the SQL distinct operator. Here we can run unique on the sum level of our current DataFrame and see that there are only two different values, 40 and 50.", "df['SUMLEV'].unique() #40 belongs to state level data and 50 belongs to county level data", "Let's get rid of all of the rows that are summaries at the state level and just keep the county data.", "df = df[df['SUMLEV']==50]\ndf.head()", "Also while this data set is interesting for a number of different reasons, let's reduce the data that we're going to look at to just the total population estimates and the total number of births. We can do this by creating a list of column names that we want to keep then project those and assign the resulting DataFrame to our df variable.", "columns_to_keep = ['STNAME',\n 'CTYNAME',\n 'BIRTHS2010',\n 'BIRTHS2011',\n 'BIRTHS2012',\n 'BIRTHS2013',\n 'BIRTHS2014',\n 'BIRTHS2015',\n 'POPESTIMATE2010',\n 'POPESTIMATE2011',\n 'POPESTIMATE2012',\n 'POPESTIMATE2013',\n 'POPESTIMATE2014',\n 'POPESTIMATE2015'\n ]\ndf = df[columns_to_keep]\ndf.head()", "The US Census data breaks down estimates of population data by state and county. We can load the data and set the index to be a combination of the state and county values and see how pandas handles it in a DataFrame. We do this by creating a list of the column identifiers we want to have indexed. And then calling set index with this list and assigning the output as appropriate. 
We see here that we have a dual index, first the state name and then the county name.", "df = df.set_index(['STNAME','CTYNAME'])\ndf.head()", "An immediate question which comes up is how we can query this DataFrame. For instance, we saw previously that the loc attribute of the DataFrame can take multiple arguments. And it could query both the row and the columns. When you use a MultiIndex, you must provide the arguments in order by the level you wish to query. Inside of the index, each column is called a level and the outermost column is level zero. For instance, if we want to see the population results from Washtenaw County, you'd want to the first argument as the state of Michigan.", "df.loc['Michigan', 'Washtenaw County']", "You might be interested in just comparing two counties. For instance, Washtenaw and Wayne County which covers Detroit. To do this, we can pass the loc method, a list of tuples which describe the indices we wish to query. Since we have a MultiIndex of two values, the state and the county, we need to provide two values as each element of our filtering list.", "df.loc[[('Michigan','Washtenaw County'),('Michigan','Wayne County')]]", "Okay so that's how hierarchical indices work in a nutshell. They're a special part of the pandas library which I think can make management and reasoning about data easier. Of course hierarchical labeling isn't just for rows. For example, you can transpose this matrix and now have hierarchical column labels. 
And projecting a single column which has these labels works exactly the way you would expect it to.\nQuestion", "purchase_1 = pd.Series({'Name': 'Chris',\n 'Item Purchased': 'Dog Food',\n 'Cost': 22.50})\npurchase_2 = pd.Series({'Name': 'Kevyn',\n 'Item Purchased': 'Kitty Litter',\n 'Cost': 2.50})\npurchase_3 = pd.Series({'Name': 'Vinod',\n 'Item Purchased': 'Bird Seed',\n 'Cost': 5.00})\n\ndf = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])\ndf", "Reindex the purchase records DataFrame to be indexed hierarchically, first by store, then by \nperson. Name the indexes 'Location' and 'Name'. Then add a new entry to it with the value of:\n Name: 'Kevyn', Item Purchased:'Kitty Food', 'Cost':3.00, Location:'Store 2'.", "df = df.set_index([df.index, 'Name'])\ndf.index.names = ['Location', 'Name']\ndf = df.append(pd.Series(data={'Cost': 3.00, 'Item Purchased': 'Kitty Food'}, name=('Store 2', 'Kevyn')))\ndf", "If we want we can also reset the index as columns as follows:", "df.reset_index()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ComputationalModeling/spring-2017-danielak
past-semesters/spring_2016/day-by-day/day03-order-of-magnitude-estimation/Day_3_Pre_Class_Notebook.ipynb
agpl-3.0
[ "Goals for today's pre-class assignment\n\nWrite a Python program to make simple calculations\nWork with number and string data types\nWork with the list data type\n\nAssignment instructions\nWatch the videos below, read through Section 3.1 of the Python Tutorial, and complete the assigned programming problems. Please get started early, and come to office hours if you have any questions!\nRecall that to make notebook cells that have Python code in them do something, hold down the 'shift' key and then press the 'enter' key (you'll have to do this to get the YouTube videos to run). To edit a cell (to add answers, for example) you double-click on the cell, add your text, and then enter it by holding down 'shift' and pressing 'enter'\nThis assignment is due by 11:59 p.m. the day before class, and should be uploaded into the \"Pre-class assignments\" dropbox folder for Day 3. Submission instructions can be found at the end of the notebook.", "# Imports the functionality that we need to display YouTube videos in a Jupyter Notebook. \n# You need to run this cell before you run ANY of the YouTube videos.\n\nfrom IPython.display import YouTubeVideo \n\n# Display a specific YouTube video, with a given width and height. \n# WE STRONGLY RECOMMEND that you can watch the video in full-screen mode\n# (much higher resolution) by clicking the little box in the bottom-right \n# corner of the video.\n\nYouTubeVideo(\"cCLB1sNpNYo\",width=640,height=360)", "Question 1: In the cell below, write a simple program to calculate the area of a rectangle where you give it the length and width of the rectangle as variables, store it in a third variable, and print out the resulting area. Add comments to each line to explain what you're doing!", "# write your program here. 
Don't forget that you execute your program by holding\n# down 'shift' and pressing 'enter'\n\n\n\n\n# Don't forget to watch the video in full-screen mode!\n\nYouTubeVideo(\"yv7klK57Ezc\",width=640,height=360)", "Question 2: In the cells below, create a variable containing a floating-point number and a second variable containing an integer. Turn both into strings and concatenate them, and store it in a new variable. Finally, print out the last value in your newly-concatenated variable. You can use more than one cell if you need to print out multiple quantities!", "# write your program here, using multiple cells if necessary (adding extra cells using\n# the 'Cell' menu at the top of this notebook). Don't forget that you can execute \n# your program by holding down 'shift' and pressing 'enter' in each cell!\n\n\n\n\n# Don't forget to watch the video in full-screen mode!\n\nYouTubeVideo(\"TJ_bGrigAMg\",width=640,height=360)", "Question 3: In the cells below, create a list that contains, in this order:\n\nyour first name as a string\nyour age as a floating-point number\nyour room or apartment number as an integer\n\nPrint this list out. Then, after you print it out, replace your first name in the list with your last name, and replace your age with the current year as an integer. Then, append one or more new variables (of whatever type), print out the length of the list using the len() function, and then print out the entire list again!", "# write your program here, using multiple cells if necessary (adding extra cells using\n# the 'Cell' menu at the top of this notebook). 
Don't forget that you can execute \n# your program by holding down 'shift' and pressing 'enter' in each cell!\n\n\n", "Assignment wrapup\nQuestion 4: What questions do you have, if any, about any of the topics discussed in this assignment \nafter watching the videos, reading the link to the Python tutorial, and trying to write the programs?\nPut your answer here!\nQuestion 5: Do you have any further questions or comments about this material, or anything else that's going on in class?\nPut your answer here!\nCongratulations, you're done!\nSubmit this assignment by uploading it to the course Desire2Learn web page. Go to the \"Pre-class assignments\" folder, find the dropbox link for Day 3, and upload it there.\nSee you in class!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
robertchase/rhc
mock.ipynb
mit
[ "Defining mock connections\nStart with some setup.", "import sys\nsys.path.append('/opt/rhc')\n\nimport rhc.micro as micro\nimport rhc.async as async\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)", "Create a simple resource", "p=micro.load_connection([\n 'CONNECTION placeholder http://jsonplaceholder.typicode.com',\n 'RESOURCE document /posts/{id}',\n])\nasync.wait(micro.connection.placeholder.document(1))", "Define a mock for the resource\nHere we define an object with a method named document and assign it to the connection's mock attribute.\nNote: the method name matches the RESOURCE name.", "class MyMock(object):\n def document(self, method, path, headers, body):\n print('method', method)\n print('path', path)\n print('headers', headers)\n print('body', body)\n return 'foo'\nmicro.connection.placeholder.mock = MyMock()", "Call the mocked resource\nWith a mock in place, we can make the same call as earlier, but instead of making a network connection,\nthe document method on the connection's mock attribute is called.", "async.wait(micro.connection.placeholder.document(1))", "What is going on here?\nThe mock is not called until the arguments provided to the partial\nare evaluated and prepared for the HTTP connection; this ensures that the \nmock data matches the actual connection data.\nThe mock is called with:\n\nthe HTTP method\nthe path, with any substititions\nheaders as a dict\ncontent as a dict, or None if no content\n\nNotes:\n\nThe return value from the mock will be used as the partial's response.\nThe final line, \"foo\", is the return from the mock document RESOURCE\nas printed by the default async.wait callback handler.\nIf the mock throws an exception, the callback will be called with a non-zero result.\nThe handler, setup and wrapper functions are not called.\nThe example uses a class; it could also be a collection of functions in a module.\n\nHere is an example of content created from unused kwargs:", 
"async.wait(micro.connection.placeholder.document(1, test='value'))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
watsonyanghx/CS231n
assignment1/knn.ipynb
mit
[ "k-Nearest Neighbor (kNN) exercise\nComplete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website.\nThe kNN classifier consists of two stages:\n\nDuring training, the classifier takes the training data and simply remembers it\nDuring testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples\nThe value of k is cross-validated\n\nIn this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.", "# Run some setup code for this notebook.\n\nimport random\nimport numpy as np\nfrom cs231n.data_utils import load_CIFAR10\nimport matplotlib.pyplot as plt\n\n# This is a bit of magic to make matplotlib figures appear inline in the notebook\n# rather than in a new window.\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# Some more magic so that the notebook will reload external python modules;\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\n# Load the raw CIFAR-10 data.\ncifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\nX_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n# As a sanity check, we print out the size of the training and test data.\nprint 'Training data shape: ', X_train.shape\nprint 'Training labels shape: ', y_train.shape\nprint 'Test data shape: ', X_test.shape\nprint 'Test labels shape: ', y_test.shape\n\n# Visualize some examples from the dataset.\n# We show a few examples of training images from each class.\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 
'ship', 'truck']\nnum_classes = len(classes)\nsamples_per_class = 7\nfor y, cls in enumerate(classes):\n idxs = np.flatnonzero(y_train == y)\n idxs = np.random.choice(idxs, samples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt_idx = i * num_classes + y + 1\n plt.subplot(samples_per_class, num_classes, plt_idx)\n plt.imshow(X_train[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls)\nplt.show()\n\n# Subsample the data for more efficient code execution in this exercise\nnum_training = 5000\nmask = range(num_training)\nX_train = X_train[mask]\ny_train = y_train[mask]\n\nnum_test = 500\nmask = range(num_test)\nX_test = X_test[mask]\ny_test = y_test[mask]\n\n# Reshape the image data into rows\nX_train = np.reshape(X_train, (X_train.shape[0], -1))\nX_test = np.reshape(X_test, (X_test.shape[0], -1))\nprint X_train.shape, X_test.shape\n\nfrom cs231n.classifiers import KNearestNeighbor\n\n# Create a kNN classifier instance. \n# Remember that training a kNN classifier is a noop: \n# the Classifier simply remembers the data and does no further processing \nclassifier = KNearestNeighbor()\nclassifier.train(X_train, y_train)", "We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps: \n\nFirst we must compute the distances between all test examples and all train examples. \nGiven these distances, for each test example we find the k nearest examples and have them vote for the label\n\nLets begin with computing the distance matrix between all training and test examples. 
For example, if there are Ntr training examples and Nte test examples, this stage should result in a Nte x Ntr matrix where each element (i,j) is the distance between the i-th test and j-th train example.\nFirst, open cs231n/classifiers/k_nearest_neighbor.py and implement the function compute_distances_two_loops that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.", "# Open cs231n/classifiers/k_nearest_neighbor.py and implement\n# compute_distances_two_loops.\n\n# Test your implementation:\ndists = classifier.compute_distances_two_loops(X_test)\nprint dists.shape\n\n# We can visualize the distance matrix: each row is a single test example and\n# its distances to training examples\nplt.imshow(dists, interpolation='none')\nplt.show()", "Inline Question #1: Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)\n\nWhat in the data is the cause behind the distinctly bright rows?\nWhat causes the columns?\n\nYour Answer: fill this in.", "# Now implement the function predict_labels and run the code below:\n# We use k = 1 (which is Nearest Neighbor).\ny_test_pred = classifier.predict_labels(dists, k=1)\n\n# Compute and print the fraction of correctly predicted examples\nnum_correct = np.sum(y_test_pred == y_test)\naccuracy = float(num_correct) / num_test\nprint 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)", "You should expect to see approximately 27% accuracy. 
Now lets try out a larger k, say k = 5:", "y_test_pred = classifier.predict_labels(dists, k=5)\nnum_correct = np.sum(y_test_pred == y_test)\naccuracy = float(num_correct) / num_test\nprint 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)", "You should expect to see a slightly better performance than with k = 1.", "# Now lets speed up distance matrix computation by using partial vectorization\n# with one loop. Implement the function compute_distances_one_loop and run the\n# code below:\ndists_one = classifier.compute_distances_one_loop(X_test)\n\n# To ensure that our vectorized implementation is correct, we make sure that it\n# agrees with the naive implementation. There are many ways to decide whether\n# two matrices are similar; one of the simplest is the Frobenius norm. In case\n# you haven't seen it before, the Frobenius norm of two matrices is the square\n# root of the squared sum of differences of all elements; in other words, reshape\n# the matrices into vectors and compute the Euclidean distance between them.\ndifference = np.linalg.norm(dists - dists_one, ord='fro')\nprint 'Difference was: %f' % (difference, )\nif difference < 0.001:\n print 'Good! The distance matrices are the same'\nelse:\n print 'Uh-oh! The distance matrices are different'\n\n# Now implement the fully vectorized version inside compute_distances_no_loops\n# and run the code\ndists_two = classifier.compute_distances_no_loops(X_test)\n\n# check that the distance matrix agrees with the one we computed before:\ndifference = np.linalg.norm(dists - dists_two, ord='fro')\nprint 'Difference was: %f' % (difference, )\nif difference < 0.001:\n print 'Good! The distance matrices are the same'\nelse:\n print 'Uh-oh! 
The distance matrices are different'\n\n# Let's compare how fast the implementations are\ndef time_function(f, *args):\n \"\"\"\n Call a function f with args and return the time (in seconds) that it took to execute.\n \"\"\"\n import time\n tic = time.time()\n f(*args)\n toc = time.time()\n return toc - tic\n\ntwo_loop_time = time_function(classifier.compute_distances_two_loops, X_test)\nprint 'Two loop version took %f seconds' % two_loop_time\n\none_loop_time = time_function(classifier.compute_distances_one_loop, X_test)\nprint 'One loop version took %f seconds' % one_loop_time\n\nno_loop_time = time_function(classifier.compute_distances_no_loops, X_test)\nprint 'No loop version took %f seconds' % no_loop_time\n\n# you should see significantly faster performance with the fully vectorized implementation", "Cross-validation\nWe have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.", "num_folds = 5\nk_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]\n\nX_train_folds = []\ny_train_folds = []\n################################################################################\n# TODO: #\n# Split up the training data into folds. After splitting, X_train_folds and #\n# y_train_folds should each be lists of length num_folds, where #\n# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #\n# Hint: Look up the numpy array_split function. 
#\n################################################################################\n\n# shuffle data first\nshuffle_indices = np.random.permutation(np.arange(len(X_train)))\nX_train = X_train[shuffle_indices]\ny_train = y_train[shuffle_indices]\n\n\n# split data into folds\nX_train_folds = np.array(np.array_split(X_train, num_folds))\ny_train_folds = np.array(np.array_split(y_train, num_folds))\n\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# A dictionary holding the accuracies for different values of k that we find\n# when running cross-validation. After running cross-validation,\n# k_to_accuracies[k] should be a list of length num_folds giving the different\n# accuracy values that we found when using that value of k.\nk_to_accuracies = {}\n\n\n################################################################################\n# TODO: #\n# Perform k-fold cross validation to find the best value of k. For each #\n# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #\n# where in each case you use all but one of the folds as training data and the #\n# last fold as a validation set. Store the accuracies for all fold and all #\n# values of k in the k_to_accuracies dictionary. 
#\n################################################################################\nfor kk in k_choices:\n for i in range(num_folds):\n combinat = [x for x in xrange(num_folds) if x != i] \n X_train_this_time = np.concatenate(X_train_folds[combinat])\n y_train_this_time = np.concatenate(y_train_folds[combinat])\n \n classifier_k = KNearestNeighbor()\n classifier_k.train(X_train_this_time, y_train_this_time)\n dists_fold = classifier_k.compute_distances_no_loops(X_train_folds[i])\n y_test_pred = classifier_k.predict_labels(dists_fold, k=kk)\n num_correct = np.sum(y_test_pred == y_train_folds[i])\n accuracy = float(num_correct) / len(X_train_folds[i])\n \n k_to_accuracies.setdefault(kk, []).append(accuracy)\n \n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# Print out the computed accuracies\nfor k in sorted(k_to_accuracies):\n for accuracy in k_to_accuracies[k]:\n print 'k = %d, accuracy = %f' % (k, accuracy)\n\n# plot the raw observations\nfor k in k_choices:\n accuracies = k_to_accuracies[k]\n plt.scatter([k] * len(accuracies), accuracies)\n\n# plot the trend line with error bars that correspond to standard deviation\naccuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])\naccuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])\nplt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)\nplt.title('Cross-validation on k')\nplt.xlabel('k')\nplt.ylabel('Cross-validation accuracy')\nplt.show()\n\n# Based on the cross-validation results above, choose the best value for k, \n# retrain the classifier using all the training data, and test it on the test\n# data. 
You should be able to get above 28% accuracy on the test data.\nbest_k = 1\n\nclassifier = KNearestNeighbor()\nclassifier.train(X_train, y_train)\ny_test_pred = classifier.predict(X_test, k=best_k)\n\n# Compute and display the accuracy\nnum_correct = np.sum(y_test_pred == y_test)\naccuracy = float(num_correct) / num_test\nprint 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bassdeveloper/bassdeveloper.github.io-source
code/MLAZ/Part_1_Data_Preprocessing/Data_Preprocessing_Py.ipynb
mit
[ "# Data Preprocessing Template\n Data pre-processing is an essential step in Machine Learning.", "# Importing the libraries \nimport numpy as np # Mathematics (Linear Algebra). Makes Python programming like R.\nimport matplotlib.pyplot as plt # For plotting and viewing graphs from datasets\nimport pandas as pd # Importing and managing datasets\n\n# Importing the dataset\ndataset = pd.read_csv('Data.csv') # Read the dataset and store into the 'dataset' variable\nprint(dataset) # Lets take a look at the dataset\n%store dataset\n\n# Create Matrix of Features\n#%store -r dataset\nX = dataset.iloc[:, :-1].values # All columns except the last one\nprint(\"\\nThe X variable (dataset) predictor variables \\n\")\nprint(X)\ny = dataset.iloc[:, 3].values\nprint(\"\\nThe Y variable (dataset) \\n\")\nprint(y)\n\n# Fix missing values\nfrom sklearn.preprocessing import Imputer # Imputer class helps in data preprocessing. \n# help(Imputer)# Imputation transformer for completing missing values\n# Here, we are replacing 'NaN' (missing values) with mean\nimputer=Imputer(missing_values='NaN', strategy='mean',axis=0) # Mean is default value. \nimputer.fit(X[:,1:3])\nX[:,1:3]=imputer.transform(X[:,1:3])\nX\n\n# Convert Categorical Data\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_X= LabelEncoder()\nX[:,0]=labelencoder_X.fit_transform(X[:,0])\nprint(\"This is X after Label Encoding\\n\")\nprint(X)\nlabelencoder_y=LabelEncoder()\ny=labelencoder_X.fit_transform(y)\nprint(\"\\n\\nThis is y after Label Encoding\\n\")\nprint(y)", "The main problem here is that machine learning is based on equations and numbers. The categorical variables are given an ordinal value and this means that the model can be biased for the higher value. i.e. 
\n\n0 for France\n1 for Germany\n2 for Spain\n\ncould mean that Spain gets a higher priority than France or Germany and this could lead to an invalid model.", "from sklearn.preprocessing import OneHotEncoder\nonehotencoder=OneHotEncoder(categorical_features=[0])\nX=onehotencoder.fit_transform(X).toarray()\nX\n\n# Splitting test set and trainset\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train,y_test=train_test_split(X,y,test_size=0.2)", "Feature Scaling\nFeature scaling is a very important method. If variables are not on the same scale, it will cause issues in the machine learning model. Why is it so?\nSince a lot of machine learning models are based on Euclidean Distance. This means that they work on the squared differences. If the variable is not scaled, it would mean that the difference between one squared difference and another would be large. Thus, scaling is needed.\nThere are two types of feature scaling:\n1. Standardization\n$$x_{stand}=\\frac{x-\\bar{x}}{{\\sigma}_x}$$\nWhere ${\\sigma}_x$ is the Standard Deviation and $\\bar{x}$ is the mean of x.\n\nNormalization\n\n$$ x_{norm}=\\frac{x-min(x)}{max(x)-min(x)}$$", "# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train=sc_X.fit_transform(X_train)\nX_test=sc_X.transform(X_test)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
MaximMalakhov/coursera
Finding a structure in the data/Week 2/PCA.ipynb
mit
[ "Метод главных компонент\nВ данном задании вам будет предложено ознакомиться с подходом, который переоткрывался в самых разных областях, имеет множество разных интерпретаций, а также несколько интересных обобщений: методом главных компонент (principal component analysis).\nProgramming assignment\nЗадание разбито на две части: \n- работа с модельными данными,\n- работа с реальными данными.\nВ конце каждого пункта от вас требуется получить ответ и загрузить в соответствующую форму в виде набора текстовых файлов.", "import numpy as np\nimport pandas as pd\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\nmatplotlib.style.use('ggplot')\n%matplotlib inline", "Теория\nЛюбой набор данных представляет собой матрицу $X$.\nМетод главных компонент последовательно находит следующие линейные комбинации признаков (компоненты) из $X$:\n- каждая компонента ортогональна всем остальным и нормированна: $<w_i, w_j> = 0, \\quad ||w_i||=1$,\n- каждая компонента описывает максимально возможную дисперсию данных (с учётом предыдущего ограничения).\nПредположения, в рамках которых данный подход будет работать хорошо:\n- линейность компонент: мы предполагаем, что данные можно анализировать линейными методами,\n- большие дисперсии важны: предполагается, что наиболее важны те направления в данных, вдоль которых они имеют наибольшую дисперсию,\n- все компоненты ортогональны: это предположение позволяет проводить анализ главных компонент при помощи техник линейной алгебры (например, сингулярное разложение матрицы $X$ или спектральное разложение матрицы $X^TX$).\nКак это выглядит математически?\nОбозначим следующим образом выборочную матрицу ковариации данных: $\\hat{C} \\propto Q = X^TX$. ($Q$ отличается от $\\hat{C}$ нормировкой на число объектов).\nСингулярное разложение матрицы $Q$ выглядит следующим образом:\n$$Q = X^TX = W \\Lambda W^T$$\nМожно строго показать, что столбцы матрицы $W$ являются главными компонентами матрицы $X$, т.е. 
комбинациями признаков, удовлетворяющих двум условиям, указанным в начале. При этом дисперсия данных вдоль направления, заданного каждой компонентой, равна соответствующему значению диагональной матрицы $\\Lambda$.\nКак же на основании этого преобразования производить уменьшение размерности? Мы можем отранжировать компоненты, используя значения дисперсий данных вдоль них.\nСделаем это: $\\lambda_{(1)} > \\lambda_{(2)} > \\dots > \\lambda_{(D)}$.\nТогда, если мы выберем компоненты, соответствующие первым $d$ дисперсиям из этого списка, мы получим набор из $d$ новых признаков, которые наилучшим образом описывают дисперсию изначального набора данных среди всех других возможных линейных комбинаций исходных признаков матрицы $X$. \n- Если $d=D$, то мы вообще не теряем никакой информации.\n- Если $d<D$, то мы теряем информацию, которая, при справедливости указанных выше предположений, будет пропорциональна сумме дисперсий отброшенных компонент.\nПолучается, что метод главных компонент позволяет нам ранжировать полученные компоненты по \"значимости\", а также запустить процесс их отбора.\nПример\nРассмотрим набор данных, который сэмплирован из многомерного нормального распределения с матрицей ковариации $C = \\begin{pmatrix} 3 & 1 \\ 1 & 2 \\end{pmatrix}$.", "from sklearn.decomposition import PCA\n\nmu = np.zeros(2)\nC = np.array([[3,1],[1,2]])\n\ndata = np.random.multivariate_normal(mu, C, size=50)\nplt.scatter(data[:,0], data[:,1])\nplt.show()", "Путём диагонализации истинной матрицы ковариаций $C$, мы можем найти преобразование исходного набора данных, компоненты которого наилучшим образом будут описывать дисперсию, с учётом их ортогональности друг другу:", "v, W_true = np.linalg.eig(C)\nprint W_true\nplt.scatter(data[:,0], data[:,1])\n# построим истинные компоненты, вдоль которых максимальна дисперсия данных\nplt.plot(data[:,0], (W_true[0,0]/W_true[0,1])*data[:,0], color=\"g\")\nplt.plot(data[:,0], (W_true[1,0]/W_true[1,1])*data[:,0], color=\"g\")\ng_patch = 
mpatches.Patch(color='g', label='True components')\nplt.legend(handles=[g_patch])\nplt.axis('equal')\nlimits = [np.minimum(np.amin(data[:,0]), np.amin(data[:,1])),\n np.maximum(np.amax(data[:,0]), np.amax(data[:,1]))]\nplt.xlim(limits[0],limits[1])\nplt.ylim(limits[0],limits[1])\nplt.draw()", "А теперь сравним эти направления с направлениями, которые выбирает метод главных компонент:", "def plot_principal_components(data, model, scatter=True, legend=True):\n W_pca = model.components_\n if scatter:\n plt.scatter(data[:,0], data[:,1])\n plt.plot(data[:,0], -(W_pca[0,0]/W_pca[0,1])*data[:,0], color=\"c\")\n plt.plot(data[:,0], -(W_pca[1,0]/W_pca[1,1])*data[:,0], color=\"c\")\n if legend:\n c_patch = mpatches.Patch(color='c', label='Principal components')\n plt.legend(handles=[c_patch], loc='lower right')\n # сделаем графики красивыми:\n plt.axis('equal')\n limits = [np.minimum(np.amin(data[:,0]), np.amin(data[:,1]))-0.5,\n np.maximum(np.amax(data[:,0]), np.amax(data[:,1]))+0.5]\n plt.xlim(limits[0],limits[1])\n plt.ylim(limits[0],limits[1])\n plt.draw()\n\nmodel = PCA(n_components=2)\nmodel.fit(data)\n\nplt.scatter(data[:,0], data[:,1])\n# построим истинные компоненты, вдоль которых максимальна дисперсия данных\nplt.plot(data[:,0], (W_true[0,0]/W_true[0,1])*data[:,0], color=\"g\")\nplt.plot(data[:,0], (W_true[1,0]/W_true[1,1])*data[:,0], color=\"g\")\n# построим компоненты, полученные с использованием метода PCA:\nplot_principal_components(data, model, scatter=False, legend=False)\nc_patch = mpatches.Patch(color='c', label='Principal components')\nplt.legend(handles=[g_patch, c_patch])\nplt.draw()", "Видно, что уже при небольшом количестве данных они отличаются незначительно. 
Увеличим размер выборки:", "data_large = np.random.multivariate_normal(mu, C, size=5000)\n\nmodel = PCA(n_components=2)\nmodel.fit(data_large)\nplt.scatter(data_large[:,0], data_large[:,1], alpha=0.1)\n# построим истинные компоненты, вдоль которых максимальна дисперсия данных\nplt.plot(data_large[:,0], (W_true[0,0]/W_true[0,1])*data_large[:,0], color=\"g\")\nplt.plot(data_large[:,0], (W_true[1,0]/W_true[1,1])*data_large[:,0], color=\"g\")\n# построим компоненты, полученные с использованием метода PCA:\nplot_principal_components(data_large, model, scatter=False, legend=False)\nc_patch = mpatches.Patch(color='c', label='Principal components')\nplt.legend(handles=[g_patch, c_patch])\nplt.draw()", "В этом случае главные компоненты значительно точнее приближают истинные направления данных, вдоль которых наблюдается наибольшая дисперсия.\nСтатистический взгляд на модель\nКак формализовать предположения метода, указанные выше? При помощи вероятностной модели!\nЗадача, стоящая за любым методом уменьшения размерности: получить из набора зашумлённых признаков $X$ истинные значения $Y$, которые на самом деле определяют набор данных (т.е. сведение датасета с большим количеством признаков к данным, имеющим т.н. \"эффективную размерность\").\nВ случае метода главных компонент мы хотим найти направления, вдоль которых максимальна дисперсия, с учётом описанных выше предположений о структуре данных и компонент.\nМатериал, описанный ниже в данной секции, не обязателен для ознакомления для выполнения следующего задания, т.к. требует некоторых знаний статистики.\nДля тех, кто собирается его пропустить: в конце раздела мы получим метрику качества, которая должна определять, насколько данные хорошо описываются построенной моделью при заданном числе компонент. 
Отбор признаков при этом сводится к тому, что мы выбираем то количество компонент, при котором используемая метрика (логарифм правдоподобия) является максимальной.\nС учётом предположений задача метода главных компонент выглядит следующим образом:\n$$ x = Wy + \\mu + \\epsilon$$\nгде:\n- $x$ -- наблюдаемые данные\n- $W$ -- матрица главных компонент (каждый стобец -- одна компонента)\n- $y$ -- их проекция на главные компоненты\n- $\\mu$ -- среднее наблюдаемых данных\n- $\\epsilon \\sim \\mathcal{N}(0, \\sigma^2I)$ -- нормальный шум\nИсходя из распределения шума, выпишем распределение на $x$:\n$$p(x \\mid y) = \\mathcal{N}(Wx + \\mu, \\sigma^2I) $$\nВведём априорное распределение на $y$:\n$$p(y) = \\mathcal{N}(0, 1)$$\nВыведем из этого при помощи формулы Байеса маргинальное распределение на $p(x)$:\n$$p(x) = \\mathcal{N}(\\mu, \\sigma^2I + WW^T)$$\nТогда правдоподобие набора данных при условии используемой модели выглядит следующим образом:\n$$\\mathcal{L} = \\sum_{i=1}^N \\log p(x_i) = -N/2 \\Big( d\\log(2\\pi) + \\log |C| + \\text{tr}(C^{-1}S) \\Big)$$\nгде:\n- $C = \\sigma^2I + WW^T$ -- матрица ковариации в маргинальной модели\n- $S = \\frac{1}{N} \\sum_{i=1}^N (x_i - \\mu)(x_i - \\mu)^T$ -- выборочная ковариация\nЗначение $\\mathcal{L}$ имеет смысл логарифма вероятности получения набора данных $X$ при условии, что он удовлетворяет предположениям модели метода главных компонент. Чем оно больше -- тем лучше модель описывает наблюдаемые данные.\nЗадание 1. Автоматическое уменьшение размерности данных при помощи логарифма правдоподобия $\\mathcal{L}$\nРассмотрим набор данных размерности $D$, чья реальная размерность значительно меньше наблюдаемой (назовём её $d$). 
От вас требуется:\n\nДля каждого значения $\\hat{d}$ в интервале [1,D] построить модель PCA с $\\hat{d}$ главными компонентами.\nОценить средний логарифм правдоподобия данных для каждой модели на генеральной совокупности, используя метод кросс-валидации с 3 фолдами (итоговая оценка значения логарифма правдоподобия усредняется по всем фолдам).\nНайти модель, для которой он максимален, и внести в файл ответа число компонент в данной модели, т.е. значение $\\hat{d}_{opt}$.\n\nДля оценки логарифма правдоподобия модели для заданного числа главных компонент при помощи метода кросс-валидации используйте следующие функции:\nmodel = PCA(n_components=n)\nscores = cv_score(model, data)\n\nОбратите внимание, что scores -- это вектор, длина которого равна числу фолдов. Для получения оценки на правдоподобие модели его значения требуется усреднить.\nДля визуализации оценок можете использовать следующую функцию:\nplot_scores(d_scores)\n\nкоторой на вход передаётся вектор полученных оценок логарифма правдоподобия данных для каждого $\\hat{d}$.\nДля интересующихся: данные для заданий 1 и 2 были сгенерированны в соответствии с предполагаемой PCA моделью. 
То есть: данные $Y$ с эффективной размерностью $d$, полученные из независимых равномерных распределений, линейно траснформированны случайной матрицей $W$ в пространство размерностью $D$, после чего ко всем признакам был добавлен независимый нормальный шум с дисперсией $\\sigma$.", "from sklearn.decomposition import PCA\nfrom sklearn.cross_validation import cross_val_score as cv_score\n\ndef plot_scores(d_scores):\n n_components = np.arange(1,d_scores.size+1)\n plt.plot(n_components, d_scores, 'b', label='PCA scores')\n plt.xlim(n_components[0], n_components[-1])\n plt.xlabel('n components')\n plt.ylabel('cv scores')\n plt.legend(loc='lower right')\n plt.show()\n \ndef write_answer_1(optimal_d):\n with open(\"pca_answer1.txt\", \"w\") as fout:\n fout.write(str(optimal_d))\n \ndata = pd.read_csv('data_task1.csv')\n\n# place your code here\npass\n\ndata.head()\n\ndata.isnull().any().any()\n\nd_scores = []\n\nfor d in xrange(1,len(data.columns)+1):\n model = PCA(n_components = d,svd_solver='full')\n scores = cv_score(model, data)\n d_scores = np.append(d_scores, scores.mean())\n \nplot_scores(d_scores)\n\nans1 = np.argmax(d_scores,axis = 0) + 1 # отсчет с 0\nprint ans1\nwrite_answer_1(ans1)", "Вариационный взгляд на модель\nМы знаем, что каждой главной компоненте соответствует описываемая ей дисперсия данных (дисперсия данных при проекции на эту компоненту). Она численно равна значению диагональных элементов матрицы $\\Lambda$, получаемой из спектрального разложения матрицы ковариации данных (смотри теорию выше).\nИсходя из этого, мы можем отсортировать дисперсию данных вдоль этих компонент по убыванию, и уменьшить размерность данных, отбросив $q$ итоговых главных компонент, имеющих наименьшую дисперсию.\nДелать это можно двумя разными способами. 
Например, если вы вдальнейшем обучаете на данных с уменьшенной размерностью модель классификации или регрессии, то можно запустить итерационный процесс: удалять компоненты с наименьшей дисперсией по одной, пока качество итоговой модели не станет значительно хуже.\nБолее общий способ отбора признаков заключается в том, что вы можете посмотреть на разности в дисперсиях в отсортированном ряде $\\lambda_{(1)} > \\lambda_{(2)} > \\dots > \\lambda_{(D)}$: $\\lambda_{(1)}-\\lambda_{(2)}, \\dots, \\lambda_{(D-1)} - \\lambda_{(D)}$, и удалить те компоненты, на которых разность будет наибольшей. Именно этим методом вам и предлагается воспользоваться для тестового набора данных.\nЗадание 2. Ручное уменьшение размерности признаков посредством анализа дисперсии данных вдоль главных компонент\nРассмотрим ещё один набор данных размерности $D$, чья реальная размерность значительно меньше наблюдаемой (назовём её также $d$). От вас требуется:\n\nПостроить модель PCA с $D$ главными компонентами по этим данным.\nСпроецировать данные на главные компоненты.\nОценить их дисперсию вдоль главных компонент.\nОтсортировать дисперсии в порядке убывания и получить их попарные разности: $\\lambda_{(i-1)} - \\lambda_{(i)}$.\nНайти разность с наибольшим значением и получить по ней оценку на эффективную размерность данных $\\hat{d}$.\nПостроить график дисперсий и убедиться, что полученная оценка на $\\hat{d}{opt}$ действительно имеет смысл, после этого внести полученное значение $\\hat{d}{opt}$ в файл ответа.\n\nДля построения модели PCA используйте функцию:\nmodel.fit(data)\n\nДля трансформации данных используйте метод:\nmodel.transform(data)\n\nОценку дисперсий на трансформированных данных от вас потребуется реализовать вручную. 
Для построения графиков можно воспользоваться функцией\nplot_variances(d_variances)\n\nкоторой следует передать на вход отсортированный по убыванию вектор дисперсий вдоль компонент.", "from sklearn.decomposition import PCA\nfrom sklearn.cross_validation import cross_val_score as cv_score\n\ndef plot_variances(d_variances):\n n_components = np.arange(1,d_variances.size+1)\n plt.plot(n_components, d_variances, 'b', label='Component variances')\n plt.xlim(n_components[0], n_components[-1])\n plt.xlabel('n components')\n plt.ylabel('variance')\n plt.legend(loc='upper right')\n plt.show()\n \ndef write_answer_2(optimal_d):\n with open(\"pca_answer2.txt\", \"w\") as fout:\n fout.write(str(optimal_d))\n \ndata = pd.read_csv('data_task2.csv')\n\n# place your code here\npass\n\nmodel = PCA(n_components=len(data.columns)) #1\nmodel.fit(data) #2\n# Доля информации (доля от общей дисперсии)\nd_variances = model.explained_variance_ratio_ #3\nplot_variances(d_variances)\n\nsort_d_variances = np.sort(d_variances) #4\ndifference_d_variances = []\n#5\nfor i in xrange(1,len(sort_d_variances)):\n diff = sort_d_variances[i] - sort_d_variances[i-1]\n difference_d_variances = np.append(difference_d_variances,diff)\n# 6 \nplot_variances(difference_d_variances)\n\n# -1, для задаия одинаковой размерности\nans2 = len(data.columns) - 1 - np.argmax(difference_d_variances)\nprint ans2\nwrite_answer_2(ans2)", "Интерпретация главных компонент\nВ качестве главных компонент мы получаем линейные комбинации исходных призанков, поэтому резонно возникает вопрос об их интерпретации.\nДля этого существует несколько подходов, мы рассмотрим два:\n- рассчитать взаимосвязи главных компонент с исходными признаками\n- рассчитать вклады каждого конкретного наблюдения в главные компоненты\nПервый способ подходит в том случае, когда все объекты из набора данных не несут для нас никакой семантической информации, которая уже не запечатлена в наборе признаков.\nВторой способ подходит для случая, когда данные имеют 
более сложную структуру. Например, лица для человека несут больший семантический смысл, чем вектор значений пикселей, которые анализирует PCA.\nРассмотрим подробнее способ 1: он заключается в подсчёте коэффициентов корреляций между исходными признаками и набором главных компонент.\nТак как метод главных компонент является линейным, то предлагается для анализа использовать корреляцию Пирсона, выборочный аналог которой имеет следующую формулу:\n$$r_{jk} = \\frac{\\sum_{i=1}^N (x_{ij} - \\bar{x}j) (y{ik} - \\bar{y}k)}{\\sqrt{\\sum{i=1}^N (x_{ij} - \\bar{x}j)^2 \\sum{i=1}^N (y_{ik} - \\bar{y}_k)^2}} $$\nгде:\n- $\\bar{x}_j$ -- среднее значение j-го признака,\n- $\\bar{y}_k$ -- среднее значение проекции на k-ю главную компоненту.\nКорреляция Пирсона является мерой линейной зависимости. Она равна 0 в случае, когда величины независимы, и $\\pm 1$, если они линейно зависимы. Исходя из степени корреляции новой компоненты с исходными признаками, можно строить её семантическую интерпретацию, т.к. смысл исходных признаков мы знаем.\nЗадание 3. Анализ главных компонент при помощи корреляций с исходными признаками.\n\nОбучите метод главных компонент на датасете iris, получите преобразованные данные.\nПосчитайте корреляции исходных признаков с их проекциями на первые две главные компоненты.\nДля каждого признака найдите компоненту (из двух построенных), с которой он коррелирует больше всего.\nНа основании п.3 сгруппируйте признаки по компонентам. Составьте два списка: список номеров признаков, которые сильнее коррелируют с первой компонентой, и такой же список для второй. Нумерацию начинать с единицы. Передайте оба списка функции write_answer_3.\n\nНабор данных состоит из 4 признаков, посчитанных для 150 ирисов. Каждый из них принадлежит одному из трёх видов. 
Визуализацию проекции данного датасета на две компоненты, которые описывают наибольшую дисперсию данных, можно получить при помощи функции\nplot_iris(transformed_data, target, target_names)\n\nна вход которой требуется передать данные, преобразованные при помощи PCA, а также информацию о классах. Цвет точек отвечает одному из трёх видов ириса.\nДля того чтобы получить имена исходных признаков, используйте следующий список:\niris.feature_names\n\nПри подсчёте корреляций не забудьте центрировать признаки и проекции на главные компоненты (вычитать из них среднее).", "from sklearn import datasets\n\ndef plot_iris(transformed_data, target, target_names):\n plt.figure()\n for c, i, target_name in zip(\"rgb\", [0, 1, 2], target_names):\n plt.scatter(transformed_data[target == i, 0],\n transformed_data[target == i, 1], c=c, label=target_name)\n plt.legend()\n plt.show()\n \ndef write_answer_3(list_pc1, list_pc2):\n with open(\"pca_answer3.txt\", \"w\") as fout:\n fout.write(\" \".join([str(num) for num in list_pc1]))\n fout.write(\" \")\n fout.write(\" \".join([str(num) for num in list_pc2]))\n\n# загрузим датасет iris\niris = datasets.load_iris()\ndata = iris.data\ntarget = iris.target\ntarget_names = iris.target_names\n\n# place your code here\n\nmodel = PCA(n_components=data.shape[1]) #1\nmodel.fit(data)\ndata_new = model.transform(data)\n\nplot_iris(data_new, target, target_names)\n\n# 3 pearson\ndef corr_pearson(x,y):\n x_mean = x.mean()\n y_mean = y.mean()\n return np.dot((x - x_mean),(y-y_mean)) / \\\n np.sqrt(np.dot((x - x_mean),(x - x_mean)) * np.dot((y - y_mean),(y - y_mean)))\n\nr = np.zeros([data.shape[1],data_new.shape[1]])\n\nfor i in range(data.shape[1]):\n for j in range(data_new.shape[1]):\n r[i, j] = abs(corr_pearson(data[:, i],data_new[:, j]))\n \nr\n\n# 4\nres_1_corr = []\nres_2_corr = []\nfor i in range(r.shape[0]):\n if r[i, 0] >= r[i, 1]:\n res_1_corr.append(i+1)\n else:\n res_2_corr.append(i+1)\n \n \nprint res_1_corr,' 
',res_2_corr\nwrite_answer_3(res_1_corr, res_2_corr)", "Интерпретация главных компонент с использованием данных\nРассмотрим теперь величину, которую можно проинтерпретировать, как квадрат косинуса угла между объектом выборки и главной компонентой:\n$$ cos^2_{ik} = \\frac{f_{ik}^2}{\\sum_{\\ell=1}^d f_{i\\ell}^2} $$\nгде\n- i -- номер объекта\n- k -- номер главной компоненты\n- $f_{ik}$ -- модуль центрированной проекции объекта на компоненту\nОчевидно, что\n$$ \\sum_{k=1}^d cos^2_{ik} = 1 $$\nЭто значит, что для каждого объекта мы в виде данной величины получили веса, пропорциональные вкладу, которую вносит данный объект в дисперсию каждой компоненты. Чем больше вклад, тем более значим объект для описания конкретной главной компоненты.\nЗадание 4. Анализ главных компонент при помощи вкладов в их дисперсию отдельных объектов\n\nЗагрузите датасет лиц Olivetti Faces и обучите на нём модель RandomizedPCA (используется при большом количестве признаков и работает быстрее, чем обычный PCA). Получите проекции признаков на 10 первых главных компонент.\nПосчитайте для каждого объекта его относительный вклад в дисперсию каждой из 10 компонент, используя формулу из предыдущего раздела (d = 10).\n\nДля каждой компоненты найдите и визуализируйте лицо, которое вносит наибольший относительный вклад в неё. 
Для визуализации используйте функцию\nplt.imshow(image.reshape(image_shape))\n\n\n\nПередайте в функцию write_answer_4 список номеров лиц с наибольшим относительным вкладом в дисперсию каждой из компонент, список начинается с 0.", "from sklearn.datasets import fetch_olivetti_faces\nfrom sklearn.decomposition import RandomizedPCA\n\ndef write_answer_4(list_pc):\n with open(\"pca_answer4.txt\", \"w\") as fout:\n fout.write(\" \".join([str(num) for num in list_pc]))\n\ndata = fetch_olivetti_faces(shuffle=True, random_state=0).data\nimage_shape = (64, 64)\n\n# 1\npca = RandomizedPCA(n_components=10)\npca.fit(data)\npca_data_new = pca.transform(data)\n\n# 2\npca_means = []\nres_cos = np.zeros([data.shape[1],pca_data_new.shape[1]])\nfor i in range(pca_data_new.shape[1]):\n pca_means.append(pca_data_new[:,i].mean())\n\nfor i in range(pca_data_new.shape[0]):\n cos_denom = 0.\n for j in range(pca_data_new.shape[1]):\n cos_denom += (abs(pca_data_new[i,j]) - pca_means[j])**2\n \n for h in range(pca_data_new.shape[1]):\n res_cos[i,h] = (abs(pca_data_new[i,h]) - pca_means[h])**2 / cos_denom\n\nprint res_cos[0]\n\nres = []\nfor i in range(pca_data_new.shape[1]):\n max_cos = 0.\n ind_cos = 0\n for j in range(res_cos.shape[0]):\n if res_cos[j,i]>max_cos:\n max_cos = res_cos[j,i]\n ind_cos = j\n res.append(ind_cos)\nres\n\nwrite_answer_4(res)\n\nplt.imshow(data[res[0]].reshape(image_shape))", "Анализ основных недостатков метода главных компонент\nРассмотренные выше задачи являются, безусловно, модельными, потому что данные для них были сгенерированы в соответствии с предположениями метода главных компонент. На практике эти предположения, естественно, выполняются далеко не всегда. 
Рассмотрим типичные ошибки PCA, которые следует иметь в виду перед тем, как его применять.\nНаправления с максимальной дисперсией в данных неортогональны\nРассмотрим случай выборки, которая сгенерирована из двух вытянутых нормальных распределений:", "C1 = np.array([[10,0],[0,0.5]])\nphi = np.pi/3\nC2 = np.dot(C1, np.array([[np.cos(phi), np.sin(phi)],\n [-np.sin(phi),np.cos(phi)]]))\n\ndata = np.vstack([np.random.multivariate_normal(mu, C1, size=50),\n np.random.multivariate_normal(mu, C2, size=50)])\nplt.scatter(data[:,0], data[:,1])\n# построим истинные интересующие нас компоненты\nplt.plot(data[:,0], np.zeros(data[:,0].size), color=\"g\")\nplt.plot(data[:,0], 3**0.5*data[:,0], color=\"g\")\n# обучим модель pca и построим главные компоненты\nmodel = PCA(n_components=2)\nmodel.fit(data)\nplot_principal_components(data, model, scatter=False, legend=False)\nc_patch = mpatches.Patch(color='c', label='Principal components')\nplt.legend(handles=[g_patch, c_patch])\nplt.draw()", "В чём проблема, почему pca здесь работает плохо? Ответ прост: интересующие нас компоненты в данных коррелированны между собой (или неортогональны, в зависимости от того, какой терминологией пользоваться). Для поиска подобных преобразований требуются более сложные методы, которые уже выходят за рамки метода главных компонент.\nДля интересующихся: то, что можно применить непосредственно к выходу метода главных компонент, для получения подобных неортогональных преобразований, называется методами ротации. 
Почитать о них можно в связи с другим методом уменьшения размерности, который называется Factor Analysis (FA), но ничего не мешает их применять и к главным компонентам.\nИнтересное направление в данных не совпадает с направлением максимальной дисперсии\nРассмотрим пример, когда дисперсии не отражают интересующих нас направлений в данных:", "C = np.array([[0.5,0],[0,10]])\nmu1 = np.array([-2,0])\nmu2 = np.array([2,0])\n\ndata = np.vstack([np.random.multivariate_normal(mu1, C, size=50),\n np.random.multivariate_normal(mu2, C, size=50)])\nplt.scatter(data[:,0], data[:,1])\n# обучим модель pca и построим главные компоненты\nmodel = PCA(n_components=2)\nmodel.fit(data)\nplot_principal_components(data, model)\nplt.draw()", "Очевидно, что в данном случае метод главных компонент будет считать вертикальную компоненту более значимой для описания набора данных, чем горизонтальную. \nНо, например, в случае, когда данные из левого и правого кластера относятся к разным классам, для их линейной разделимости вертикальная компонента является шумовой. Несмотря на это, её метод главных компонент никогда шумовой не признает, и есть вероятность, что отбор признаков с его помощью выкинет из ваших данных значимые для решаемой вами задачи компоненты просто потому, что вдоль них значения имеют низкую дисперсию.\nСправляться с такими ситуациями могут некоторые другие методы уменьшения размерности данных, например, метод независимых компонент (Independent Component Analysis, ICA)." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
openfisca/combine-calculators
scripts/reform_no_revenu.ipynb
gpl-3.0
[ "Guillaume Chaslot (guillaume.chaslot@data.gouv.fr)\nTax Law Simplifier\nTax law is more than 200,000 lines long and opaque.\nWe believe that with this tool, we can make it less than 100 lines, transparent, and 99% similar to the existing.\nHow it works:\n\nDefine your concepts and budget\nAutomatic tuning helps you tune your reform to match the current legislation\nFrom the biggest discrepencies, improve your concepts\nRepeat until you reach a legislation that is as close as the current as you like, but much simpler\n\nBeta version:\nFor this test, we only take a population of people from all ages, who have 0 to 5 children, no salary, married, veuf, paces, divorces or celibataires, and simulate the aides sociales.", "from compare_simulators import CalculatorComparator\nfrom population_simulator import CerfaPopulationSimulator\nfrom utils import show_histogram\nfrom utils import percent_diff\nfrom utils import scatter_plot\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\n%matplotlib inline", "Load population data\nThe population data is simulated for now, see population_simulator.py for details. Just do it.", "comp = CalculatorComparator()\ncomp.load_results_from_json('1aj-1bj-f-2000')\n\nrevdisp = comp.get_variable_from_openfisca('revdisp')", "Extracts concepts of interest\nYou can add concepts here if you like. 
The input population is in the CERFA declaration des revenus format.", "population = []\noriginal_index = []\n\ndef cas_improbable(case):\n # It is highly unlikely to have enfants a charge above 64 so we remove those cases\n if (int(case['0DA']) <= 1950 or ('0DB' in case and int(case['0DA'] <= 1950))) and 'F' in case and int(case['F']) > 0:\n return True\n return False\n\nfor i in range(0, len(comp.testcases)):\n case = comp.testcases[i]\n if case.get('1AJ', 0) < 1 and case.get('1BJ', 0) < 1 and not cas_improbable(case):\n original_index.append(i)\n new_family = {}\n new_family['taxable_income'] = case.get('1AJ', 0)\n new_family['revdisp'] = revdisp[i]\n\n if 'F' in case:\n if case['F'] == 1:\n new_family['enfant_unique'] = 1\n if case['F'] >= 2:\n new_family['enfants_deux_ou_plus'] = 1\n if case['F'] > 2:\n new_family['nb_enfants_au_dessus_de_2'] = case['F'] - 2\n new_family['nb_enfants'] = case['F']\n\n if 'O' in case or 'M' in case:\n new_family['two_people'] = 1 \n\n one_declarant_above_24 = False\n both_declarant_parent_below_24 = 'F' in case\n\n if '0DA' in case:\n age_1 = 2014 - case['0DA']\n new_family['age-dec1'] = age_1\n# if age <= 24 and 'F' in case:\n# new_family['declarant parent <= 24 ans'] = 1\n if age_1 >= 24:\n one_declarant_above_24 = True\n both_declarant_parent_below_24 = False\n# new_family['declarant > 24 ans'] = 1\n if age_1 > 64:\n new_family['declarant > 64 ans'] = 1\n new_family['declarants > 64 ans'] = 1\n\n age_2 = 0 \n if '0DB' in case:\n age_2 = 2014 - case['0DB']\n new_family['age-dec2'] = age_2\n if age_2 >= 24:\n one_declarant_above_24 = True\n both_declarant_parent_below_24 = False\n\n# new_family['codeclarant > 24 ans'] = 1\n if age_2 > 64:\n new_family['declarants > 64 ans'] = new_family.get('declarants > 64 ans', 0) + 1\n new_family['codeclarant > 64 ans'] = 1\n\n if age_1 >= 24 and age_2 >= 24:\n new_family['both > 24 ans'] = 1\n\n if both_declarant_parent_below_24:\n new_family['both_declarant_parent_below_24'] = True\n \n if 
one_declarant_above_24:\n new_family['one_declarant_above_24'] = True\n\n if 'F' in case and ('C' in case or 'D' in case or 'V' in case):\n new_family['parent_isolé'] = True\n \n if 'F' in case and ('M' in case or 'O' in case):\n new_family['parents_en_couple'] = True\n\n population.append(new_family)\n\nprint 'Number of family: ' + repr(len(population))\n\ntotal_people = 0\nfor family in comp.testcases:\n total_people += 1\n if '0DB' in family and family['0DB'] == 1:\n total_people += 1\n if 'F' in family:\n total_people += family['F']\n\n# We assume that there are 2000000 people with RSA\nechantillon = float(total_people) / 2000000\nprint 'Echantillon of ' + repr(total_people) + ' people, in percent of french population for similar revenu: ' + repr(echantillon)\n", "Plots Revenu disponible before reform", "revdisp_when_no_salary = list(family['revdisp'] for family in population)\n\nshow_histogram(revdisp_when_no_salary, 'Distribution of revenu disponible')", "Define your reform here !\nGive all the concepts you care about either as:\n\n\n<strong> boolean parameters</strong> add value if present (e.g., one_declarant_above_24)\n\n\n<strong> linear parameters </strong> add value proportional to the parameters (e.g., nb_enfants)\n\n\n<strong> segmentation parameters </strong> are segmented by the algorithm into several boolean parameters (e.g., age_declarant_principal might be broken down automatically by the algo into 0-25 / 25-64 / 65+)", "from reformators import Excalibur\n\nsword = Excalibur(population, 'revdisp', 'taxable_income', echantillon=echantillon)\nsimulated_reform = sword.suggest_reform(\n boolean_parameters = ['one_declarant_above_24',\n 'codeclarant > 64 ans',\n 'declarant > 64 ans',\n 'both_declarant_parent_below_24',\n 'two_people'],\n linear_parameters = ['nb_enfants'],\n barem_parameters = [],\n save=0)", "Plots revenu disponible after reform", "xmin = 4900\nxmax = 19000\nnb_buckets = 25\n\nbins = np.linspace(xmin, xmax, 
nb_buckets)\n\nplt.hist(revdisp_when_no_salary, bins, alpha=0.5, label='current')\nplt.hist(simulated_reform, bins, alpha=0.5, label='reform')\nplt.legend(loc='upper right')\nplt.show()", "Distribution of the changes in revenu in euros", "difference = list(simulated_reform[i] - revdisp_when_no_salary[i] for i in range(len(simulated_reform)))\n\nshow_histogram(difference, 'Changes in revenu')", "Distribution of the change in revenu in percentage", "percentage_difference = list(100 * percent_diff(simulated_reform[i], revdisp_when_no_salary[i]) for i in range(len(simulated_reform)))\n\nshow_histogram(percentage_difference, 'Changes in revenu')", "Change as a function of the number of children", "nb_children = list((population[i].get('nb_enfants', 0) for i in range(len(population)))) \n\nscatter_plot(nb_children, difference, 'Age declarant 1', 'Difference reform - current', alpha=0.01)", "Change as a function of the age of declarant 1", "age_dec1 = list((population[i].get('age-dec1', 0) for i in range(len(population)))) \n\nscatter_plot(age_dec1, difference, 'Age declarant 1', 'Difference reform - current', alpha=0.1)", "Cases that\nThis is the heart of this tool: by seeing the worse cases, you can discover when the current legislation is smarter than yours, and improve it further.", "original_population = []\noriginal_revdisp = []\nfor i in range(0, len(population)):\n original_population.append(comp.testcases[original_index[i]])\n original_revdisp.append(revdisp[original_index[i]])\n\norder = sorted(range(len(simulated_reform)), key=lambda k: -abs(simulated_reform[k] - revdisp_when_no_salary[k]))\n\nfor i in order:\n print 'Case ' + repr(original_population[i]) + ' Current =' + repr(int(revdisp_when_no_salary[i])) + ' Reform = ' + repr(int(simulated_reform[i]))", "Best compromise simplicity / matching current legislation:", "sword = Excalibur(population,'revdisp', 'taxable_income', echantillon=echantillon)\nres = sword.suggest_reform(boolean_parameters=[\n 
'enfant_unique',\n 'enfants_deux_ou_plus',\n 'nb_enfants_au_dessus_de_2',\n 'one_declarant_above_24',\n 'declarant > 64 ans',\n 'codeclarant > 64 ans',\n 'two_people',\n ],\n save=0)\n\noriginal_population = []\noriginal_revdisp = []\nfor i in range(0, len(population)):\n original_population.append(comp.testcases[original_index[i]])\n original_revdisp.append(revdisp[original_index[i]])\n\norder = sorted(range(len(simulated_reform)), key=lambda k: -abs(simulated_reform[k] - revdisp_when_no_salary[k]))\n\nfor i in order:\n print 'Case ' + repr(original_population[i]) + ' Current =' + repr(int(revdisp_when_no_salary[i])) + ' Reform = ' + repr(int(simulated_reform[i]))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
NICTA/revrand
demos/regression_demo.ipynb
apache-2.0
[ "Example of how to use revrand for regression\nIn this notebook we demonstrate revrand's standard linear model (SLM) and generalised linear model (GLM) fitting a random draw from a GP. We also compare the perfomance of these algorithms to a full GP.", "%matplotlib inline\n\nimport matplotlib.pyplot as pl\npl.style.use('ggplot')\nimport numpy as np\nfrom scipy.stats import gamma\n\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import WhiteKernel, RBF\n\nfrom revrand import StandardLinearModel, GeneralizedLinearModel, likelihoods, Parameter, Positive\nfrom revrand.metrics import msll, smse\nfrom revrand.utils.datasets import gen_gausprocess_se\nfrom revrand import basis_functions as bs\nfrom revrand.optimize import AdaDelta, Adam\n", "Dataset settings and creation", "N = 150 # Number of training points\nNs = 250 # Number of test points\nlenscale_true = 1.2\nnoise_true = 0.1\n\nXtrain, ytrain, Xtest, ftest = \\\n gen_gausprocess_se(N, Ns, lenscale=lenscale_true, noise=noise_true)\nytest = ftest + np.random.randn(Ns) * noise_true\n ", "Algorithm Settings", "# Common settings\nnbases = 20 # Number of unique random bases to use for approximating a kernel\nlenscale = gamma(a=1, scale=1) # Initial value for the lengthscale\nvar = gamma(a=0.1, scale=2) # Initial value for target noise\nreg = gamma(a=1, scale=1) # Initial weight prior\n\n# GLM specific settings\nmaxiter = 10000\nbatch_size = 10\nupdater = Adam()\n\n# Setup random basis functions\nbase = bs.RandomRBF(Xdim=1,\n nbases=nbases,\n lenscale=Parameter(lenscale, Positive()),\n regularizer=Parameter(reg, Positive())\n )\n", "Parameter learning", "# SLM\nslm = StandardLinearModel(base, var=Parameter(var, Positive()),)\nslm.fit(Xtrain, ytrain)\n\n# GLM\nllhood = likelihoods.Gaussian(var=Parameter(var, Positive()))\nglm = GeneralizedLinearModel(llhood,\n base,\n batch_size=batch_size,\n maxiter=maxiter,\n updater=updater\n )\nglm.fit(Xtrain, ytrain)\n\n# GP\nkern 
= WhiteKernel(noise_level=np.sqrt(var.mean())) + 1**2 * RBF(length_scale=lenscale.mean())\ngp = GaussianProcessRegressor(kernel=kern)\ngp.fit(Xtrain, ytrain)\n\n", "Model Querying", "# SLM\nEy_e, Vy_e = slm.predict_moments(Xtest)\nSy_e = np.sqrt(Vy_e)\n \n# GLM\nEy_g, Vf_g = glm.predict_moments(Xtest)\nVy_g = Vf_g + glm.like_hypers_\nSy_g = np.sqrt(Vy_g)\n\n# GP\nEy_gp, Sy_gp = gp.predict(Xtest, return_std=True) \nVy_gp = Sy_gp**2\n", "Score the models", "LL_s = msll(ytest, Ey_e, Vy_e, ytrain)\nLL_gp = msll(ytest, Ey_gp, Vy_gp, ytrain)\nLL_g = msll(ytest, Ey_g, Vy_g, ytrain)\n\nsmse_s = smse(ytest, Ey_e)\nsmse_gp = smse(ytest, Ey_gp)\nsmse_glm = smse(ytest, Ey_g)\n\nprint(\"SLM: msll = {}, smse = {}, noise: {}, hypers: {}\"\n .format(LL_s, smse_s, np.sqrt(slm.var_), slm.hypers_))\nprint(\"GLM: msll = {}, smse = {}, noise: {}, hypers: {}\"\n .format(LL_g, smse_glm, np.sqrt(glm.like_hypers_),\n glm.basis_hypers_))\nprint(\"GP: msll = {}, smse = {}, noise: {}, hypers: {}\"\n .format(LL_gp, smse_gp, gp.kernel_.k1.noise_level,\n gp.kernel_.k2.k2.length_scale))\n", "Plot predictions", "Xpl_t = Xtrain.flatten()\nXpl_s = Xtest.flatten()\n\n# Training/Truth\npl.figure(figsize=(15, 10))\npl.plot(Xpl_t, ytrain, 'k.', label='Training')\npl.plot(Xpl_s, ftest, 'k-', label='Truth')\n\n# ELBO Regressor\npl.plot(Xpl_s, Ey_e, 'g-', label='Bayesian linear regression')\npl.fill_between(Xpl_s, Ey_e - 2 * Sy_e, Ey_e + 2 * Sy_e, facecolor='none',\n edgecolor='g', linestyle='--', label=None)\n\n# GP\npl.plot(Xpl_s, Ey_gp, 'b-', label='GP')\npl.fill_between(Xpl_s, Ey_gp - 2 * Sy_gp, Ey_gp + 2 * Sy_gp,\n facecolor='none', edgecolor='b', linestyle='--',\n label=None)\n\n# GLM Regressor\npl.plot(Xpl_s, Ey_g, 'm-', label='GLM')\npl.fill_between(Xpl_s, Ey_g - 2 * Sy_g, Ey_g + 2 * Sy_g, facecolor='none',\n edgecolor='m', linestyle='--', label=None)\n\npl.legend()\n\npl.grid(True)\npl.ylabel('y')\npl.xlabel('x')\n\npl.show()\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Caranarq/01_Dmine
Datasets/CNGMD/2015.ipynb
gpl-3.0
[ "Limpieza de dataset del Censo Nacional de Gobiernos Municipales y Delegacionales 2015\n1. Introduccion\nIndicadores que salen de este dataset:\nID |Descripción\n---|:----------\nP0306|Programas de modernización catastral \nP0307|Disposiciones normativas sustantivas en materia de desarrollo urbano u ordenamiento territorial \nP1001|Promedio diario de RSU recolectados\nP1003|Número de municipios con disponibilidad de servicios relacionados con los RSU\nP1006|Número de municipios con aplicación de programas locales orientados a la GIRSU\nP1009|Número de municipios con estudios de generación de RSU\n2. Descarga de datos", "descripciones = {\n'P0306' : 'Programas de modernización catastral',\n'P0307' : 'Disposiciones normativas sustantivas en materia de desarrollo urbano u ordenamiento territorial',\n'P1001' : 'Promedio diario de RSU recolectados',\n'P1003' : 'Número de municipios con disponibilidad de servicios relacionados con los RSU',\n'P1006' : 'Número de municipios con aplicación de programas locales orientados a la GIRSU',\n'P1009' : 'Número de municipios con estudios de generación de RSU',\n}\n\n# Librerias utilizadas\nimport pandas as pd\nimport sys\nimport urllib\nimport os\nimport zipfile\nimport csv\nimport pprint\nimport re\n\n# Configuracion del sistema\nprint('Python {} on {}'.format(sys.version, sys.platform))\nprint('Pandas version: {}'.format(pd.__version__))\nimport platform; print('Running on {} {}'.format(platform.system(), platform.release()))\n\nroot = r'http://www.beta.inegi.org.mx/contenidos/proyectos/censosgobierno/municipal/cngmd/2015/datosabiertos/'\n\nlinks = {\n 'P0306' : r'm1/Programa_modernizacion_catastral_cngmd2015_csv.zip', # Programas de modernización catastral\n 'P0307' : r'm2/Marco_regulatorio_cngmd2015_csv.zip', # Disposiciones normativas sustantivas en materia de desarrollo urbano u ordenamiento territorial\n 'P1001' : r'm6/Rec_RSU_cngmd2015_csv.zip', # Promedio diario de RSU recolectados\n 'P1006' : 
r'm6/Prog_gest_int_RSU_cngmd2015_csv.zip', # Número de municipios con aplicación de programas locales orientados a la GIRSU\n 'P1009' : r'm6/Est_gen_comp_RSU_cngmd2015_csv.zip', # Número de municipios con estudios de generación de RSU\n}", "En el caso del parámetro P1003, los datos se extraen desde 3 archivos. Estos archivos son una base de datos para cada servicio relacionado con los RSU, Utilizando nuevamente el archivo que utiliza P1001 y dos adicionales:", "P1003links = { # Número de municipios con disponibilidad de servicios relacionados con los RSU\n 1 : r'm6/Rec_RSU_cngmd2015_csv.zip',\n 2 : r'm6/Trat_RSU_cngmd2015_csv.zip',\n 3 : r'm6/Disp_final_RSU_cngmd2015_csv.zip'\n}\n\n# Destino local\ndestino = r'D:\\PCCS\\00_RawData\\01_CSV\\cngmd\\2015'\n\n# Descarga de zips para parametros que se encuentran en un solo archivo\nm_archivos = {} # Diccionario para guardar memoria de descarga\nfor parametro, fuente in links.items():\n file = fuente.split('/')[1]\n remote_path = root+fuente\n local_path = destino + r'\\{}'.format(file)\n if os.path.isfile(local_path):\n print('Ya existe el archivo: {}'.format(local_path))\n m_archivos[parametro] = local_path\n else:\n print('Descargando {} ... ... ... ... ... '.format(local_path))\n urllib.request.urlretrieve(remote_path, local_path) #\n m_archivos[parametro] = local_path\n print('se descargó {}'.format(local_path))\n \n\n# Descarga de zips para parametro P1003\nm_archivos2 = {} # Diccionario para guardar memoria de descarga\nfor parametro, fuente in P1003links.items():\n file = fuente.split('/')[1]\n remote_path = root+fuente\n local_path = destino + r'\\{}'.format(file)\n if os.path.isfile(local_path):\n print('Ya existe el archivo: {}'.format(local_path))\n m_archivos2[parametro] = local_path\n else:\n print('Descargando {} ... ... ... ... ... 
'.format(local_path))\n urllib.request.urlretrieve(remote_path, local_path) #\n m_archivos2[parametro] = local_path\n print('se descargó {}'.format(local_path))\n \n\n# Descompresión de archivos de m_parametro\nunzipped = {}\nfor parametro, comprimido in m_archivos.items():\n target = destino + '\\\\' + parametro\n if os.path.isfile(target):\n print('Ya existe el archivo: {}'.format(target))\n unzipped[parametro] = target\n else:\n print('Descomprimiendo {} ... ... ... ... ... '.format(target))\n descomprimir = zipfile.ZipFile(comprimido, 'r')\n descomprimir.extractall(target)\n descomprimir.close\n unzipped[parametro] = target\n \n\n# Descompresión de archivos de m_parametro2\nunzipped2 = {}\nfor parametro, comprimido in m_archivos2.items():\n target = destino + '\\\\P1003\\\\' + str(parametro)\n if os.path.isfile(target):\n print('Ya existe el archivo: {}'.format(target))\n unzipped2[parametro] = target\n else:\n print('Descomprimiendo {} ... ... ... ... ... '.format(target))\n descomprimir = zipfile.ZipFile(comprimido, 'r')\n descomprimir.extractall(target)\n descomprimir.close\n unzipped2[parametro] = target\n \n\n# Localizacion de archivos de cada parametro\n# Cada parametro tiene rutas y estructuras distintas. En este paso localizo manualmente \n# cada tabla y estructura desde los comprimidos. 
cada valor del diccionario contiene la ruta hacia\n# donde se encuentran las tablas.\n\ncd = r'\\conjunto_de_datos'\n\ntablas = {\n 'P0306' : destino + r'\\P0306' + cd,\n 'P0307' : destino + r'\\P0307\\marco_regulatorio_cngmd2015_dbf' + cd,\n 'P1001' : destino + r'\\P1001\\Rec_RSU_cngmd2015_csv' + cd,\n 'P1006' : destino + r'\\P1006\\Prog_gest_int_RSU_cngmd2015_csv' + cd,\n 'P1009' : destino + r'\\P1009\\Est_gen_comp_RSU_cngmd2015_csv' + cd,\n}\n\n# Tablas para P1003\ndestino2 = destino + r'\\P1003'\ntablasP1003 = {\n '1' : destino2 + r'\\1' + r'\\Rec_RSU_cngmd2015_csv' + cd,\n '2' : destino2 + r'\\2' + r'\\Trat_RSU_cngmd2015_csv' + cd,\n '3' : destino2 + r'\\3' + r'\\Disp_final_RSU_cngmd2015_csv' + cd,\n}", "Construccion de datasets estándar\nLos datasets para cada parámetro surgen de diferentes preguntas del censo por lo que sus estructuras son muy desemejantes, razon por la cual:\n(1) : Cada parámetro tiene que procesarse individualmente.\n(2) : Es conveniente extraer de manera individual los metadatos de cada parámetro. Con este propósito, el siguiente script sirve para extraer los metadatos de cada dataset:", "# Script para extraer metadatos:\ndef getmeta(path, charcoding): # Path es el contenido en las variables 'tablas' para cada parametro\n cat = r'\\catalogos'\n dic = r'\\diccionario_de_datos'\n metadict = {}\n metapath = path.replace(cd, cat)\n metafiles = os.listdir(metapath)\n dicdict = {}\n dicpath = path.replace(cd, dic)\n dicfiles = os.listdir(dicpath)\n for file in metafiles:\n variable = file.replace('.csv', '')\n if file.endswith('.csv'):\n csvpath = metapath+'\\\\'+file\n metadf = pd.DataFrame.from_csv(csvpath, parse_dates=False)\n try:\n metadf.index = metadf.index.map(str.lower)\n except:\n pass\n metadict[variable] = metadf\n else:\n dothis = input('El archivo {} no es csv, que deseas hacer? 
[DD]etener [CC]ontinuar'.format(file))\n dothis = dothis.lower()\n if dothis == 'dd':\n raise GeneratorExit('Script detenido por el usuario')\n elif dothis == 'cc':\n continue\n else:\n raise KeyError('No entendi la instruccion {}'.format(dothis))\n for file in dicfiles:\n if file.endswith('.csv'):\n filename = file.replace('.csv', '')\n csvpath = dicpath+'\\\\'+file\n try:\n dicdf = pd.read_csv(csvpath, skiprows=2, usecols=[1, 2], index_col=0, parse_dates=False).dropna()\n except:\n dicdf = pd.read_csv(csvpath, skiprows=2, usecols=[1, 2], index_col=0, parse_dates=False, encoding = charcoding).dropna()\n dicdf.index = dicdf.index.map(str.lower)\n dicdict[filename] = dicdf\n \n return dicdict, metadict\n\n# Funcion para revisar metadatos\ndef queryvar(var, tablelen=10, colprint = 125, dictio = p0306dic, metadat = p0306meta):\n pdefault = pd.get_option('display.max_colwidth')\n pd.set_option('display.max_colwidth', colprint) # Expande el espacio para imprimir columnas\n print('\"{}\" :\\n{}'.format(var, dictio.loc[var][0].upper()))\n if len(metadat[var]) > tablelen:\n print('{}\\nImprimiendo {} de {} registros'.format('-'*40,tablelen, len(metadat[var])))\n print(metadat[var].head(tablelen))\n pd.set_option('display.max_colwidth', pdefault) # Regresa la variable de impresion de columnas a su default", "P0306 - Programas de modernización catastral\nExistencia de programas de modernización catastral en los municipios.", "# Creacion de diccionarios con metadatos para cada variable de P0306:\npar = 'P0306'\np0306dic, p0306meta = getmeta(tablas['P0306'], 'mbcs')\nprint('Se extrajeron metadatos para las siguientes variables de {}:'.format(par))\nfor key in p0306meta.keys(): print(key)\nprint('\\nDiccionarios disponibles para {}:'.format(par))\nfor key in p0306dic.keys(): print(key)\n\n# Para P0306, solo existe una tabla de descripciones por lo que se convierte a un dataframe unico para poder indexar\np0306dic = 
p0306dic['diccionario_de_datos_programa_modernizacion_catastral_cngmd2015_dbf']\np0306dic\n\nlist(p0306dic.index)\n\nqueryvar('acc_modr')\n\nprint('** Descripciones de variables **\\n'.upper())\nfor i in p0306dic.index:\n queryvar(i)\n print('\\n')\n\n# Carga de datos\nP0306f = tablas['P0306']+'\\\\'+os.listdir(tablas['P0306'])[0]\ndf = pd.read_csv(P0306f, dtype={'ubic_geo':'str'})\ndf = df.rename(columns = {'ubic_geo':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP0306 = df.where((pd.notnull(df)), None)", "El archivo está estructurado de manera inconveniente, teniendo un renglón para cada variable. Lo conveniente es que cada renglón contenga toda la información de un solo municipio.", "# subset para pruebas\ntest = P0306.loc['15045']\ntest", "Para corregirlo, primero hacemos dataframes separados para cada variable. Afortunadamente, la columna 'Estructu' sirve para agrupar estructuralmente el dataframe", "queryvar('estructu')\n\n# ¿El municipio cuenta con un programa de modernización catastral?\nP0306_00 = P0306[P0306['estructu'] == 240500]['prog_mod'].astype('int')\nprint(P0306_00.head(10))\nprint('-'*50)\nqueryvar('prog_mod')\n\n# ¿En que periodo se realizaron las acciones del programa de modernización catastral?\nP0306_03 = P0306[P0306['estructu'] == 240503]['perio_ac'].astype('int')\nprint(P0306_03.head(10))\nprint('-'*50)\nqueryvar('perio_ac')\n\n# ¿Qué acciones se realizaron?\nP0306_02 = P0306[P0306['estructu'] == 240502]['acc_modr'].astype('int').groupby('CVE_MUN').apply(list)\nprint(P0306_02.head(10))\nqueryvar('acc_modr')\n\n# ¿Cuantas acciones se realizaron?\nP0306_02b = P0306_02.apply(len).rename('n_acc_modr')\nP0306_02b.head(10)\n\nqueryvar('inst_enc')\n\n# ¿Que instituciones se han involucrado en la modernizacion catastral, y de qué manera?\nP0306_01t = P0306[P0306['estructu'] == 240501][['inst_enc', 'tip_inst']] # tipo de apoyo e institucion \nP0306_01t.head()", "Se reemplazarán numeros por descripciones en tip_inst:", 
"queryvar('tip_inst')\n\n# Institucion involucrada\ninstit = {\n 1:'Administración pública de la entidad federativa',\n 2:'BANOBRAS',\n 3:'SEDATU',\n 4:'OTRA INSTITUCION'\n}\n\nP0306_01t['tip_inst'] = P0306_01t['tip_inst'].replace(instit)\nP0306_01t.head()", "Y se separará la columna 'inst_enc' en 2:", "queryvar('inst_enc')\n\nP0306_01t1 = P0306_01t[P0306_01t['inst_enc'] == 1]['tip_inst'].groupby('CVE_MUN').apply(list).rename('i_coord_ejecuta')\nP0306_01t2 = P0306_01t[P0306_01t['inst_enc'] == 2]['tip_inst'].groupby('CVE_MUN').apply(list).rename('i_otorga_apoyos')\n\nP0306_01t1.head()\n\nP0306_01t2.head()", "Finalmente, se unirán todas las series en un solo dataframe", "# Convertir series en Dataframes\nP0306_00 = P0306_00.to_frame()\nP0306_03 = P0306_03.to_frame()\nP0306_02 = P0306_02.to_frame()\nP0306_02b = P0306_02b.to_frame()\nP0306_01t1 = P0306_01t1.to_frame()\nP0306_01t2 = P0306_01t2.to_frame()\n\n# Unir dataframes\nP0306 = P0306_00.join(P0306_03).join(P0306_02).join(P0306_02b).join(P0306_01t1).join(P0306_01t2)\nP0306 = P0306.where((pd.notnull(P0306)), None)\n\nP0306.head()", "Metadatos para P0306", "P0306meta = {\n 'Nombre del Dataset' : 'Censo Nacional de Gobiernos Municipales y Delegacionales 2015',\n 'Descripcion del dataset' : 'Censo Nacional de Gobiernos Municipales y Delegacionales 2015',\n 'Disponibilidad Temporal' : '2015',\n 'Periodo de actualizacion' : 'Bienal',\n 'Nivel de Desagregacion' : 'Municipal',\n 'Notas' : 's/n',\n 'Fuente' : 'INEGI',\n 'URL_Fuente' : 'http://www.beta.inegi.org.mx/contenidos/proyectos/censosgobierno/municipal/cngmd/2015/datosabiertos/',\n 'Dataset base' : '\"P0306.xlsx\" disponible en \\nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015',\n}\n\nP0306meta = pd.DataFrame.from_dict(P0306meta, orient='index', dtype=None)\nP0306meta.columns = ['Descripcion']\nP0306meta = P0306meta.rename_axis('Metadato')\nP0306meta\n\nlist(P0306meta)\n\nP0306.head()", "EXPORTAR A EXCEL", "file = 
r'D:\\PCCS\\01_Dmine\\Datasets\\CNGMD\\P0306.xlsx'\nwriter = pd.ExcelWriter(file)\nP0306.to_excel(writer, sheet_name = 'P0306')\nP0306meta.to_excel(writer, sheet_name ='METADATOS')\nwriter.save()", "P0307: Disposiciones normativas sustantivas en materia de desarrollo urbano u ordenamiento territorial\nEs necesario cambiar el encoding para leer los archivos de este parametro", "# Redefinición de la función para revisar metadatos, porque los datos de la carpeta 'catálogos' de P0307 \n# no coinciden con los titulos de las columnas en la carpeta 'Conjunto de datos'.\n\ndef getmetab(csvpath, textcoding):\n # Importa el csv\n try: dicdf = pd.read_csv(csvpath, \n index_col=0, \n parse_dates=False\n )\n except: dicdf = pd.read_csv(csvpath, \n index_col=0, \n parse_dates=False, \n encoding = textcoding,\n )\n \n # Renombra las columnas\n dicdf.columns = list(dicdf.iloc[1])\n \n # Crea columna con el indice\n dicdf['text_arc'] = dicdf.index\n\n # Extrae el nombre del csv fuente en una columna independiente\n def getarc(x):\n try: return re.search('(?<=(o: ))([A-Z])\\w+', x).group()\n except: return None\n dicdf['arc'] = dicdf['text_arc'].apply(lambda x: getarc(x))\n\n # Extrae la descripcion del archivo en una columna independiente\n def getdescarc(x):\n try: return re.search('\\(([^)]+)\\)', x).group(1)\n except: return None\n dicdf['desc_arc'] = dicdf['text_arc'].apply(lambda x: getdescarc(x))\n \n # Marca columnas que se van a eliminar (Las columnas de donde se sacaron las variables 'arc' y 'desc_arc')\n dicdf['delete1'] = dicdf[list(dicdf.columns)[1:6]].notnull().sum(axis = 1)\n \n # Rellenar valores NaN\n dicdf = dicdf.fillna(method='ffill')\n \n # Eliminar valores marcados previaente\n dicdf = dicdf[dicdf.delete1 != 0]\n \n # Eliminar encabezados de columna repetidos\n dicdf = dicdf[dicdf.Descripción != 'Descripción']\n \n # Asignar nuevo indice y eliminar columna 'arc'\n dicdf = dicdf.set_index('arc')\n \n # Elimina columna delete1\n del dicdf['delete1']\n \n # 
Renombra la columna de descripciones de codigos\n dicdf.columns.values[5] = 'Descripcion codigos'\n \n # Dame el DataFrame\n return dicdf\n\n# También es necesario redefinir la función para hacer consultas a los metadatos\ndef queryvar(filename, var = '', tablelen=10, colprint = 125, dictio = metadatos):\n pdefault = pd.get_option('display.max_colwidth')\n pd.set_option('display.max_colwidth', colprint) # Expande el espacio para imprimir columnas\n frame = dictio.loc[filename]\n print('Archivo \"{}.csv\" {}'.format(filename, '-'*30)) # Muestra el nombre del archivo\n print(frame.iloc[0]['desc_arc']) # Muestra la descripcion del archivo\n if var == '': pass\n else:\n print('\\n{}{}'.format(var.upper(), '-'*30)) # Muestra el nombre de la variable\n varframe = frame[frame['Nombre de la \\ncolumna'] == var.upper()] # Haz un subset con los datos de la variable\n varframe = varframe.set_index('Códigos válidos en la columna')\n print(varframe['Descripción'][0]) # Muestra la descripcion de la variable\n print(varframe[['Descripcion codigos']]) # Imprime las descripciones de codigos\n\ncsvpath = r'D:\\PCCS\\00_RawData\\01_CSV\\cngmd\\2015\\P0307\\marco_regulatorio_cngmd2015_dbf\\diccionario_de_datos\\diccionario_de_datos_marco_regulatorio_cngmd2015.csv'\nmetadatos = getmetab(csvpath, 'mbcs')\n\n# Definición de rutas de archivos\npar = 'P0307'\nP0307files = {}\nfor file in os.listdir(tablas[par]):\n P0307files[file.replace('.csv', '')] = tablas[par]+'\\\\'+file", "El contenido de los archivos en la carpeta \"Conjunto de datos\" es el siguiente:", "for file in P0307files.keys():\n print(file)\n queryvar(file.upper())\n print('\\n')", "La información para el parámetro P0307 se encuentra en el archivo M_REGULA.csv", "print('P0307 - {}\\n'.format(descripciones['P0307']))\nqueryvar('m_regula'.upper())\n\n# Carga de datos\nP0307f = tablas['P0307']+'\\\\'+ os.listdir(tablas['P0307'])[4]\ndf = pd.read_csv(P0307f, dtype={'ubic_geo':'str'})\ndf = df.rename(columns = 
{'ubic_geo':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP0307 = df.where((pd.notnull(df)), None)\n\nP0307.head()\n\nP0307.columns", "¿Dónde están los datos sobre desarrollo urbano y ordenamiento territorial?", "queryvar('m_regula'.upper(), 'tema_nis')", "Los datos de DU y OT estan en la columna TEMA_NIS. El código 41 en esta column indica DU y OT", "P0307 = P0307[P0307['tema_nis'] == 41]\nP0307.head()\n\n# Quita las columnas que estén vacías\nP0307 = P0307.dropna(axis=1, how = 'all')\nP0307.head()\n\n# Metadatos\nmeta = P0306meta\nmeta.at['Dataset base','Descripcion'] = meta.at['Dataset base','Descripcion'].replace('P0306', 'P0307')\nmeta", "Exportar archivo", "par = 'P0307'\nfile = r'D:\\PCCS\\01_Dmine\\Datasets\\CNGMD'+'\\\\'+par+'.xlsx'\nwriter = pd.ExcelWriter(file)\nP0307.to_excel(writer, sheet_name = par)\nmeta.to_excel(writer, sheet_name ='METADATOS')\nwriter.save()", "P1001 - Promedio diario de RSU recolectados", "# Rutas de archivos\nparam = 'P1001'\nrutadatos = tablas[param]\nrutameta = tablas[param].replace('conjunto_de_datos', 'diccionario_de_datos')\nrutameta = rutameta + '\\\\' + os.listdir(rutameta)[0]\nprint('{}\\n{}'.format(rutadatos, rutameta))\n\n# Obtencion de metadatos\n# Cada hoja de metadatos es muy muy similar, pero con muy ligeras variaciones\n# La unica parte del proceso que es seguro automatizar es la importación del archivo hacia Python\ndef getmeta(csvpath, textcoding):\n # Importa el csv\n try: \n dicdf = pd.read_csv(csvpath, \n index_col=0, \n parse_dates=False\n )\n except: \n dicdf = pd.read_csv(csvpath, \n index_col=0, \n parse_dates=False, \n encoding = textcoding,\n )\n \n # Renombra las columnas\n dicdf.columns = list(dicdf.iloc[1])\n \n # Dame el archivo\n return dicdf\n\nos.listdir(r'D:\\PCCS\\00_RawData\\01_CSV\\cngmd\\2015\\P1001\\Rec_RSU_cngmd2015_csv\\diccionario_de_datos')\n\nmetadatos = getmeta(rutameta, 'mbcs')\n# Crea columna con el indice\nmetadatos['Nombre de la \\ncolumna'] = metadatos.index\n\n# Extrae el 
nombre del csv fuente en una columna independiente\ndef getarc(x):\n try: return x.split(' ')[1]\n except: return None\nmetadatos['archivo'] = metadatos['Nombre de la \\ncolumna'].apply(getarc)\n\n# Extrae la descripcion del archivo en una columna independiente\ndef getdescarc(x):\n try: return x.split('(')[1].replace(')','')\n except: return None\nmetadatos['desc_arc'] = metadatos['Nombre de la \\ncolumna'].apply(getdescarc)\n\n# En la columna 'arc', reemplaza las celdas cuyo valor es 'de'\nmetadatos['archivo'] = metadatos['archivo'].replace({'de':None})\n\n# Marca columnas que se van a eliminar (Las columnas de donde se sacaron las variables 'arc' y 'desc_arc')\nmetadatos['delete1'] = metadatos[list(metadatos.columns)[1:6]].notnull().sum(axis = 1)\n\n# Rellenar valores NaN\nmetadatos = metadatos.fillna(method='ffill')\n\n# Eliminar valores marcados previaente\nmetadatos = metadatos[metadatos.delete1 != 0]\n\n# Eliminar columnas sin datos\nmetadatos = metadatos.dropna(axis = 1, how = 'all')\n\n# Eliminar encabezados de columna repetidos\nmetadatos = metadatos[metadatos.Descripción != 'Descripción']\n\n# Asignar nuevo indice y eliminar columna 'text_arc'\nmetadatos = metadatos.set_index('archivo')\n\n# Elimina columna delete1\ndel metadatos['delete1']\n\n# Renombra la columna de descripciones de codigos\nmetadatos.columns.values[3] = 'Descripcion codigos'\n\n# Reordena las columnas\nneworder = ['Nombre de la \\ncolumna', 'Descripción', 'Tipo de dato', 'Rango válido', 'Descripcion codigos', \n 'Pregunta textual', 'Página de Cuestionario', 'Definición', 'desc_arc']\n\nmetadatos = metadatos.reindex(columns= neworder)\n\n# Renombra las columnas para que funcionen con queryvar\nmetadatos = metadatos.rename({'Rango válido':'Códigos válidos en la columna'})\n\nmetadatos.head(3)", "¿Donde estan los datos?", "metadatos.loc['secc_i_tr_cngmd15_m6'][metadatos.loc['secc_i_tr_cngmd15_m6']['Nombre de la \\ncolumna'] == 'P2_2']", "Los datos se encuentran en el archivo 
secc_i_tr_cngmd15_m6, en la columna P2_2", "# Definición de rutas a archivos de datos\nParamfiles = {}\nfor file in os.listdir(rutadatos):\n Paramfiles[file.replace('.csv', '')] = rutadatos+'\\\\'+file\n\nfor file, path in Paramfiles.items():\n print('{}:\\n{}\\n'.format(file, path))\n\n# Carga de datos\nP1001f = tablas[param]+'\\\\'+ os.listdir(tablas[param])[0]\ndf = pd.read_csv(P1001f, dtype={'folio':'str'}, encoding = 'mbcs')\ndf = df.rename(columns = {'folio':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP1001 = df.where((pd.notnull(df)), None)\n\nP1001.head(1)\n\nP1001 = P1001['p2_2'].to_frame()\nP1001.head(1)", "Exportar archivos", "# Metadatos\nmeta = meta # Utiliza el archivo de metadatos que habías definido anteriormente\nmeta.at['Dataset base','Descripcion'] = '\"P1001.xlsx\" disponible en \\nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'\nmeta.at['Notas','Descripcion'] = 'p2_2: Cantidad de residuos sólidos recolectada en kilogramos.'\nmeta\n\nfile = r'D:\\PCCS\\01_Dmine\\Datasets\\CNGMD'+'\\\\'+param+'.xlsx'\nwriter = pd.ExcelWriter(file)\nP1001.to_excel(writer, sheet_name = param)\nmeta.to_excel(writer, sheet_name ='METADATOS')\nwriter.save()", "P1006 - Número de municipios con aplicación de programas locales orientados a la GIRSU", "# Rutas de archivos\nparam = 'P1006'\nrutadatos = tablas[param]\nrutameta = tablas[param].replace('conjunto_de_datos', 'diccionario_de_datos')\nrutameta = rutameta + '\\\\' + os.listdir(rutameta)[0]\nprint('{}\\n{}'.format(rutadatos, rutameta))", "¿Donde estan los datos?\nEl archivo secc_v_tr_cngmd15_m6.csv Contiene variables que caracterizan a los municipios de acuerdo a los programas orientados a la gestión integral de los residuos sólidos urbanos, durante el año 2014. 
En este archivo, la columna P13 Indica si se cuenta con algún programa orientado a la gestión integral de residuos sólidos urbanos (1 = Cuenta con Programas; 2 = No cuenta con programas).\nEl archivo secc_v_tr_cngmd15_m6_p13_1.csv Contiene la variable P13_1_1_2, que indica el tipo de programa orientado a la gestión integral de residuos sólidos urbanos.", "# Definición de rutas a archivos de datos\nParamfiles = {}\nfor file in os.listdir(rutadatos):\n Paramfiles[file.replace('.csv', '')] = rutadatos+'\\\\'+file\n\nfor file, path in Paramfiles.items():\n print('{}:\\n{}\\n'.format(file, path))\n\nos.listdir(tablas[param])[0]\n\n# Carga de datos\nP1006f = tablas[param]+'\\\\'+ os.listdir(tablas[param])[0]\ndf = pd.read_csv(P1006f, dtype={'folio':'str'}, encoding = 'mbcs')\ndf = df.rename(columns = {'folio':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP1006 = df.where((pd.notnull(df)), None)", "Exportar Archivos", "P1006 = P1006['p13'].to_frame()\n\n# Metadatos\nmeta = meta # Utiliza el archivo de metadatos que habías definido anteriormente\nmeta.at['Dataset base','Descripcion'] = '\"P1006.xlsx\" disponible en \\nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'\nmeta.at['Notas','Descripcion'] = 'En la columna p13, ¿El municipio cuenta con Programas de Gestion de Residuos? 
1: Si, 2: No'\n\nmeta\n\nfile = r'D:\\PCCS\\01_Dmine\\Datasets\\CNGMD'+'\\\\'+param+'.xlsx'\nwriter = pd.ExcelWriter(file)\nP1006.to_excel(writer, sheet_name = param)\nmeta.to_excel(writer, sheet_name ='METADATOS')\nwriter.save()", "P1009 - Número de municipios con estudios de generación de RSU", "# Rutas de archivos\nparam = 'P1009'\nrutadatos = tablas[param]\nrutameta = tablas[param].replace('conjunto_de_datos', 'diccionario_de_datos')\nrutameta = rutameta + '\\\\' + os.listdir(rutameta)[0]\nprint('{}\\n{}'.format(rutadatos, rutameta))", "¿Donde están los datos?\nsecc_iv_tr_cngmd15_m6 Contiene variables que caracterizan a los municipios de acuerdo a los estudios sobre la generación y composición de los residuos sólidos urbanos, durante el año 2014. \nLa columna P12 Indica si se cuenta con algún estudio sobre la generación de residuos sólidos urbanos (1 = Si; 2 = No).", "# Definición de rutas a archivos de datos\nParamfiles = {}\nfor file in os.listdir(rutadatos):\n Paramfiles[file.replace('.csv', '')] = rutadatos+'\\\\'+file\n\nfor file, path in Paramfiles.items():\n print('{}:\\n{}\\n'.format(file, path))\n\n# Carga de datos\nP1009f = tablas[param]+'\\\\'+ os.listdir(tablas[param])[0]\ndf = pd.read_csv(P1009f, dtype={'folio':'str'}, encoding = 'mbcs')\ndf = df.rename(columns = {'folio':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP1009 = df.where((pd.notnull(df)), None)\ndel(P1009['entidad'])\ndel(P1009['municipio'])", "Exportar archivos", "meta\n\n# Metadatos\nmeta = meta # Utiliza el archivo de metadatos que habías definido anteriormente\nmeta.at['Dataset base','Descripcion'] = '\"P1009.xlsx\" disponible en \\nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'\nmeta.at['Notas','Descripcion'] = 'Para la columna P12, ¿El Municipio cuenta con estudios de generacion de residuos? 
1: Si 2: No'\nmeta\n\nfile = r'D:\\PCCS\\01_Dmine\\Datasets\\CNGMD'+'\\\\'+param+'.xlsx'\nwriter = pd.ExcelWriter(file)\nP1009.to_excel(writer, sheet_name = param)\nmeta.to_excel(writer, sheet_name ='METADATOS')\nwriter.save()", "P1003 - Número de municipios con disponibilidad de servicios relacionados con los RSU\n¿Donde estan los datos?\nLa informacion de este parametro se encuentra dividida entre diferentes carpetas.", "tablasP1003", "La Carpeta 1 Contiene 2 archivos:\nsecc_i_tr_cngmd15_m6.csv - Contiene variables que caracterizan a los municipios de acuerdo a la recolección de residuos sólidos urbanos, durante el año 2014. En este archivo, la variable P1 indica la disponibilidad del servicio de recolección (1: Si, 2:No)\nsecc_i_tr_cngmd15_m6_p6_3_2.csv - Contiene variables que caracterizan a los municipios de acuerdo al parque vehicular utilizado para la recolección y traslado de residuos sólidos urbanos, durante el año 2014. En este archivo, la variable P6_3_2_1_3 contiene el número de vehículos utilizados para la recolección de Residuos solidos urbanos. (Esta variable puede utilizarse para la construcción del parámetro 1005)\nLa Carpeta 2 Contiene 1 archivo:\nsecc_ii_tr_cngmd15_m6.csv - Contiene variables que caracterizan a los municipios de acuerdo al tratamiento de los residuos, durante el año 2014. En este archivo, la variable P10 Identifica si al menos una fracción de los residuos sólidos urbanos recolectados por el municipio o delegación es enviada a plantas de tratamiento (1: Si, 2:No)\nLa Carpeta 3 contiene 1 archivo:\nsecc_iii_tr_cngmd15_m6.csv - Contiene variables que caracterizan a los municipios de acuerdo a la disposición final de los residuos sólidos urbanos, durante el año 2014. 
En este archivo, la variable P11 Identifica el número de sitios de disposición final a los que son son remitidos los residuos que se recolectan en todo el municipio o delegación", "# Rutas de archivos\nparam = 'P1003'\nrutasdatos = list(tablasP1003.values())\nfor ruta in rutasdatos:\n print(ruta)\n\n# Definición de rutas a archivos de datos\nParamfiles = {}\nfor rutadatos in rutasdatos:\n for file in os.listdir(rutadatos):\n Paramfiles[file.replace('.csv', '')] = rutadatos+'\\\\'+file\n\nfor file, path in Paramfiles.items():\n print('{}:\\n{}\\n'.format(file, path))\n\n# Carga de datos\n# Es necesario hacer 3 dataframes, uno por cada archivo, y después unir las columnas para cada parámetro.\nP1003f1 = Paramfiles['secc_i_tr_cngmd15_m6']\ndf = pd.read_csv(P1003f1, dtype={'folio':'str'}, encoding = 'mbcs')\ndf = df.rename(columns = {'folio':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP1003f1 = df.where((pd.notnull(df)), None)\n\nP1003f2 = Paramfiles['secc_ii_tr_cngmd15_m6']\ndf = pd.read_csv(P1003f2, dtype={'folio':'str'}, encoding = 'mbcs')\ndf = df.rename(columns = {'folio':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP1003f2 = df.where((pd.notnull(df)), None)\n\n# El Parametro en realidad no utiliza el numero de sitios de disposicion de residuos. 
\n# Y no está documentado el significado de NS en la columna P11 lo que dificulta la lectura de los datos\n''' \nP1003f3 = Paramfiles['secc_iii_tr_cngmd15_m6']\ndf = pd.read_csv(P1003f3, dtype={'folio':'str'}, encoding = 'mbcs')\ndf = df.rename(columns = {'folio':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP1003f3 = df.where((pd.notnull(df)), None)\n'''\n\n# Aislar datos de interés\nP1003 = P1003f1['p1'].to_frame()\nP1003['p10'] = P1003f2['p10']\n# P1003['p11'] = P1003f3['p11'] #p11 se excluye del analisis por los motivos descritos antes\n\nP1003.head(1)", "Exportar archivos", "# Metadatos\nmeta = meta # Utiliza el archivo de metadatos que habías definido anteriormente\nmeta.at['Dataset base','Descripcion'] = '\"P1003.xlsx\" disponible en \\nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'\nmeta.at['Notas','Descripcion'] = 'para p1: ¿Dispone de servicio de recoleccion? (1: Si 2: No)\\npara p10: ¿Al menos una fracción de los RSU es enviada a plantas de tratamiento? (1: Si 2: No)\\npara p11: ¿A cuantos sitios de disposición final son remitidos los residuos?'\nmeta\n\nparam\n\nfile = r'D:\\PCCS\\01_Dmine\\Datasets\\CNGMD'+'\\\\'+param+'.xlsx'\nwriter = pd.ExcelWriter(file)\nP1003.to_excel(writer, sheet_name = param)\nmeta.to_excel(writer, sheet_name ='METADATOS')\nwriter.save()", "P1005 - Número de vehículos utilizados para la recolección de residuos sólidos urbanos\n¿Donde están los datos?\nLa Carpeta 1 de P1003 (Procesada previamente) contiene 2 archivos:\nsecc_i_tr_cngmd15_m6.csv, y\nsecc_i_tr_cngmd15_m6_p6_3_2.csv - Contiene variables que caracterizan a los municipios de acuerdo al parque vehicular utilizado para la recolección y traslado de residuos sólidos urbanos, durante el año 2014. En este archivo, la variable P6_3_2_1_3 contiene el número de vehículos utilizados para la recolección de Residuos solidos urbanos. 
(Esta variable puede utilizarse para la construcción del parámetro 1005)", "# Carga de datos\nP1005f = Paramfiles['secc_i_tr_cngmd15_m6_p6_3_2']\ndf = pd.read_csv(P1005f, dtype={'FOLIO':'str'}, encoding = 'mbcs')\ndf = df.rename(columns = {'FOLIO':'CVE_MUN'})\ndf.set_index('CVE_MUN', inplace = True)\nP1005f = df.where((pd.notnull(df)), None)\n\nP1005f.head(1)", "Exportar archivos", "P1005 = P1005f['P6_3_2_1_3'].to_frame()\nP1005.head(3)\n\n# Metadatos\nmeta = meta # Utiliza el archivo de metadatos que habías definido anteriormente\nmeta.at['Dataset base','Descripcion'] = '\"P1005.xlsx\" disponible en \\nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'\nmeta.at['Notas','Descripcion'] = 'P6_3_2_1_3: Numero de vehiculos utilizados para la recolección de Residuos Solidos Urbanos'\nmeta\n\nparam = 'P1005'\nfile = r'D:\\PCCS\\01_Dmine\\Datasets\\CNGMD'+'\\\\'+param+'.xlsx'\nwriter = pd.ExcelWriter(file)\nP1005.to_excel(writer, sheet_name = param)\nmeta.to_excel(writer, sheet_name ='METADATOS')\nwriter.save()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
rainyear/pytips
Tips/2016-05-11-Floating-Point-Arithmetic.ipynb
mit
[ "Python 浮点数运算\n浮点数用来存储计算机中的小数,与现实世界中的十进制小数不同的是,浮点数通过二进制的形式来表示一个小数。在深入了解浮点数的实现之前,先来看几个 Python 浮点数计算有意思的例子:", "0.1 == 0.10000000000000000000001\n\n0.1+0.1+0.1 == 0.3", "IEEE 浮点数表示法\n这些看起来违反常识的“错误”并非 Python 的错,而是由浮点数的规则所决定的,即使放到其它语言中结果也是这样的。要理解计算机中浮点数的表示规则,先来看现实世界中十进制小数是如何表示的:\n1.234 = 1 + 1/10 + 2/100 + 3/1000\n可以用下面的公式来表示:\n$$d = \\sum_{i=-n}^m10^i*d_i$$\n其中 $d_i$ 是十进制中 0~9 的数字。而如果是一个二进制的小数:\n1.001 = 1 + 0/2 + 0/4 + 1/8\n可以用下面的公式来表示:\n$$d = \\sum_{i=-n}^m2^i*d_i$$\n其中 $d_i$ 是二进制中的 0 或 1。Python 中的浮点数都是双精度的,也就说采用 64 位来表示一个小数,那这 64 位分别有多少用来表示整数部分和小数部分呢?根据 IEEE 标准,考虑到符号位,双精度表示法是这样分配的:\n$$d = s * \\sum_{i=-52}^{11} 2^i*d_i$$\n也就是说用1位表示符号位,11位表示整数部分,52位表示小数部分。正如十进制中我们无法精确表示某些分数(如10/3),浮点数中通过 d1/2 + d2/4 + ... 的方式也会出现这种情况,比如上面的例子中,十进制中简单的 0.1 就无法在二进制中精确描述,而只能通过近似表示法表示出来:", "(0.1).as_integer_ratio()", "也就是说 0.1 是通过 3602879701896397/36028797018963968 来近似表示的,很明显这样近似的表示会导致许多差距很小的数字公用相同的近似表示数,例如:", "(0.10000000000000001).as_integer_ratio()", "在 Python 中所有这些可以用相同的近似数表示的数字统一采用最短有效数字来表示:", "print(0.10000000000000001)", "浮点数运算\n既然有些浮点数是通过近似值表示的,那么在计算过程中就很容易出现误差,就像最开始的第二个例子一样:", "a = .1 + .1 + .1\nb = .3\nprint(a.as_integer_ratio())\nprint(b.as_integer_ratio())\nprint(a == b)", "为了解决运算中的问题,IEEE 标准还指定了一个舍入规则(round),即 Python 中内置的 round 方法,我们可以通过舍入的方式取得两个数的近似值,来判断其近似值是否相等:", "round(a, 10) == round(b, 10)", "当然这种舍入的方式并不一定是可靠的,依赖于舍入的选择的位数,位数太大,就失去了 round 的作用,太小,就会引入别的错误:", "print(round(a, 17) == round(b, 17))\nprint(round(0.1, 1) == round(0.111, 1))", "Python 中使用更精确的浮点数可以通过 decimal 和 fractions 两个模块,从名字上也能猜到,decimal 表示完整的小数,而 fractions 通过分数的形式表示小数:", "from decimal import Decimal\na = Decimal(0.1)\nb = Decimal(0.1000000000000001)\nc = Decimal(0.10000000000000001)\nprint(a)\nprint(b)\nprint(c)\n\na == b == c\n\nfrom fractions import Fraction\nf1 = Fraction(1, 10) # 0.1\nprint(float(f1))\nf3 = Fraction(3, 10) # 0.3\nprint(float(f3))\n\nprint(f1 + f1 + f1 == f3)", "总结\n浮点数这些奇特的特性让我们不得不在使用的时候格外注意,尤其是当有一定的精度要求的情况下。如果真的是对精度要求较高且需要频繁使用浮点数,建议使用更专业的 SciPy 科学计算包。" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/dialogflow-chatbot
notebooks/ProcessSynonyms.ipynb
apache-2.0
[ "Process Synonyms\nThis notebook uses a combination of Python data science libraries and the Google Natural Language API (machine learning) to expand the vocabulary of the chatbot by generating synonyms for topics created in the previous notebook.", "!pip uninstall -y google-cloud-datastore\n\n!pip install google-cloud-datastore\n\n!pip install inflect", "Hit Reset Session > Restart, then resume with the following cells.", "# Only need to do this once...\nimport nltk\nnltk.download('stopwords')\nnltk.download('wordnet')\n\nfrom nltk.corpus import stopwords\nstop = set(stopwords.words('english'))\n\nfrom google.cloud import datastore\n\ndatastore_client = datastore.Client()\n\nclient = datastore.Client()\nquery = client.query(kind='Topic')\nresults = list(query.fetch())\n\nimport inflect\nplurals = inflect.engine()", "Extract Synonyms with Python\nSplit the topic into words and use PyDictionary to look up synonyms in a \"thesaurus\" for each word. Store these in Datastore and link them back to the topic. 
Note this section uses the concept of \"stop words\" to filter out articles and other parts of speech that don't contribute to meaning of the topic.", "from nltk.corpus import wordnet\nfrom sets import Set\n\nfor result in results:\n for word in result.key.name.split():\n \n if word in stop:\n continue\n\n \n synonyms = Set()\n for syn in wordnet.synsets(word):\n \n if \".n.\" in str(syn):\n\n for l in syn.lemmas():\n lemma = l.name()\n if (lemma.isalpha()):\n synonyms.add(lemma)\n synonyms.add(plurals.plural(lemma))\n \n if \".a.\" in str(syn):\n synonyms = Set()\n break\n\n print result.key.name, word, synonyms\n \n kind = 'Synonym'\n synonym_key = datastore_client.key(kind, result.key.name)\n\n synonym = datastore.Entity(key=synonym_key)\n synonym['synonym'] = result.key.name\n\n datastore_client.put(synonym)\n \n synonym_key = datastore_client.key(kind, word)\n\n synonym = datastore.Entity(key=synonym_key)\n synonym['synonym'] = result.key.name\n\n datastore_client.put(synonym)\n \n for dictionary_synonym in synonyms:\n \n synonym_key = datastore_client.key(kind, dictionary_synonym)\n\n synonym = datastore.Entity(key=synonym_key)\n synonym['synonym'] = result.key.name\n\n datastore_client.put(synonym)\n \n synonym_key = datastore_client.key(kind, plurals.plural(word))\n\n synonym = datastore.Entity(key=synonym_key)\n synonym['synonym'] = result.key.name\n\n datastore_client.put(synonym)\n " ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
paix120/DataScienceLearningClubActivities
Activity05/Mushroom Edibility Classification - Naive Bayes.ipynb
gpl-2.0
[ "Mushroom Classification - Edible or Poisonous?\nby Renee Teate\nUsing Gaussian Naive Bayes Classification from scikit-learn\nFor Activity 5 of the Data Science Learning Club: http://www.becomingadatascientist.com/learningclub/forum-13.html\nDataset from UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Mushroom", "#import pandas and numpy libraries\nimport pandas as pd\nimport numpy as np\nimport sys #sys needed only for python version\n#import gaussian naive bayes from scikit-learn\nimport sklearn as sk\n#seaborn for pretty plots\nimport seaborn as sns\n\n#display versions of python and packages\nprint('\\npython version ' + sys.version)\nprint('pandas version ' + pd.__version__)\nprint('numpy version ' + np.__version__)\nprint('sk-learn version ' + sk.__version__)\nprint('seaborn version ' + sns.__version__)\n", "The dataset doesn't include column names, and the values are text characters", "#read in data. it's comma-separated with no column names.\ndf = pd.read_csv('agaricus-lepiota.data', sep=',', header=None,\n error_bad_lines=False, warn_bad_lines=True, low_memory=False)\n# set pandas to output all of the columns in output\npd.options.display.max_columns = 25\n#show the first 5 rows\nprint(df.sample(n=5))", "Added column names from the UCI documentation", "#manually add column names from documentation (1st col is class: e=edible,p=poisonous; rest are attributes)\ndf.columns = ['class','cap-shape','cap-surface','cap-color','bruises','odor','gill-attachment',\n 'gill-spacing','gill-size','gill-color','stalk-shape','stalk-root',\n 'stalk-surf-above-ring','stalk-surf-below-ring','stalk-color-above-ring','stalk-color-below-ring',\n 'veil-type','veil-color','ring-number','ring-type','spore-color','population','habitat']\n\nprint(\"Example values:\\n\")\nprint(df.iloc[3984]) #this one has a ? 
value - how are those treated by classifier?", "The dataset is split fairly evenly between the edible and poison classes", "#show plots in notebook\n%matplotlib inline\n\n#bar chart of classes using pandas plotting\nprint(df['class'].value_counts())\ndf['class'].value_counts().plot(kind='bar')\n", "Edibility by Mushroom Cap Shape\nnote that none of the cap shapes seem particularly predictive of edibility", "#seaborn factorplot to show edible/poisonous breakdown by different factors\ndf_forplot = df.loc[:,('class','cap-shape','gill-color')]\ng = sns.factorplot(\"class\", col=\"cap-shape\", data=df_forplot,\n kind=\"count\", size=2.5, aspect=.8, col_wrap=6)\n", "Edibility by Mushroom Gill Color\nnote that buff gills (b) appear to always indicate poison, and the others aren't as clear-cut", "g = sns.factorplot(\"class\", col=\"gill-color\", data=df_forplot,\n kind=\"count\", size=2.5, aspect=.8, col_wrap=6)", "Let's see how well our classifier can identify poisonous mushrooms by combinations of features", "#put the features into X (everything except the 0th column)\nX = pd.DataFrame(df, columns=df.columns[1:len(df.columns)], index=df.index)\n#put the class values (0th column) into Y \nY = df['class']\n\n#encode the text category labels as numeric\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(Y)\n#print(le.classes_)\n#print(np.array(Y))\n#Y values now boolean values; poison = 1\ny = le.transform(Y)\n#print(y_train)\n\n#have to initialize or get error below\nx = pd.DataFrame(X,columns=[X.columns[0]])\n#encode each feature column and add it to x_train\nfor colname in X.columns:\n le.fit(X[colname])\n print(colname, le.classes_)\n x[colname] = le.transform(X[colname])\n\nprint('\\nExample Feature Values - row 1 in X:')\nprint(X.iloc[1])\nprint('\\nExample Encoded Feature Values - row 1 in x:')\nprint(x.iloc[1])\nprint('\\nClass Values (Y):')\nprint(np.array(Y))\nprint('\\nEncoded Class Values (y):')\nprint(y)\n\n\n#split the dataset into 
training and test sets\nfrom sklearn.cross_validation import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)\n\n#initialize and fit the naive bayes classifier\nfrom sklearn.naive_bayes import GaussianNB\nskgnb = GaussianNB()\nskgnb.fit(x_train,y_train)\ntrain_predict = skgnb.predict(x_train)\n#print(train_predict)\n\n#see how accurate the training data was fit\nfrom sklearn import metrics\nprint(\"Training accuracy:\",metrics.accuracy_score(y_train, train_predict))\n\n#use the trained model to predict the test values\ntest_predict = skgnb.predict(x_test)\nprint(\"Testing accuracy:\",metrics.accuracy_score(y_test, test_predict))\n\n\nprint(\"\\nClassification Report:\")\nprint(metrics.classification_report(y_test, test_predict, target_names=['edible','poisonous']))\nprint(\"\\nConfusion Matrix:\")\nskcm = metrics.confusion_matrix(y_test,test_predict)\n#putting it into a dataframe so it prints the labels\nskcm = pd.DataFrame(skcm, columns=['predicted-edible','predicted-poisonous'])\nskcm['actual'] = ['edible','poisonous']\nskcm = skcm.set_index('actual')\n\n#NOTE: NEED TO MAKE SURE I'M INTERPRETING THE ROWS & COLS RIGHT TO ASSIGN THESE LABELS!\nprint(skcm)\n\nprint(\"\\nScore (same thing as test accuracy?): \", skgnb.score(x_test,y_test))\n\n", "Add interpretation of numbers above (after verifying I entered the parameters correctly and the metrics are labeled right)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ajkavanagh/pyne-sqlalchemy-2015-04
notebook/ORM Examples.ipynb
gpl-3.0
[ "SQL Alchemy ORM Examples\nSo, these are the same as the CORE expression language, but using the ORM toolkit\nCreate an in memory SQLite database engine", "from sqlalchemy import create_engine\nengine = create_engine('sqlite:///:memory:')", "Create some tables using ORM declarative", "from sqlalchemy.ext.declarative import declarative_base\nBase = declarative_base()", "declarative_base is a factory that makes a class on which to define ORM classes. We use it to create our models like this:", "from sqlalchemy import Column, Integer, String, MetaData, ForeignKey\n\nclass User(Base):\n __tablename__ = 'user'\n \n id_user = Column(Integer, primary_key=True)\n name = Column(String)\n age = Column(Integer)\n \n def __repr__(self):\n return \"<User(id_user={}, name={}, age={})\".format(self.id_user, self.name, self.age)\n\nfrom sqlalchemy.orm import relationship, backref\n \nclass Item(Base):\n __tablename__ = 'item'\n \n id_item = Column(Integer, primary_key=True)\n id_user = Column(Integer, ForeignKey('user.id_user'))\n thing = Column(String)\n \n user = relationship(\"User\", backref=backref('items', order_by=id_item))\n \n def __repr__(self):\n return \"<Item(id_item={}, id_user={}, thing={})\".format(self.id_item, self.id_user, self.thing)\n\nUser.__table__", "Now create the tables in the engine\nThis is the equivalent of metadata.create_all(engine).", "Base.metadata.create_all(engine)", "Create a User instance - this is just in Python memory - not in the DB!", "u = User(id_user=1, name=\"Billy\", age=40)\nprint(u)", "Sessions\nAnd now for something different. 
We need to talk about sessions.", "from sqlalchemy.orm import sessionmaker\nSession = sessionmaker(bind=engine)\nsession = Session()", "Let's add a some users", "people = [\n (1, 'Bob', '20'),\n (2, 'Sally', '25'),\n (3, 'John', '30')]\nfor (id_user, name, age) in people:\n u = User(id_user=id_user, name=name, age=age)\n session.add(u)\n", "Let's query for a user", "u1 = session.query(User).get(1)\nprint(u1)", "So this is similar to:\npython\nuser_tuple = connection.execute(select([user]).where(user.c.id_user == 1)).fetchone()\nAnd we can also count the users in the table:", "count = session.query(User).count()\nprint(count)", "Let's add the items to the database too", "items = (\n (1, 1, 'Peanuts'),\n (2, 1, 'VW'),\n (3, 1, 'iPad'),\n (4, 2, 'Raisins'),\n (5, 2, 'Fiat'),\n (6, 2, 'Nexus 10'),\n (7, 2, 'Timex'),\n (8, 3, 'Caviar'),\n (9, 3, 'Porche'),\n (10, 3, 'Surface Pro'),\n (11, 3, 'Rolex'),\n (12, 3, 'Boat'),\n (13, 3, 'Plane'))\nfor (id_item, id_user, thing) in items:\n i = Item(id_item=id_item, id_user=id_user, thing=thing)\n session.add(i)\n\nprint(session.query(Item).count())", "Inspecting the data\nAs we're in the domain model now, we need to look at things like objects. Let's look at the John User() item and see what's there:", "john = session.query(User).get(1)\nprint(john)\n\nfor i in john.items:\n print(i)\n\nitem1 = john.items[0]\nprint(item1)\nprint(item1.user)", "Let's list out all of the users and items:", "for (u, i) in session.query(User, Item).filter(User.id_user == Item.id_user).all():\n print(u, i)", "Let's find the user who has a Timex:", "u = session.query(User).join(Item).filter(Item.thing.ilike('timex')).one()\nprint(u)", "How about func.count() and friends?", "from sqlalchemy import func\n\nresults = session.query(User, func.count(Item.id_item)).join(Item).group_by(Item.id_user).all()\nfor r in results:\n print(r)", "That's all for ORM" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PyladiesMx/Pyladies_ifc
2. Lists_and_conditionals/lists and conditionals.ipynb
mit
[ "¡Bienvenid@s nuevamente!\nHoy veremos otro tipo de objetos en python llamados listas y además empezaremos a tomar decisiones con expresiones condicionales.\n¿Y qué pasa si queremos coleccionar valores? Acerca de las listas...\nEn la reunión pasada vimos que en python podemos tener diversos tipos de objetos numéricos, booleanos y de caracteres. Incluso aprendimos cómo guardar estos valores en variables, pero que pasa si queremos operar no sólo con un valor sino con una colección de valores. Para este caso hay otro tipo de objetos y el primero que veremos se llama lista.\nEn python una lista es una secuencia de elementos mutables que llevan asignados un índice. Para crear una lista en python los que se hace es empezar con corchetes que van a ser llenados con valores separados por una coma, éstos valores no necesitan ser del mismo tipo para poder habitar en la misma lista.\nVeamos unos ejemplos...", "lista1 = [\"Hola\", 1, 2.0, True]\n\nlista1\n\nlista2 = [2+3, 5+1, 4**2]\n\nlista2", "Vamos a explorar ahora la parte de los índices y mutabilidad en una lista\nPara acceder a un valor en una lista podemos hacer uso de los índices. Los índices empiezan desde cero.", "#Para especificar que queremos acceder a un valor en python, el objeto es seguido de corchetes con el índice deseado\nlista1[0]\n\nlista2[1]\n\n#¿Qué pasa si damos un índice que no tiene un valor?\nlista1[4]", "¡Obvio! nos da un error de índice... 
Y nos dice qué está fuera del rango de valores que tenemos guardados en la lista\nTambién tenemos acceso a un rango de valores", "#Vamos a pedirle a python que nos muestre los primero tres elementos de la lista1\nlista1[0:3]", "Pregunta 1.\nYa vimos que lista1 posee elementos con índices del 0 al 3, ¿por qué te imaginas que en la expresión anterior no nos mostró el último elemento si le pedimos ver del 0:3?", "#Veamos que pasa si le damos un índice mayor, a pesar de que sabemos que lista1[4] nos da un error\nlista1[0:4]\n\n#Otra forma de mostrar todos los elementos es con \":\"\nlista2[:]\n\nlista2\n\ncopia = lista2\n\ncopia\n\ncopia *2\n\ncopia = copia *2\n\ncopia\n\nlista2\n\ncopia = lista2[:]\n\ncopia\n\ncopia is lista2\n\ncopia.append(13)\n\ncopia\n\nlista2\n\n#Aquí le estamos pidiendo a python que nos de todos los elementos de la lista menos el primero\nlista1[1:]\n\n#Si queremos ver el último elemento pero no sabemos cuántos hay en una lista podemos usar el siguiente truco\nlista1[:-1]", "Pregunta 2.\n¿Es posible ir saltando un número en el rango con el que seleccionamos las listas? Esto es cómo pedirle a python (si es posible) que me de los elementos con índices pares del rango 1:6 (los índices 2, 4 y 6)", "#Si no sabemos un índice, pero sabemos que hay un valor en una lista podemos preguntarle a python cuál es ese índice \n#esto nos dice el índice del primer elemento que se encuentre con ese valor\n#(si hay valores repetidos nos da el de menor índice)\nlista1.index(2.0)\n\nlista2.index(5)", "Pregunta 3.\n¿Qué resultaría de la siguiente expresión? lista1[::-1]", "lista3 = lista1[::-1]\n\nlista3\n\nlista1\n\nlista1[::-1]", "La parte \"mutable\" de las listas significa que podemos alterar el contenidos de estas al agregar elementos a la colección y/o cambiando el valor de algunos elementos.\nVeamos como agregar elementos a las listas. 
Para esto usaremos el método .append(elemento) que tienen las listas", "lista1.append(\"nuevo\")\n\nlista1", "Como lo has de haber notado, el elemento nuevo que agregaste se va al final de la lista. Si quisieras que tu nuevo elemento estuviera en una posición específica tendrías que usar el método .insert(índice, elemento)", "lista1.insert(2, \"soy\")\n\nlista1", "Hay ocasiones en las que no quieres agregar nuevos elementos, pero alterar el valor que tenías de los mismos, para esto puedes accesar por índice al elemento que quieras y directamente asignarle otro valor", "lista1[-1]='Nuevo'\n\nlista1", "Pregunta 4\n¿Cómo le harías para borrar elementos de una lista?\nPista: del y remove", "lista3\n\nlista3.remove('Hola')\n\nlista3", "Pregunta 5\nCrea una lista nueva que se llame \"copia\" de la lista2", "lista3.append(True)\n\nlista3.remove(True)\n\nlista3", "Expresiones condicionales\nImagina que estás en una calle a punto de cruzarla, ¿Qué es lo que te hace decidir si la cruzas o no?\nLo mismo sucede con python. En este caso le tienes que dar ciertas condiciones que deben ser cumplidas, esto es que al evaluar una expresión, esta resulte en un cierto valor o en True para que siga con una operación. Si es necesario también se le pueden dar instrucciones para cuando la condición no sea cumplida.\nVeamos unos ejemplos", "#Crea un elemento nuevo en una lista si la longitud es menor a 7 elementos\nif len(lista1) < 7:\n lista1.append('elemento')\n\n#Veamos qué pasó con lista1\nlista1\n\ndel(lista1[3])\n\nlista1\n\n#Si el primer valor de una lista es integer conviértelo a float, de lo contratio conviértelo a integer\nif type(lista2[0]) is int:\n lista2[0] = float(lista2[0])\n lista2[1] = float(lista2[1])\nelif lista2[2] == 10:\n print(lista2[2])\nelif lista2[2] > 10:\n print(\"si\")\n\n\n\n#Veamos cómo quedó la lista 2\nlista2", "Aquí hay unas cosas que notar:\n1. La estructura inicial de una expresión condicional es if &lt;condición&gt; :\n2. 
Después de esto hay un espacio que nos indica que el código que escribiremos forma parte del condicional\n3. La expresión se puede terminar ahí o podemos darle otra instrucción con else: (nótese que else se encuentra al mismo nivel que if)\n4. El código que forma parte de else lleva también un espacio (estos espacios son llamados indentación)\n\nPregunta 6\n¿Qué pasa si tengo más de una condición que debe ser cumplida?", "and " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
djfan/wifind
viz/hp_target_ct_cb.ipynb
mit
[ "https://www1.nyc.gov/site/planning/data-maps/open-data/districts-download-metadata.page", "import shapefile as shp\nimport math\nimport pandas as pd\nimport geopandas as gpd\nimport pylab as pl\nfrom fiona.crs import from_epsg\n\n%pylab inline\n\nhp_target = gpd.read_file(\"./hp_target/hp_target.shp\")\nhp_target.to_crs(epsg=2263, inplace=True)\n\nct = gpd.read_file(\"./nyct2010_17b/nyct2010.shp\")\ncb = gpd.read_file(\"./nycb2010_17b/nycb2010.shp\")", "https://www1.nyc.gov/assets/planning/download/pdf/data-maps/open-data/nyct2010_metadata.pdf?ver=17b\nhttps://www1.nyc.gov/assets/planning/download/pdf/data-maps/open-data/nycb2010_metadata.pdf?ver=17b", "ct.to_crs(epsg=2263, inplace=True)\ncb.to_crs(epsg=2263, inplace=True)\n\nct.BoroCT2010.unique().shape\ncb.BCTCB2010.unique().shape", "CT merge\nintersect & within", "hp_intersect_ct = gpd.sjoin(hp_target, ct, how='left', op='intersects')\nhp_within_ct = gpd.sjoin(hp_target, ct, how='left', op='within')\nct_within_hp = gpd.sjoin(ct, hp_target, how='inner', op='within')\n\nhp_intersect_ct.groupby('developmen').apply(lambda x: x.BoroCT2010)\n\nhp_within_ct.groupby('developmen').apply(lambda x: x.BoroCT2010)\n\nct_within_hp.shape", "CB merge", "hp_intersect_cb = gpd.sjoin(hp_target, cb, how='left', op='intersects')\nhp_within_cb = gpd.sjoin(hp_target, cb, how='left', op='within')\ncb_within_hp = gpd.sjoin(cb, hp_target, how='inner', op='within')\n\nhp_intersect_cb.groupby('developmen').apply(lambda x: x.BCTCB2010)\n\nhp_within_cb.groupby('developmen').apply(lambda x: x.BCTCB2010)\n\ncb_within_hp.shape", "<br/>\n<br/>\nSelect op = 'intersects'", "hp_ct = hp_intersect_ct.groupby('developmen').apply(lambda x: x.BoroCT2010.values)\nhp_cb = hp_intersect_cb.groupby('developmen').apply(lambda x: x.BCTCB2010.values)\n\nhp_target['CT'] = hp_ct.values\nhp_target['CB'] = hp_cb.values\n\nhp_target" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
brettavedisian/phys202-2015-work
assignments/assignment08/InterpolationEx01.ipynb
mit
[ "Interpolation Exercise 1", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nfrom scipy.interpolate import interp1d", "2D trajectory interpolation\nThe file trajectory.npz contains 3 Numpy arrays that describe a 2d trajectory of a particle as a function of time:\n\nt which has discrete values of time t[i].\nx which has values of the x position at those times: x[i] = x(t[i]).\ny which has values of the y position at those times: y[i] = y(t[i]).\n\nLoad those arrays into this notebook and save them as variables x, y and t:", "f=np.load('trajectory.npz')\nx=np.array(f['x'])\ny=np.array(f['y'])\nt=np.array(f['t'])\n\nassert isinstance(x, np.ndarray) and len(x)==40\nassert isinstance(y, np.ndarray) and len(y)==40\nassert isinstance(t, np.ndarray) and len(t)==40", "Use these arrays to create interpolated functions $x(t)$ and $y(t)$. Then use those functions to create the following arrays:\n\nnewt which has 200 points between ${t_{min},t_{max}}$.\nnewx which has the interpolated values of $x(t)$ at those times.\nnewy which has the interpolated values of $y(t)$ at those times.", "newt=np.linspace(t.min(),t.max(),200)\nxt=interp1d(t,x,kind='cubic')\nyt=interp1d(t,y,kind='cubic')\nnewx=xt(newt)\nnewy=yt(newt)\n\nassert newt[0]==t.min()\nassert newt[-1]==t.max()\nassert len(newt)==200\nassert len(newx)==200\nassert len(newy)==200", "Make a parametric plot of ${x(t),y(t)}$ that shows the interpolated values and the original points:\n\nFor the interpolated points, use a solid line.\nFor the original points, use circles of a different color and no line.\nCustomize your plot to make it effective and beautiful.", "# referenced http://goo.gl/gixqML for legend formatting\nplt.plot(newx,newy,label='interpolated xy-data')\nplt.plot(x,y,marker='o',linestyle='',label='original xy-data')\nplt.ylim(bottom=-1.0)\nplt.xlim(left=-1.0)\nplt.xlabel('x-position')\nplt.ylabel('y-position')\nplt.legend(loc=10, 
bbox_to_anchor=(1.2,0.5))\nplt.title('2D Trajectory');\n\nassert True # leave this to grade the trajectory plot" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
crazyhottommy/scripts-general-use
Python/pybedtools_intro.ipynb
mit
[ "This notebook is to get myself to be familiar with the pybedtools\nimport the pybedtools module", "import pybedtools\nimport sys\nimport os", "get the working directory and you can change to the directory you want by os.chdir(path)\nlist all the files in the directory os.listdir(path)", "os.getcwd()\n# use a pre-shipped bed file as an example\na = pybedtools.example_bedtool('a.bed')", "a is a bedtool object, one can access the object by index", "print a[0]\nprint a[1]\nfeature = a[1]", "see what type of the interval is by Interval.file_type\nAll features, no matter what the file type, have chrom, start, stop, name, score, and strand attributes. Note that start and stop are long integers, while everything else (including score) is a string.", "print feature.file_type\nprint feature\nprint feature.chrom\nprint feature.start\nprint feature.stop\nprint feature.name\nprint feature.score\nprint feature.strand\nprint feature.fields", "interval can also be accessed by index or like a dictionary", "print feature[0]\nprint feature[\"chrom\"]\nprint feature[1]\nprint feature[\"start\"]\n\nprint a[1:4]", "slice get an itertools object that can be iterated", "for interval in a[1:4]:\n    print interval", "for each interval, one can access the chr, start, end by", "for interval in a[1:4]:\n    print interval.chrom", "Let's do some intersection for 2 bed files", "a = pybedtools.example_bedtool('a.bed')\nb = pybedtools.example_bedtool('b.bed')\n\nprint a.head() # print out only the first 10 lines if you have big bed file\nprint b.head()", "use the Bedtools.intersect() method", "a_and_b = a.intersect(b)\n\na_and_b.head()", "one can add flags to the intersect call just as the command line intersectbed \n-wa Write the original entry in A for each overlap. may have duplicated entries from A\n-u Write original A entry once if any overlaps found in B. 
In other words, just report the fact that at least one overlap was found in B.\nThe following toy example returns the same result for -u and -wa flag. \na.intersect(b, wa=True).head()", "a.intersect(b, u=True).head()", "saving files\nsave the Bedtool object to a file, you can add a trackline.", "c = a_and_b.saveas('intersection-of-a-and-b.bed', trackline='track name=\"a and b\"')\n\nos.listdir(\".\")\nprint c", "one can chain the methods of pybedtools just like the pipe in the command line.\nThe following intersect a with b first, and save the intersection in a file.\nbecause the intersect() method returns a Bedtool object, it can be chained using .merge()\nmethod and finally saved the merged bed file", " x4 = a\\\n .intersect(b, u=True)\\\n .saveas('a-with-b.bed')\\\n .merge()\\\n .saveas('a-with-b-merged.bed')", "demonstrate the filter method\ngrep out only the intervals with length bigger than 100", "print a\nfor interval in a:\n    print len(interval)", "Let's use filter to extract the intervals with length >100", "print a.filter(lambda x: len(x) >100)", "Or, use a more generic function", "def len_filter(feature, L):\n    return len(feature) > L", "Then call this function inside the filter method:", "print a.filter(len_filter, 100)", "we got the same results as using the lambda. 
Using len_filter function is more flexible, as you can supply any length that you want to filter.\ndemonstrate the each() method\neach() method can apply a function for each interval in the BedTool object.\nIt is similar to the apply functions in R\nLet's add counts of how many hits in b intersect a", "with_count = a.intersect(b, c=True)\nprint with_count", "Normalize the counts by dividing by the length of the interval. Use a scalar of 0.001 to normalize it to\ncounts per 1kb", "def normalize_count(feature, scalar=0.001):\n    count = float(feature[-1])\n    normalized_count = count/len(feature) * scalar\n    ## write the score back, need to turn it to a string first\n    feature.score = str(normalized_count)\n    return feature\n\nprint with_count.each(normalize_count)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
davewsmith/notebooks
temperature/MovingTheSensor.ipynb
mit
[ "Moving the sensor away from the ESP8266\nThe previous experiment showed that adding a piece of foam core board between the ESP8266 board and SHT30 temperature sensor board reduced spread in temperature readings, but those values were still high compared to the room thermostat. The burning question: How much is heat from the ESP8266 board influencing the sensor?\nFor this experiment, I chose two probes (:A0 and :2B) that seemed to be behaving very closely with respect to temperature. :2B will be the control.\nInitial conditions were a 75F reading on the room thermostat, and 76F on a digital cooking thermometer placed atop the thermostat. Moving the tip of the cooking thermometer to the back of the CPU board showed a temperature of 86F. Something on the ESP8266 board is putting off significant heat.\nJust before noon, I pulled the sensor board off of the :A0 ESP8266, and reattached it using 4\" jumper wires. (I was concerned about this being a fragile configuration, but the jumpers attached firmly.) That changed the configuration to\n\n(The SHT30 is an I2C device, so only four jumpers are needed for a connection.)\nThe code below is explained in the InitialTemperatureValues notebook.", "%matplotlib inline\nimport matplotlib\nmatplotlib.rcParams['figure.figsize'] = (12, 5)\nimport pandas as pd\n\ndf = pd.read_csv('movesensors.csv', header=None, names=['time', 'mac', 'f', 'h'], parse_dates=[0])\n\nper_sensor_f = df.pivot(index='time', columns='mac', values='f')\ndownsampled_f = per_sensor_f.resample('2T').mean()\ndownsampled_f.plot();", "Wow. Moving the sensor away from the ESP8266 drops the temperature reading over 10F, producing values that are consistent with the room thermostat. 
It also appears to reduce jitter in the readings.\nHow are humidity readings affected?", "per_sensor_h = df.pivot(index='time', columns='mac', values='h')\ndownsampled_h = per_sensor_h.resample('2T').mean()\ndownsampled_h.plot();", "This brings the humidity in line with the weather report (Weather Underground was reporting the local humidity as 51%, but the house gets stuffy when closed up for the day, so 56% is believable).\nConclusions and next steps\nBeing able to snap a temperature/humidity sensor onto the small Wemos D1 mini ESP8266 board sounds simple and appealing. Alas, experience and measurement has shown that something on the ESP8266 board is producing significant heat. The SHT30 chip may be fairly precise, but when parked next to a heat source, the SHT30's ability to measure the ambient air temperature in a still room isn't very good.\nNext, the other 4 sensors get the jumper treatment. I'll then run them that way for a while to see how much additional adjustment is needed. (There's still one errant sensor producing low temperature readings; I suspect it'll read even lower.)" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
dlsun/symbulate
labs/Lab 6 - Joint and Conditional Distributions.ipynb
mit
[ "Symbulate Lab 6 - Joint and Conditional Distributions\nThis Jupyter notebook provides a template for you to fill in. Read the notebook from start to finish, completing the parts as indicated. To run a cell, make sure the cell is highlighted by clicking on it, then press SHIFT + ENTER on your keyboard. (Alternatively, you can click the \"play\" button in the toolbar above.)\nIn this lab you will use the Symbulate package. Many of the commands are discussed in the Multiple RV Section, the Conditioning Section, or the Graphics Section of the Symbulate documentation. You should use Symbulate commands whenever possible. If you find yourself writing long blocks of Python code, you are probably doing something wrong. For example, you should not need to write any for loops.\nThere are 3 parts, and at the end of each part there are some reflection questions. There is no need to type a response to the reflection questions, but you should think about them and discuss them with your partner to try to make sense of your simulation results.\nWarning: You may notice that many of the cells in this notebook are not editable. This is intentional and for your own safety. We have made these cells read-only so that you don't accidentally modify or delete them. However, you should still be able to execute the code in these cells.", "from symbulate import *\n%matplotlib inline", "Part I: Two Discrete random variables\nRoll a fair six-sided die five times and let $X$ be the largest of the five rolls and $Y$ the smallest.\nBefore proceeding, make some guesses about how the following will behave.\n- Joint distribution of $X$ and $Y$\n- Conditional distribution of $Y$ given $X=5$.\na)\nDefine the random variables $X$ and $Y$.", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "b)\nSimulate 10000 $(X, Y)$ pairs and store the values as xy. Estimate the covariance and the correlation. 
(Hint and hint and hint)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "c)\nMake a scatterplot of the simulated values. (Hint. Note that it is recommnded to use jitter=True when the variables involved are discrete.)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "d)\nMake a tile plot of the simulated values. (Hint)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "e)\nUse simulation to approximate the conditional distribution of $Y$ given $X=5$ and approximate the conditional mean $E(Y | X=5)$ and the conditional standard deviation. (Hint, but also see all of the Conditioning Section.)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "f) Reflection questions\nRecall the guesses you made at the start of the problem, and inspect your results from the previous parts. Can you explain the behavior you observed for the following?\n\nJoint distribution of $X$ and $Y$\nConditional distribution of $Y$ given $X=5$.\n\nTYPE YOUR RESPONSE HERE.\nPart II: Two continuous random variables\nSuppose that the base $U$ and height $V$ of a random rectangle are independent random variables, with each following a Uniform(0, 1) distribution. Let $X$ be the perimeter of the rectangle and $Y$ its area. In this part you will investigate the joint distribution of $X$ and $Y$.\nBefore proceeding, make some guesses about how the following will behave.\n- Joint distribution of $X$ and $Y$\n- Marginal distribution of $Y$\n- Conditional distribution of $Y$ given $X=2$.\na)\nDefine appropriate random variables $U, V, X, Y$. 
(Hint, but also see the Multiple RV Section in general.)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "b)\nSimulate 10000 $(X, Y)$ pairs and store the values as xy. Estimate the covariance and the correlation. (Hint and hint and hint)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "c)\nMake a scatterplot of the simulated values. (Hint)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "d)\nMake a two-dimensional histogram of the simulated values. (Hint)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "e)\nMake a two-dimensional density plot of the simulated values. (Hint)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "f)\nUse simulation to approximate the marginal distribution of $Y$ and approximate its mean and standard deviation.", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "g)\nUse simulation to approximate the conditional distribution of $Y$ given $X=2$ and approximate the conditional mean $E(Y | X=2)$ and the conditional standard deviation. (Warning: be careful! See this hint and especially this hint.)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "h) Reflection questions\nRecall the guesses you made at the start of the problem, and inspect your results from the previous parts. 
Can you explain the behavior you observed for the following?\n\nJoint distribution of $X$ and $Y$\nMarginal distribution of $Y$\nConditional distribution of $Y$ given $X=2$.\n\nTYPE YOUR RESPONSE HERE.\nPart III: Joint Gaussian random variables\nJust like Gaussian (Normal) distributions are the most important probability distributions, joint Gaussian (Multivariate Normal) distributions are the most important joint distributions. In this part you will investigate two random variables which have a joint Gaussian distribution.\nSuppose that SAT Math ($M$) and Reading ($R$) scores of CalPoly students have a Bivariate Normal\n(joint Gaussian) distribution.\n- Math scores have mean 635 and SD 85.\n- Reading scores have mean 595 and SD 70.\n- The correlation between scores is 0.6.\nLet $X = M + R$, the total of the two scores. Let $Y = M- R$, the difference between Math and Reading scores.\na)\nDefine RVs $M, R, X, Y$. (Hint)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "b)\nSimulate 10000 $(M, R)$ pairs. Use the simulation results to approximate $E(M)$, $E(R)$, $SD(M)$, $SD(R)$, and $Corr(M, R)$.", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "c)\nMake a scatterplot of the simulated values. Add histograms of the marginal distributions. (Hint: .plot(type=[\"scatter\", \"marginal\"]).", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "d)\nMake a density plot of the simulated values. Add density plots of the marginal distributions. (Hint: .plot(type=[\"density\", \"marginal\"]).", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "e)\nNow simulate 10000 values of $X = M+R$. 
Plot the approximate distribution of $X$ and estimate $E(X)$ and $SD(X)$.", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "f)\nNow simulate 10000 values of $Y = M - R$. Plot the approximate distribution of $Y$ and estimate $E(Y)$ and $SD(Y)$.", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "g)\nUse simulation to approximate the distribution of $M$ given $R=700$. Make a plot of the approximate distribution and estimate the conditional mean $E(M | R = 700)$ and the conditional standard deviation. (Warning: be careful! See this hint and especially this hint.)", "# Type all of your code for this problem in this cell.\n# Feel free to add additional cells for scratch work, but they will not be graded.", "h) Reflection questions\nInspect your results from the previous parts.\n\nHow would you describe the shape of the scatterplot/density plot of $M$ and $R$?\nHow would you describe the marginal distributions of $M$ and $R$?\nHow does the distribution of $M+R$ compare to the distribution of $M-R$? In particular, how do the SDs compare? How do the SDs compare to the case when $M$ and $R$ are independent? Can you explain why this makes sense?\nHow would you describe the conditional distribution of $M$ given $R=700$? How does it compare to the marginal distribution of $M$? Can you explain why this makes sense? Be sure to consider mean and sd.\n\nTYPE YOUR RESPONSE HERE.\nSubmission Instructions\nBefore you submit this notebook, click the \"Kernel\" drop-down menu at the top of this page and select \"Restart & Run All\". This will ensure that all of the code in your notebook executes properly. Please fix any errors, and repeat the process until the entire notebook executes without any errors." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
francisc0garcia/autonomous_bicycle
docs/python_notebooks/EKF_Design.ipynb
apache-2.0
[ "Extended Kalman Filter design for bicycle's kinematic motion model", "# Import dependencies\nfrom __future__ import division, print_function\n%matplotlib inline\n\nimport scipy\n\nfrom BicycleTrajectory2D import *\nfrom BicycleUtils import *\nfrom FormatUtils import *\nfrom PlotUtils import *", "Simulation of kinematic motion model", "[N, dt, wheel_distance] = [300, 0.05, 1.1] # simulation parameters\nadd_noise = True # Enable/disable gaussian noise\n\n# Define initial state --------------------------------------------------------\ndelta = math.radians(6) # steering angle\nphi = math.radians(0) # Lean angle\nX_init = np.array([1.0, 3.0, 0.0, np.tan(delta)/wheel_distance, 0.0, phi]) # [x, y, z, sigma, psi, phi]\n\n# Define constant inputs ------------------------------------------------------\nU_init = np.array([1.0, 0.01, 0.01]) # [v, phi_dot, delta_dot]\n\n# Define standard deviation for gaussian noise model --------------------------\n# [xf, xr, yf, yr, zf, zr, za, delta, psi, phi]\nif add_noise:\n noise = [0.5, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1, 0.01, 0.01, 0.01]\nelse:\n noise = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n \n# Create object simulator ------------------------------------------------------\nbike = BicycleTrajectory2D(X_init=X_init, U_init=U_init, noise=noise)\n\n# Simulate path ----------------------------------------------------------------\n(gt_sim, zs_sim, time) = bike.simulate_path(N=N, dt=dt)\n\nplot_results(xs=[], zs_sim=zs_sim, gt_sim=gt_sim, time=time, plot_xs=False)", "Implementation of EKF for $\\sigma$-model\nDefine state vector:\n$$ X = \n\\begin{bmatrix}x & y & z & v & \\sigma & \\psi & \\phi \\end{bmatrix}^\\mathsf T$$\nDefine measurement vector:\n$$ Z = \n\\begin{bmatrix}x_f & x_r & y_f & y_r & z_f & z_r & z_a & \\sigma & \\psi & \\phi \\end{bmatrix}^\\mathsf T$$", "class EKF_sigma_model_fusion(object):\n \"\"\"Implements an EKF to bicycle model\"\"\"\n def __init__(self, xs, P, R_std, Q_std, wheel_distance=1.2, dt=0.1, 
alpha=1.0):\n self.w = wheel_distance #Set the distance between the wheels\n self.xs = xs *0.0 #Set the initial state\n self.P = P #Set the initial Covariance\n self.dt = dt\n self.R_std = R_std\n self.Q_std = Q_std\n self.alpha = alpha\n self.K = np.zeros((6, 6)) # Kalman gain\n \n #Set the process noise covariance\n self.Q = np.diag([self.Q_std[0], # v\n self.Q_std[1], # phi_dot\n self.Q_std[2] # delta_dot\n ])\n \n # Set the measurement noise covariance\n self.R = np.diag([self.R_std[0], # xf\n self.R_std[1], # xr\n self.R_std[2], # yf\n self.R_std[3], # yr\n self.R_std[4], # zf\n self.R_std[5], # zr\n self.R_std[6], # za\n self.R_std[7], # sigma\n self.R_std[8], # psi\n self.R_std[9]]) # phi\n \n # Linear relationship H - z = Hx\n self.H = np.zeros((10, 6)) # 10 measurements x 6 state variables\n [self.H[0, 0], self.H[1, 0]] = [1.0, 1.0] # x\n [self.H[2, 1], self.H[3, 1]] = [1.0, 1.0] # y\n [self.H[4, 2], self.H[5, 2], self.H[6, 2]] = [1.0, 1.0, 1.0] # z\n [self.H[7, 3], self.H[8, 4], self.H[9, 5]] = [1.0, 1.0, 1.0] # sigma - psi - phi\n \n def Fx(self, xs, u):\n \"\"\" Linearize the system with the Jacobian of the x \"\"\"\n F_result = np.eye(len(xs))\n \n v = u[0]\n phi_dot = u[1]\n delta_dot = u[2]\n \n sigma = xs[3]\n psi = xs[4]\n phi = xs[5]\n t = self.dt\n\n F04 = -t * v * np.sin(psi) \n F14 = t * v * np.cos(psi)\n F33 = (2 * t * delta_dot * sigma * self.w) + 1\n F43 = (t * v)/np.cos(phi)\n F45 = t * sigma * v * np.sin(phi) / np.cos(phi)**2\n\n F_result[0, 4] = F04\n F_result[1, 4] = F14\n F_result[3, 3] = F33\n F_result[4, 3] = F43\n F_result[4, 5] = F45\n\n return F_result\n \n def Fu(self, xs, u):\n \"\"\" Linearize the system with the Jacobian of the u \"\"\"\n v = u[0]\n phi_dot = u[1]\n delta_dot = u[2]\n \n sigma = xs[3]\n psi = xs[4]\n phi = xs[5]\n t = self.dt\n \n V_result = np.zeros((len(xs), len(u)))\n \n V00 = t * np.cos(psi)\n V10 = t * np.sin(psi)\n V32 = (t/self.w)*((sigma**2)*(self.w**2) + 1)\n V40 = t * sigma / np.cos(phi)\n V51 = t\n\n 
V_result[0, 0] = V00\n V_result[1, 0] = V10\n V_result[3, 2] = V32\n V_result[4, 0] = V40\n V_result[5, 1] = V51\n\n return V_result\n \n def f(self, xs, u):\n \"\"\" Estimate the non-linear state of the system \"\"\"\n v = u[0]\n phi_dot = u[1]\n delta_dot = u[2]\n \n sigma = xs[3]\n psi = xs[4]\n phi = xs[5]\n t = self.dt\n \n fxu_result = np.zeros((len(xs), 1))\n \n fxu_result[0] = xs[0] + t * v * np.cos(psi)\n fxu_result[1] = xs[1] + t * v * np.sin(psi)\n fxu_result[2] = xs[2]\n fxu_result[3] = xs[3] + (t*phi_dot/self.w)*((sigma**2)*(self.w**2) +1)\n fxu_result[4] = xs[4] + t * v * sigma / np.cos(phi)\n fxu_result[5] = xs[5] + t * phi_dot\n \n return fxu_result\n\n def h(self, x):\n \"\"\" takes a state variable and returns the measurement\n that would correspond to that state. \"\"\" \n sensor_out = np.zeros((10, 1))\n sensor_out[0] = x[0]\n sensor_out[1] = x[0]\n sensor_out[2] = x[1]\n sensor_out[3] = x[1]\n sensor_out[4] = x[2]\n sensor_out[5] = x[2]\n sensor_out[6] = x[2]\n sensor_out[7] = x[3] # sigma\n sensor_out[8] = x[4] # psi\n sensor_out[9] = x[5] # phi\n \n return sensor_out\n\n def Prediction(self, u):\n x_ = self.xs\n P_ = self.P\n self.xs = self.f(x_, u)\n self.P = self.alpha * self.Fx(x_, u).dot(P_).dot((self.Fx(x_,u)).T) + \\\n self.Fu(x_,u).dot(self.Q).dot((self.Fu(x_,u)).T)\n \n def Update(self, z):\n \"\"\"Update the Kalman Prediction using the meazurement z\"\"\"\n y = z - self.h(self.xs)\n self.K = self.P.dot(self.H.T).dot(np.linalg.inv(self.H.dot(self.P).dot(self.H.T) + self.R))\n \n self.xs = self.xs + self.K.dot(y)\n self.P = (np.eye(len(self.xs)) - self.K.dot(self.H)).dot(self.P)", "Execute EKF", "np.random.seed(850)\n\n[N, dt, wheel_distance, number_state_variables] = [300, 0.05, 1.1, 6]\ndelta = math.radians(6)\nphi = math.radians(0)\n\nU_init = np.array([1.0, 0.01, 0.01]) # [v, phi_dot, delta_dot]\nX_init = np.array([1.0, 3.0, 0.0, np.tan(delta)/wheel_distance, 0.0, phi]) # [x, y, z, sigma, psi, phi]\n\n# noise = [xf, xr, yf, yr, zf, 
zr, za, delta, psi, phi]\n#noise = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\nnoise = [0.5, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1, 0.01, 0.01, 0.01]\n#noise = [5.5, 5.5, 5.5, 5.5, 5.1, 5.1, 5.1, 1.1, 1.1, 1.1]\n\nbike = BicycleTrajectory2D(X_init=X_init, U_init=U_init, w=wheel_distance, noise=noise)\n\n(gt_sim, zs_sim, time_t) = bike.simulate_path(N=N, dt=dt)\n\nalpha = 1.00\n\n# covariance matrix\nP = np.eye(number_state_variables) * 1e0\n# process noise covariance Q\nQ_std = [(0.10)**2, (0.10)**2, (0.10)**2 ] # v, phi_dot, delta_dot\n# Measurement noise covariance R \n# [xf, xr, yf, yr, zf, zr, za, delta, psi, phi]\nR_std = [0.8**2, 0.8**2, # x\n 0.8**2, 0.8**2, # y\n 0.5**2, 0.5**2, 0.5**2, # z\n 1.5**2, 0.4**2, 1.8**2] # delta - psi - phi\n\nfilter_ekf = EKF_sigma_model_fusion(X_init, P, R_std=R_std, Q_std=Q_std, wheel_distance=wheel_distance, dt=dt, alpha=alpha)\n\nxs = np.zeros((N, number_state_variables))\nps = np.zeros((N, number_state_variables, number_state_variables))\nPU = np.zeros((N, number_state_variables))\nKU = np.zeros((N, number_state_variables))\ntime_t = np.zeros((N, 1))\nt = 0\nz_t = np.zeros((10, 1))\n\nfor i in range(N): \n P = filter_ekf.P\n K = filter_ekf.K\n PU[i] = [P[0,0], P[1,1], P[2,2], P[3,3], P[4,4], P[5,5]]\n KU[i] = [K[0,0], K[1,1], K[2,2], K[3,3], K[4,4], K[5,5]]\n xs[i] = filter_ekf.xs.T\n xs[i, 3] = np.arctan2(xs[i, 3], 1/wheel_distance) # sigma to delta conversion\n \n # predict\n filter_ekf.Prediction(U_init)\n \n # update measurements [xf, xr, yf, yr, zf, zr, za, delta, psi, phi]\n z_t[0] = zs_sim[i].xf\n z_t[1] = zs_sim[i].xr\n z_t[2] = zs_sim[i].yf\n z_t[3] = zs_sim[i].yr\n z_t[4] = zs_sim[i].zf\n z_t[5] = zs_sim[i].zr\n z_t[6] = zs_sim[i].za\n z_t[7] = np.tan(zs_sim[i].delta)/wheel_distance # sigma\n z_t[8] = zs_sim[i].psi # psi\n z_t[9] = zs_sim[i].phi # phi\n\n filter_ekf.Update(z_t)\n \n cov = np.array([[P[0, 0], P[2, 0]], \n [P[0, 2], P[2, 2]]])\n mean = (xs[i, 0], xs[i, 1])\n\n #plot_covariance_ellipse(mean, cov, 
fc='g', std=3, alpha=0.3, title=\"covariance\")\n \n time_t[i] = t\n t += dt\n filter_ekf.time_t = t\n\nplot_results(xs=xs, zs_sim=zs_sim, gt_sim=gt_sim, time=time_t, plot_xs=True)", "Plot Kalman gain and process covariance", "fig = plt.figure(figsize=(12,8))\nplt.plot(time_t,KU[:,0], label='$x$')\nplt.plot(time_t,KU[:,1], label='$y$')\nplt.plot(time_t,KU[:,2], label='$z$')\nplt.plot(time_t,KU[:,3], label='$\\sigma$')\nplt.plot(time_t,KU[:,4], label='$\\psi$')\nplt.plot(time_t,KU[:,5], label='$\\phi$')\nplt.title(\"Kalman gain\")\nplt.legend(bbox_to_anchor=(0., 0.91, 1., .06), loc='best',\n ncol=9, borderaxespad=0.,prop={'size':16})\n\nfig = plt.figure(figsize=(12,8))\nplt.semilogy(time_t,PU[:,0], label='$x$')\nplt.step(time_t,PU[:,1], label='$y$')\nplt.step(time_t,PU[:,2], label='$z$')\nplt.step(time_t,PU[:,3], label='$\\sigma$')\nplt.step(time_t,PU[:,4], label='$\\psi$')\nplt.step(time_t,PU[:,5], label='$\\phi$')\nplt.title(\"Process covariance\")\nplt.legend(bbox_to_anchor=(0., 0.91, 1., .06), loc='best',\n ncol=9, borderaxespad=0.,prop={'size':16})" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
paultheastronomer/OAD-Data-Science-Toolkit
Teaching Materials/Machine Learning/Supervised Learning/Examples/SNN/Simple_Neural_Network.ipynb
gpl-3.0
[ "First neural network\nWe will build a simple feed forward neural network with Keras. We will start with a two layer neural network for simplicity.\nImport all necessary python packages", "# For simple array operations\nimport numpy as np \n\n# To construct the model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.optimizers import SGD\n\n# Some utility for splitting data and printing the classification report\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.utils import shuffle", "Load some data\nThe dataset in this experiment is a publicly available pulsar dataset from Rob Lyon's paper. It is in a simple ASCII format delimited by commas. There are 8 statistical features that represent different measures of the de-dispersed pulse profile of pulsar and non pulsar candidates. The last column is a label column where '1' represents a pulsar and '0' represents a non pulsar candidate.", "dataset = np.loadtxt('../Data/HTRU_2.csv',delimiter=',')\n\nprint 'The dataset has %d rows and %d features' %(dataset.shape[0],dataset.shape[1]-1)\n\n# Split into features and labels\nfor i in range(0,10):\n    dataset = shuffle(dataset)\n    \nfeatures = dataset[:,0:-1]\nlabels = dataset[:,-1]", "Split the data into training and testing data", "traindata,testdata,trainlabels,testlabels = train_test_split(features,labels,test_size=0.3)\n\ntrainlabels = trainlabels.astype('int')\ntestlabels = testlabels.astype('int')", "Show some info about the split", "print 'Number of training samples : %d'%(traindata.shape[0])\nprint 'Number of test samples : %d'%(testdata.shape[0])\n", "Construct the model", "model = Sequential() # Our model is a simple feedforward model\nmodel.add(Dense(64,input_shape=(8,))) # The first layer holds the input for in which our case the there are 8 features.\nmodel.add(Activation('relu')) # First activation layer is rectified linear unit 
(RELU)\nmodel.add(Dense(256)) # Second layer has 256 neurons \nmodel.add(Activation('relu')) # Second RELU activation\nmodel.add(Dense(1)) # Third layer has 1 neuron because we have only one outcome - pulsar or non pulsar\nmodel.add(Activation('softmax')) # The Scoring layer which normalizes the scores", "Print the mode summary\nThis step makes sure that our model is correctly defined and there is no error in the model definition.\nIt will also show the sizes of each layers", "model.summary()", "Compile the model\nThis step defines the parameters for training", "model.compile(loss='binary_crossentropy', # Loss function for binary classification\n optimizer=SGD(), # Optimizer for learning, in this case Stochastic Gradient Descent (SGD)\n metrics=['accuracy']) # Evaluation function\"", "Train the model\nIn this step we will train the network and also define the number of epochs and batch size for training.", "batch_size = 100\nn_epochs = 10\n\ntraining = model.fit(traindata,trainlabels,\n nb_epoch=n_epochs,\n batch_size=batch_size,\n validation_data=(testdata, testlabels),\n verbose=1)\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sassoftware/sas-viya-programming
python/titanic/TitanicExample.ipynb
apache-2.0
[ "Example: Model Titanic Survival\nThis example is provided to demonstrate some of the typical programming activities for working with Python in a SAS Viya environment to run actions in SAS Cloud Analytic Services. The actions that are used in the example require SAS Visual Data Mining and Machine Learning.\nFor more information, see http://support.sas.com/documentation/prod-p/vdmml/index.html.", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython.core.display import display, HTML\n%matplotlib inline\n\nimport swat\ns = swat.CAS('cloud.example.com', 5570)", "Get the Data", "ulresult = s.upload('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.csv')\n\ntitanic3 = ulresult.casTable\n\ntype(titanic3)\n\ntitanic3.table.columnInfo()", "Add a Computed Column\nSome of the columns in the data are problematic for modeling:\n* The name column should not have a bearing on the analysis.\n* The boat and body columns are proxies for the response variable, survived.\n* The cabin column is similar to name in that it is too specific to be generalized. A computed column, deck, is created because it is slightly more general than cabin.", "# Create a computed variable.\n\ntitanic3.computedVars = ['deck'] # 1\ntitanic3.computedVarsProgram = \\\n \"if cabin ne '' then deck = ksubstr(cabin,1,1); else deck = '';\"\n\n\nnumeric=['pclass', 'survived', 'age', 'sibsp', 'parch', 'fare']\n\n# Remove boat and body because they are proxies for survived.\n# Remove ticket and cabin. Use the computed column, deck, instead.\nchar = ['sex', 'deck', 'embarked', 'home.dest']\n\nall = numeric + char", "Group By Analysis: Descriptive Statistics\nThe simple.summary action is used to provide some descriptive statistics. 
The groupBy parameter is set on the Titanic3 object so that the statistics are shown for survivors and those that did not survive.", "# The numeric variable was defined earlier.\nresults = titanic3[numeric].groupby(\"survived\").simple.summary()\n\nresultColumns = ['Column', 'Min', 'Max', 'N', 'NMiss', 'Mean', 'Sum', 'Std', 'StdErr'];\n\ndisplay(HTML('<h3>Perished</h3>'))\ndisplay(results['ByGroup1.Summary'][resultColumns]) # 1\n\ndisplay(HTML('<h3>Survived</h3>'))\ndisplay(results['ByGroup2.Summary'][resultColumns])", "Sample the Data", "s.builtins.loadActionSet('sampling')\n\n# The sampling.stratified action does not accept the vars parameter.\n# Instead, copyVars is used to select the columns to copy to the output table.\nif 'vars' in titanic3.params:\n del titanic3.vars\n\n# Temporarily set a groupBy parameter.\nwith titanic3:\n titanic3.groupBy={'survived'}\n titanic3.sampling.stratified(\n partInd=True, # 1\n samppct=40, # 2\n seed=1234,\n output={\n 'casout':{'name':'titanic3part', 'replace':True},\n 'copyVars':all\n }\n )\n\ntitanic3.table.dropTable() # 3\n\ntitanic3part = s.CASTable('titanic3part') # 4\nci = titanic3part.columnInfo()\ndisplay(ci)", "Check that Sampling is Even\nAs long as each partition has approximately .38 for the mean, then survivor rows are distributed evenly in the partitions.", "survSummary = titanic3part['survived'].groupby('_partind_').simple.summary()\n \nresultColumns = ['Column', 'N', 'NMiss', 'Mean', 'Sum', 'Std', 'StdErr']\n\ndisplay(survSummary['ByGroupInfo'])\ndisplay(survSummary['ByGroup1.Summary'][resultColumns])\ndisplay(survSummary['ByGroup2.Summary'][resultColumns])", "Train a Model\nThe casOut parameter that is shown in the example is used to store the model as an in-memory table. 
The next step of this example is to show how to score data with the model.", "s.builtins.loadActionSet('decisionTree') # 1\n\ntraining = titanic3part.query('0 = _partind_') # 2\n\ntrainingResults = training.forestTrain(\n target='survived',\n inputs=all,\n nominals=char + ['pclass', 'survived'],\n casOut={'name':'forestModel', 'replace':True},\n seed=1234,\n binOrder=True,\n varImp=True\n )\n\ndisplay(trainingResults)", "Use the Model for Scoring\nIn this example, both the training data and the validation data are scored. This is done so that we can assess the effectiveness of the model for predicting whether someone survives on the Titanic.\nThe in-memory table, forestModel, is used as the model. The scoring output is stored in an in-memory table that is named forestScored.", "forestModel = s.CASTable('forestModel')\n\ntitanic3part.forestScore(\n modelTable=forestModel, \n copyVars=['survived', '_partind_'],\n casOut={'name':'forestScored', 'replace':True}, \n vote='prob'\n)", "Assess the Model\nThe assess action is part of the percentile action set. 
You can run the loadActionSet action to ensure that the action is available to your session.", "s.builtins.loadActionSet('percentile')\n\nforestScored = s.CASTable('forestScored') # 1\nforestScored.groupBy='_PartInd_' # 2\nforestScored.computedVars=['P1', 'P0'] # 3\nforestScored.computedVarsProgram=''' \n if '1' eq strip(_RF_PredName_) then do;\n P1 = _RF_PredP_;\n P0 = 1 - _RF_PredP_;\n end;\n else do;\n P1 = 1 - _RF_PredP_;\n P0 = _RF_PredP_;\n end;\n''' # 4\n\nforestScored.percentile.assess(\n casOut={'name':'forestAssess', 'replace':True},\n nbins=10,\n cutStep = 0.01,\n inputs=['P1'],\n response='survived', \n event='1', \n pVar=['P0'], \n pEvent='0'\n)", "Plot ROC", "forestAssess_ROC = \\\n s.CASTable('forestAssess_ROC', where='1 = _partind_') # 1\n\nout2 = forestAssess_ROC.to_frame()\n\nplt.figure(figsize=(8,8))\nplt.plot(out2._FPR_,out2._Sensitivity_,'bo-',linewidth=2)\nplt.plot(pd.Series(range(0,11,1))/10,pd.Series(range(0,11,1))/10,'k--',linewidth=1)\nplt.xlabel('False Positive Rate')\nplt.ylabel('Correct Classification Rate')\nplt.grid(True)\nplt.title('ROC Curve')\nplt.show()", "Plot Lift", "forestAssess = \\\n s.CASTable('forestAssess', where='1 = _partind_') # 1\nlift = forestAssess.to_frame()\n\nplt.figure(figsize=(8,8))\nplt.plot(lift._Depth_, lift._Lift_,'bo-',linewidth=2)\nplt.xlabel('Percentile')\nplt.ylabel('Lift')\nplt.grid(True)\nplt.title('Lift Chart')\nplt.show()\n\n\ns.close()", "Copyright SAS Institute\nDisclaimer: SAS may reference other websites or content or resources for use at Customer's sole discretion. SAS has no control over any websites or resources that are provided by companies or persons other than SAS. Customer acknowledges and agrees that SAS is not responsible for the availability or use of any such external sites or resources, and does not endorse any advertising, products, or other materials on or available from such websites or resources. 
Customer acknowledges and agrees that SAS is not liable for any loss or damage that may be incurred by Customer or its end users as a result of the availability or use of those external sites or resources, or as a result of any reliance placed by Customer or its end users on the completeness, accuracy, or existence of any advertising, products, or other materials on, or available from, such websites or resources." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
philferriere/tfoptflow
tfoptflow/pwcnet_train_lg-6-2-cyclic-chairsthingsmix.ipynb
mit
[ "PWC-Net-large model training (with cyclical learning rate schedule)\nIn this notebook, we:\n- Use a PWC-Net-large model (with dense and residual connections), 6 level pyramid, uspample level 2 by 4 as the final flow prediction\n- Train the model on a mix of the FlyingChairs and FlyingThings3DHalfRes dataset using a Cyclic<sub>short</sub> schedule of our own\n- The Cyclic<sub>short</sub> schedule oscillates between 5e-04 and 1e-05 for 200,000 steps\nBelow, look for TODO references and customize this notebook based on your own machine setup.\nReference\n[2018a]<a name=\"2018a\"></a> Sun et al. 2018. PWC-Net: CNNs for Optical Flow Using Pyramid, Warping, and Cost Volume. [arXiv] [web] [PyTorch (Official)] [Caffe (Official)]", "\"\"\"\npwcnet_train.ipynb\n\nPWC-Net model training.\n\nWritten by Phil Ferriere\n\nLicensed under the MIT License (see LICENSE for details)\n\nTensorboard:\n [win] tensorboard --logdir=E:\\\\repos\\\\tf-optflow\\\\tfoptflow\\\\pwcnet-lg-6-2-cyclic-chairsthingsmix\n [ubu] tensorboard --logdir=/media/EDrive/repos/tf-optflow/tfoptflow/pwcnet-lg-6-2-cyclic-chairsthingsmix\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport sys\nfrom copy import deepcopy\n\nfrom dataset_base import _DEFAULT_DS_TRAIN_OPTIONS\nfrom dataset_flyingchairs import FlyingChairsDataset\nfrom dataset_flyingthings3d import FlyingThings3DHalfResDataset\nfrom dataset_mixer import MixedDataset\nfrom model_pwcnet import ModelPWCNet, _DEFAULT_PWCNET_TRAIN_OPTIONS", "TODO: Set this first!", "# TODO: You MUST set dataset_root to the correct path on your machine!\nif sys.platform.startswith(\"win\"):\n _DATASET_ROOT = 'E:/datasets/'\nelse:\n _DATASET_ROOT = '/media/EDrive/datasets/'\n_FLYINGCHAIRS_ROOT = _DATASET_ROOT + 'FlyingChairs_release'\n_FLYINGTHINGS3DHALFRES_ROOT = _DATASET_ROOT + 'FlyingThings3D_HalfRes'\n \n# TODO: You MUST adjust the settings below based on the number of GPU(s) used for training\n# Set controller device and devices\n# A 
one-gpu setup would be something like controller='/device:GPU:0' and gpu_devices=['/device:GPU:0']\n# Here, we use a dual-GPU setup, as shown below\ngpu_devices = ['/device:GPU:0', '/device:GPU:1']\ncontroller = '/device:CPU:0'\n\n# TODO: You MUST adjust this setting below based on the amount of memory on your GPU(s)\n# Batch size\nbatch_size = 8", "Pre-train on FlyingChairs+FlyingThings3DHalfRes mix\nLoad the dataset", "# TODO: You MUST set the batch size based on the capabilities of your GPU(s) \n# Load train dataset\nds_opts = deepcopy(_DEFAULT_DS_TRAIN_OPTIONS)\nds_opts['in_memory'] = False # Too many samples to keep in memory at once, so don't preload them\nds_opts['aug_type'] = 'heavy' # Apply all supported augmentations\nds_opts['batch_size'] = batch_size * len(gpu_devices) # Use a multiple of 8; here, 16 for dual-GPU mode (Titan X & 1080 Ti)\nds_opts['crop_preproc'] = (256, 448) # Crop to a smaller input size\nds1 = FlyingChairsDataset(mode='train_with_val', ds_root=_FLYINGCHAIRS_ROOT, options=ds_opts)\nds_opts['type'] = 'into_future'\nds2 = FlyingThings3DHalfResDataset(mode='train_with_val', ds_root=_FLYINGTHINGS3DHALFRES_ROOT, options=ds_opts)\nds = MixedDataset(mode='train_with_val', datasets=[ds1, ds2], options=ds_opts)\n\n# Display dataset configuration\nds.print_config()", "Configure the training", "# Start from the default options\nnn_opts = deepcopy(_DEFAULT_PWCNET_TRAIN_OPTIONS)\nnn_opts['verbose'] = True\nnn_opts['ckpt_dir'] = './pwcnet-lg-6-2-cyclic-chairsthingsmix/'\nnn_opts['batch_size'] = ds_opts['batch_size']\nnn_opts['x_shape'] = [2, ds_opts['crop_preproc'][0], ds_opts['crop_preproc'][1], 3]\nnn_opts['y_shape'] = [ds_opts['crop_preproc'][0], ds_opts['crop_preproc'][1], 2]\nnn_opts['use_tf_data'] = True # Use tf.data reader\nnn_opts['gpu_devices'] = gpu_devices\nnn_opts['controller'] = controller\n\n# Use the PWC-Net-large model in quarter-resolution mode\nnn_opts['use_dense_cx'] = True\nnn_opts['use_res_cx'] = True\nnn_opts['pyr_lvls'] = 
6\nnn_opts['flow_pred_lvl'] = 2\n\n# Set the learning rate schedule. This schedule is for a single GPU using a batch size of 8.\n# Below,we adjust the schedule to the size of the batch and the number of GPUs.\nnn_opts['lr_policy'] = 'cyclic'\nnn_opts['cyclic_lr_max'] = 5e-04 # Anything higher will generate NaNs\nnn_opts['cyclic_lr_base'] = 1e-05\nnn_opts['cyclic_lr_stepsize'] = 20000\nnn_opts['max_steps'] = 200000\n\n# Below,we adjust the schedule to the size of the batch and our number of GPUs (2).\nnn_opts['cyclic_lr_stepsize'] /= len(gpu_devices)\nnn_opts['max_steps'] /= len(gpu_devices)\nnn_opts['cyclic_lr_stepsize'] = int(nn_opts['cyclic_lr_stepsize'] / (float(ds_opts['batch_size']) / 8))\nnn_opts['max_steps'] = int(nn_opts['max_steps'] / (float(ds_opts['batch_size']) / 8))\n\n# Instantiate the model and display the model configuration\nnn = ModelPWCNet(mode='train_with_val', options=nn_opts, dataset=ds)\nnn.print_config()", "Train the model", "# Train the model\nnn.train()", "Training log\nHere are the training curves for the run above:\n\n\n\nHere are the predictions issued by the model for a few validation samples:" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ud3sh/coursework
deeplearning.ai/coursera-improving-neural-networks/week2/Optimization_methods_v1b.ipynb
unlicense
[ "Optimization Methods\nUntil now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result. \nGradient descent goes \"downhill\" on a cost function $J$. Think of it as trying to do this: \n<img src=\"images/cost.jpg\" style=\"width:650px;height:300px;\">\n<caption><center> <u> Figure 1 </u>: Minimizing the cost is like finding the lowest point in a hilly landscape<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>\nNotations: As usual, $\\frac{\\partial J}{\\partial a } = $ da for any variable a.\nTo get started, run the following code to import the libraries you will need.\n<font color='darkblue'> Updates to Assignment <font>\nIf you were working on a previous version\n\nThe current notebook filename is version \"Optimization_methods_v1b\". \nYou can find your work in the file directory as version \"Optimization methods'.\nTo see the file directory, click on the Coursera logo at the top left of the notebook.\n\nList of Updates\n\nop_utils is now opt_utils_v1a. Assertion statement in initialize_parameters is fixed.\nopt_utils_v1a: compute_cost function now accumulates total cost of the batch without taking the average (average is taken for entire epoch instead).\nIn model function, the total cost per mini-batch is accumulated, and the average of the entire epoch is taken as the average cost. 
So the plot of the cost function over time is now a smooth downward curve instead of an oscillating curve.\nPrint statements used to check each function are reformatted, and 'expected output` is reformatted to match the format of the print statements (for easier visual comparisons).", "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nimport math\nimport sklearn\nimport sklearn.datasets\n\nfrom opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation\nfrom opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset\nfrom testCases import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'", "1 - Gradient Descent\nA simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent. \nWarm-up exercise: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$: \n$$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{1}$$\n$$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{2}$$\nwhere L is the number of layers and $\\alpha$ is the learning rate. All parameters should be stored in the parameters dictionary. Note that the iterator l starts at 0 in the for loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. 
You need to shift l to l+1 when coding.", "# GRADED FUNCTION: update_parameters_with_gd\n\ndef update_parameters_with_gd(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using one step of gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters to be updated:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients to update each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n learning_rate -- the learning rate, scalar.\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads['dW' + str(l+1)] \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads['db' + str(l+1)] \n ### END CODE HERE ###\n \n return parameters\n\nparameters, grads, learning_rate = update_parameters_with_gd_test_case()\n\nparameters = update_parameters_with_gd(parameters, grads, learning_rate)\nprint(\"W1 =\\n\" + str(parameters[\"W1\"]))\nprint(\"b1 =\\n\" + str(parameters[\"b1\"]))\nprint(\"W2 =\\n\" + str(parameters[\"W2\"]))\nprint(\"b2 =\\n\" + str(parameters[\"b2\"]))", "Expected Output:\nW1 =\n[[ 1.63535156 -0.62320365 -0.53718766]\n [-1.07799357 0.85639907 -2.29470142]]\nb1 =\n[[ 1.74604067]\n [-0.75184921]]\nW2 =\n[[ 0.32171798 -0.25467393 1.46902454]\n [-2.05617317 -0.31554548 -0.3756023 ]\n [ 1.1404819 -1.09976462 -0.1612551 ]]\nb2 =\n[[-0.88020257]\n [ 0.02561572]\n [ 0.57539477]]\nA variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. 
What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent. \n\n(Batch) Gradient Descent:\n\n``` python\nX = data_input\nY = labels\nparameters = initialize_parameters(layers_dims)\nfor i in range(0, num_iterations):\n # Forward propagation\n a, caches = forward_propagation(X, parameters)\n # Compute cost.\n cost += compute_cost(a, Y)\n # Backward propagation.\n grads = backward_propagation(a, caches, parameters)\n # Update parameters.\n parameters = update_parameters(parameters, grads)\n```\n\nStochastic Gradient Descent:\n\npython\nX = data_input\nY = labels\nparameters = initialize_parameters(layers_dims)\nfor i in range(0, num_iterations):\n for j in range(0, m):\n # Forward propagation\n a, caches = forward_propagation(X[:,j], parameters)\n # Compute cost\n cost += compute_cost(a, Y[:,j])\n # Backward propagation\n grads = backward_propagation(a, caches, parameters)\n # Update parameters.\n parameters = update_parameters(parameters, grads)\nIn Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will \"oscillate\" toward the minimum rather than converge smoothly. Here is an illustration of this: \n<img src=\"images/kiank_sgd.png\" style=\"width:750px;height:250px;\">\n<caption><center> <u> <font color='purple'> Figure 1 </u><font color='purple'> : SGD vs GD<br> \"+\" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>\nNote also that implementing SGD requires 3 for-loops in total:\n1. Over the number of iterations\n2. Over the $m$ training examples\n3. 
Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)\nIn practice, you'll often get faster results if you do not use neither the whole training set, nor only one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.\n<img src=\"images/kiank_minibatch.png\" style=\"width:750px;height:250px;\">\n<caption><center> <u> <font color='purple'> Figure 2 </u>: <font color='purple'> SGD vs Mini-Batch GD<br> \"+\" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>\n<font color='blue'>\nWhat you should remember:\n- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.\n- You have to tune a learning rate hyperparameter $\\alpha$.\n- With a well-turned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).\n2 - Mini-Batch Gradient descent\nLet's learn how to build mini-batches from the training set (X, Y).\nThere are two steps:\n- Shuffle: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches. \n<img src=\"images/kiank_shuffle.png\" style=\"width:550px;height:300px;\">\n\nPartition: Partition the shuffled (X, Y) into mini-batches of size mini_batch_size (here 64). 
Note that the number of training examples is not always divisible by mini_batch_size. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full mini_batch_size, it will look like this: \n\n<img src=\"images/kiank_partition.png\" style=\"width:550px;height:300px;\">\nExercise: Implement random_mini_batches. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:\npython\nfirst_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]\nsecond_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]\n...\nNote that the last mini-batch might end up smaller than mini_batch_size=64. Let $\\lfloor s \\rfloor$ represents $s$ rounded down to the nearest integer (this is math.floor(s) in Python). If the total number of examples is not a multiple of mini_batch_size=64 then there will be $\\lfloor \\frac{m}{mini_batch_size}\\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m-mini__batch__size \\times \\lfloor \\frac{m}{mini_batch_size}\\rfloor$).", "# GRADED FUNCTION: random_mini_batches\n\ndef random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \"\"\"\n Creates a list of random minibatches from (X, Y)\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n mini_batch_size -- size of the mini-batches, integer\n \n Returns:\n mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n \"\"\"\n \n np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n m = X.shape[1] # number of training examples\n mini_batches = []\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((1,m))\n\n # 
Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, mini_batch_size * k : mini_batch_size * (k + 1)]\n mini_batch_Y = shuffled_Y[:, mini_batch_size * k : mini_batch_size * (k + 1)]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, mini_batch_size * k : m]\n mini_batch_Y = shuffled_Y[:, mini_batch_size * k : m]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches\n\nX_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()\nmini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)\n\nprint (\"shape of the 1st mini_batch_X: \" + str(mini_batches[0][0].shape))\nprint (\"shape of the 2nd mini_batch_X: \" + str(mini_batches[1][0].shape))\nprint (\"shape of the 3rd mini_batch_X: \" + str(mini_batches[2][0].shape))\nprint (\"shape of the 1st mini_batch_Y: \" + str(mini_batches[0][1].shape))\nprint (\"shape of the 2nd mini_batch_Y: \" + str(mini_batches[1][1].shape)) \nprint (\"shape of the 3rd mini_batch_Y: \" + str(mini_batches[2][1].shape))\nprint (\"mini batch sanity check: \" + str(mini_batches[0][0][0][0:3]))", "Expected Output:\n<table style=\"width:50%\"> \n <tr>\n <td > **shape of the 1st mini_batch_X** </td> \n <td > (12288, 64) </td> \n </tr> \n\n <tr>\n <td > **shape of the 2nd mini_batch_X** </td> \n <td > (12288, 64) </td> \n </tr> \n\n <tr>\n <td > **shape of the 3rd mini_batch_X** </td> \n <td > (12288, 20) </td> \n </tr>\n <tr>\n <td > **shape of the 1st mini_batch_Y** </td> \n 
<td > (1, 64) </td> \n </tr> \n <tr>\n <td > **shape of the 2nd mini_batch_Y** </td> \n <td > (1, 64) </td> \n </tr> \n <tr>\n <td > **shape of the 3rd mini_batch_Y** </td> \n <td > (1, 20) </td> \n </tr> \n <tr>\n <td > **mini batch sanity check** </td> \n <td > [ 0.90085595 -0.7612069 0.2344157 ] </td> \n </tr>\n\n</table>\n\n<font color='blue'>\nWhat you should remember:\n- Shuffling and Partitioning are the two steps required to build mini-batches\n- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.\n3 - Momentum\nBecause mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will \"oscillate\" toward convergence. Using momentum can reduce these oscillations. \nMomentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the \"velocity\" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill. \n<img src=\"images/opt_momentum.png\" style=\"width:400px;height:250px;\">\n<caption><center> <u><font color='purple'>Figure 3</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>\nExercise: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. 
Its keys are the same as those in the grads dictionary, that is:\nfor $l =1,...,L$:\npython\nv[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\nv[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\nNote that the iterator l starts at 0 in the for loop while the first parameters are v[\"dW1\"] and v[\"db1\"] (that's a \"one\" on the superscript). This is why we are shifting l to l+1 in the for loop.", "# GRADED FUNCTION: initialize_velocity\n\ndef initialize_velocity(parameters):\n \"\"\"\n Initializes the velocity as a python dictionary with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n \n Returns:\n v -- python dictionary containing the current velocity.\n v['dW' + str(l)] = velocity of dWl\n v['db' + str(l)] = velocity of dbl\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n \n # Initialize velocity\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n ### END CODE HERE ###\n \n return v\n\nparameters = initialize_velocity_test_case()\n\nv = initialize_velocity(parameters)\nprint(\"v[\\\"dW1\\\"] =\\n\" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] =\\n\" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] =\\n\" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] =\\n\" + str(v[\"db2\"]))", "Expected Output:\nv[\"dW1\"] =\n[[ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db1\"] =\n[[ 0.]\n [ 0.]]\nv[\"dW2\"] =\n[[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db2\"] =\n[[ 0.]\n [ 0.]\n [ 0.]]\nExercise: Now, implement the parameters update with momentum. 
The momentum update rule is, for $l = 1, ..., L$: \n$$ \\begin{cases}\nv_{dW^{[l]}} = \\beta v_{dW^{[l]}} + (1 - \\beta) dW^{[l]} \\\nW^{[l]} = W^{[l]} - \\alpha v_{dW^{[l]}}\n\\end{cases}\\tag{3}$$\n$$\\begin{cases}\nv_{db^{[l]}} = \\beta v_{db^{[l]}} + (1 - \\beta) db^{[l]} \\\nb^{[l]} = b^{[l]} - \\alpha v_{db^{[l]}} \n\\end{cases}\\tag{4}$$\nwhere L is the number of layers, $\\beta$ is the momentum and $\\alpha$ is the learning rate. All parameters should be stored in the parameters dictionary. Note that the iterator l starts at 0 in the for loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a \"one\" on the superscript). So you will need to shift l to l+1 when coding.", "# GRADED FUNCTION: update_parameters_with_momentum\n\ndef update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):\n \"\"\"\n Update parameters using Momentum\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- python dictionary containing the current velocity:\n v['dW' + str(l)] = ...\n v['db' + str(l)] = ...\n beta -- the momentum hyperparameter, scalar\n learning_rate -- the learning rate, scalar\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- python dictionary containing your updated velocities\n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n \n # Momentum update for each parameter\n for l in range(L):\n \n ### START CODE HERE ### (approx. 
4 lines)\n # compute velocities\n v[\"dW\" + str(l+1)] = beta * v[\"dW\" + str(l+1)] + (1 - beta)* grads[\"dW\" + str(l+1)] \n v[\"db\" + str(l+1)] = beta * v[\"db\" + str(l+1)] + (1 - beta)* grads[\"db\" + str(l+1)]\n # update parameters\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * v[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * v[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters, v\n\nparameters, grads, v = update_parameters_with_momentum_test_case()\n\nparameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)\nprint(\"W1 = \\n\" + str(parameters[\"W1\"]))\nprint(\"b1 = \\n\" + str(parameters[\"b1\"]))\nprint(\"W2 = \\n\" + str(parameters[\"W2\"]))\nprint(\"b2 = \\n\" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \\n\" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \\n\" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \\n\" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = v\" + str(v[\"db2\"]))", "Expected Output:\nW1 = \n[[ 1.62544598 -0.61290114 -0.52907334]\n [-1.07347112 0.86450677 -2.30085497]]\nb1 = \n[[ 1.74493465]\n [-0.76027113]]\nW2 = \n[[ 0.31930698 -0.24990073 1.4627996 ]\n [-2.05974396 -0.32173003 -0.38320915]\n [ 1.13444069 -1.0998786 -0.1713109 ]]\nb2 = \n[[-0.87809283]\n [ 0.04055394]\n [ 0.58207317]]\nv[\"dW1\"] = \n[[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]]\nv[\"db1\"] = \n[[-0.01228902]\n [-0.09357694]]\nv[\"dW2\"] = \n[[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]]\nv[\"db2\"] = v[[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]\nNote that:\n- The velocity is initialized with zeros. So the algorithm will take a few iterations to \"build up\" velocity and start to take bigger steps.\n- If $\\beta = 0$, then this just becomes standard gradient descent without momentum. 
\nHow do you choose $\\beta$?\n\nThe larger the momentum $\\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\\beta$ is too big, it could also smooth out the updates too much. \nCommon values for $\\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\\beta = 0.9$ is often a reasonable default. \nTuning the optimal $\\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$. \n\n<font color='blue'>\nWhat you should remember:\n- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.\n- You have to tune a momentum hyperparameter $\\beta$ and a learning rate $\\alpha$.\n4 - Adam\nAdam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum. \nHow does Adam work?\n1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction). \n2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction). \n3. 
It updates parameters in a direction based on combining information from \"1\" and \"2\".\nThe update rule is, for $l = 1, ..., L$: \n$$\\begin{cases}\nv_{dW^{[l]}} = \\beta_1 v_{dW^{[l]}} + (1 - \\beta_1) \\frac{\\partial \\mathcal{J} }{ \\partial W^{[l]} } \\\nv^{corrected}{dW^{[l]}} = \\frac{v{dW^{[l]}}}{1 - (\\beta_1)^t} \\\ns_{dW^{[l]}} = \\beta_2 s_{dW^{[l]}} + (1 - \\beta_2) (\\frac{\\partial \\mathcal{J} }{\\partial W^{[l]} })^2 \\\ns^{corrected}{dW^{[l]}} = \\frac{s{dW^{[l]}}}{1 - (\\beta_2)^t} \\\nW^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}{dW^{[l]}}}{\\sqrt{s^{corrected}{dW^{[l]}}} + \\varepsilon}\n\\end{cases}$$\nwhere:\n- t counts the number of steps taken of Adam \n- L is the number of layers\n- $\\beta_1$ and $\\beta_2$ are hyperparameters that control the two exponentially weighted averages. \n- $\\alpha$ is the learning rate\n- $\\varepsilon$ is a very small number to avoid dividing by zero\nAs usual, we will store all parameters in the parameters dictionary \nExercise: Initialize the Adam variables $v, s$ which keep track of the past information.\nInstruction: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for grads, that is:\nfor $l = 1, ..., L$:\n```python\nv[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\nv[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\ns[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\ns[\"db\" + str(l+1)] = ... 
#(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n```", "# GRADED FUNCTION: initialize_adam\n\ndef initialize_adam(parameters) :\n \"\"\"\n Initializes v and s as two python dictionaries with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n \n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters[\"W\" + str(l)] = Wl\n parameters[\"b\" + str(l)] = bl\n \n Returns: \n v -- python dictionary that will contain the exponentially weighted average of the gradient.\n v[\"dW\" + str(l)] = ...\n v[\"db\" + str(l)] = ...\n s -- python dictionary that will contain the exponentially weighted average of the squared gradient.\n s[\"dW\" + str(l)] = ...\n s[\"db\" + str(l)] = ...\n\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n s = {}\n \n # Initialize v, s. Input: \"parameters\". Outputs: \"v, s\".\n for l in range(L):\n ### START CODE HERE ### (approx. 4 lines)\n v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n s[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n s[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n ### END CODE HERE ###\n \n return v, s\n\nparameters = initialize_adam_test_case()\n\nv, s = initialize_adam(parameters)\nprint(\"v[\\\"dW1\\\"] = \\n\" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \\n\" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \\n\" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \\n\" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \\n\" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \\n\" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \\n\" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \\n\" + str(s[\"db2\"]))", "Expected Output:\nv[\"dW1\"] = \n[[ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db1\"] = \n[[ 0.]\n [ 0.]]\nv[\"dW2\"] = \n[[ 0. 0. 
0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db2\"] = \n[[ 0.]\n [ 0.]\n [ 0.]]\ns[\"dW1\"] = \n[[ 0. 0. 0.]\n [ 0. 0. 0.]]\ns[\"db1\"] = \n[[ 0.]\n [ 0.]]\ns[\"dW2\"] = \n[[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\ns[\"db2\"] = \n[[ 0.]\n [ 0.]\n [ 0.]]\nExercise: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$: \n$$\\begin{cases}\nv_{W^{[l]}} = \\beta_1 v_{W^{[l]}} + (1 - \\beta_1) \\frac{\\partial J }{ \\partial W^{[l]} } \\\nv^{corrected}{W^{[l]}} = \\frac{v{W^{[l]}}}{1 - (\\beta_1)^t} \\\ns_{W^{[l]}} = \\beta_2 s_{W^{[l]}} + (1 - \\beta_2) (\\frac{\\partial J }{\\partial W^{[l]} })^2 \\\ns^{corrected}{W^{[l]}} = \\frac{s{W^{[l]}}}{1 - (\\beta_2)^t} \\\nW^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}{W^{[l]}}}{\\sqrt{s^{corrected}{W^{[l]}}}+\\varepsilon}\n\\end{cases}$$\nNote that the iterator l starts at 0 in the for loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift l to l+1 when coding.", "# GRADED FUNCTION: update_parameters_with_adam\n\ndef update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n \"\"\"\n Update parameters using Adam\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n learning_rate -- the learning rate, scalar.\n beta1 -- Exponential decay hyperparameter for the first moment estimates \n beta2 -- Exponential decay hyperparameter for the second moment estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- 
Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n \n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = beta1 * v[\"dW\" + str(l+1)] + (1 - beta1)* grads[\"dW\" + str(l+1)] \n v[\"db\" + str(l+1)] = beta1 * v[\"db\" + str(l+1)] + (1 - beta1)* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)] / (1 - beta1 ** t)\n v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)] / (1 - beta1 ** t)\n ### END CODE HERE ###\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n ### START CODE HERE ### (approx. 2 lines)\n s[\"dW\" + str(l+1)] = beta2 * s[\"dW\" + str(l+1)] + (1 - beta2) * (grads[\"dW\" + str(l+1)]**2)\n s[\"db\" + str(l+1)] = beta2 * s[\"db\" + str(l+1)] + (1 - beta2) * (grads[\"db\" + str(l+1)]**2)\n ### END CODE HERE ###\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n s_corrected[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)] / (1 - beta2 ** t)\n s_corrected[\"db\" + str(l+1)] = s[\"db\" + str(l+1)] / (1 - beta2 ** t)\n ### END CODE HERE ###\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * (v_corrected[\"dW\" + str(l+1)] / (np.sqrt(s_corrected[\"dW\" + str(l+1)]) + epsilon))\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * (v_corrected[\"db\" + str(l+1)] / (np.sqrt(s_corrected[\"db\" + str(l+1)]) + epsilon))\n ### END CODE HERE ###\n\n return parameters, v, s\n\nparameters, grads, v, s = update_parameters_with_adam_test_case()\nparameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)\n\nprint(\"W1 = \\n\" + str(parameters[\"W1\"]))\nprint(\"b1 = \\n\" + str(parameters[\"b1\"]))\nprint(\"W2 = \\n\" + str(parameters[\"W2\"]))\nprint(\"b2 = \\n\" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \\n\" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \\n\" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \\n\" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \\n\" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \\n\" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \\n\" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \\n\" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \\n\" + str(s[\"db2\"]))", "Expected Output:\nW1 = \n[[ 1.63178673 -0.61919778 -0.53561312]\n [-1.08040999 0.85796626 -2.29409733]]\nb1 = \n[[ 1.75225313]\n [-0.75376553]]\nW2 = \n[[ 0.32648046 -0.25681174 1.46954931]\n [-2.05269934 -0.31497584 -0.37661299]\n [ 1.14121081 -1.09245036 -0.16498684]]\nb2 = \n[[-0.88529978]\n [ 0.03477238]\n [ 0.57537385]]\nv[\"dW1\"] = \n[[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]]\nv[\"db1\"] = \n[[-0.01228902]\n [-0.09357694]]\nv[\"dW2\"] = \n[[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]]\nv[\"db2\"] = \n[[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]\ns[\"dW1\"] = \n[[ 0.00121136 0.00131039 0.00081287]\n [ 0.0002525 0.00081154 0.00046748]]\ns[\"db1\"] = \n[[ 1.51020075e-05]\n [ 8.75664434e-04]]\ns[\"dW2\"] = \n[[ 7.17640232e-05 
2.81276921e-04 4.78394595e-04]\n [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]\n [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]\ns[\"db2\"] = \n[[ 5.49507194e-05]\n [ 2.75494327e-03]\n [ 5.50629536e-04]]\nYou now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.\n5 - Model with different optimization algorithms\nLets use the following \"moons\" dataset to test the different optimization methods. (The dataset is named \"moons\" because the data from each of the two classes looks a bit like a crescent-shaped moon.)", "train_X, train_Y = load_dataset()", "We have already implemented a 3-layer neural network. You will train it with: \n- Mini-batch Gradient Descent: it will call your function:\n - update_parameters_with_gd()\n- Mini-batch Momentum: it will call your functions:\n - initialize_velocity() and update_parameters_with_momentum()\n- Mini-batch Adam: it will call your functions:\n - initialize_adam() and update_parameters_with_adam()", "def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):\n \"\"\"\n 3-layer neural network model which can be run in different optimizer modes.\n \n Arguments:\n X -- input data, of shape (2, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n layers_dims -- python list, containing the size of each layer\n learning_rate -- the learning rate, scalar.\n mini_batch_size -- the size of a mini batch\n beta -- Momentum hyperparameter\n beta1 -- Exponential decay hyperparameter for the past gradients estimates \n beta2 -- Exponential decay hyperparameter for the past squared gradients estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n num_epochs -- number of epochs\n print_cost -- True to 
print the cost every 1000 epochs\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(layers_dims) # number of layers in the neural networks\n costs = [] # to keep track of the cost\n t = 0 # initializing the counter required for Adam update\n seed = 10 # For grading purposes, so that your \"random\" minibatches are the same as ours\n m = X.shape[1] # number of training examples\n \n # Initialize parameters\n parameters = initialize_parameters(layers_dims)\n\n # Initialize the optimizer\n if optimizer == \"gd\":\n pass # no initialization required for gradient descent\n elif optimizer == \"momentum\":\n v = initialize_velocity(parameters)\n elif optimizer == \"adam\":\n v, s = initialize_adam(parameters)\n \n # Optimization loop\n for i in range(num_epochs):\n \n # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch\n seed = seed + 1\n minibatches = random_mini_batches(X, Y, mini_batch_size, seed)\n cost_total = 0\n \n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n\n # Forward propagation\n a3, caches = forward_propagation(minibatch_X, parameters)\n\n # Compute cost and add to the cost total\n cost_total += compute_cost(a3, minibatch_Y)\n\n # Backward propagation\n grads = backward_propagation(minibatch_X, minibatch_Y, caches)\n\n # Update parameters\n if optimizer == \"gd\":\n parameters = update_parameters_with_gd(parameters, grads, learning_rate)\n elif optimizer == \"momentum\":\n parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)\n elif optimizer == \"adam\":\n t = t + 1 # Adam counter\n parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,\n t, learning_rate, beta1, beta2, epsilon)\n cost_avg = cost_total / m\n \n # Print the cost every 1000 epoch\n if print_cost and i % 1000 == 0:\n print (\"Cost after epoch %i: %f\" %(i, cost_avg))\n if 
print_cost and i % 100 == 0:\n costs.append(cost_avg)\n \n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('epochs (per 100)')\n plt.title(\"Learning rate = \" + str(learning_rate))\n plt.show()\n\n return parameters", "You will now run this 3 layer neural network with each of the 3 optimization methods.\n5.1 - Mini-batch Gradient descent\nRun the following code to see how the model does with mini-batch gradient descent.", "# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"gd\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Gradient Descent optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "5.2 - Mini-batch gradient descent with momentum\nRun the following code to see how the model does with momentum. 
Because this example is relatively simple, the gains from using momemtum are small; but for more complex problems you might see bigger gains.", "# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = \"momentum\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Momentum optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "5.3 - Mini-batch with Adam mode\nRun the following code to see how the model does with Adam.", "# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"adam\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Adam optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "5.4 - Summary\n<table> \n <tr>\n <td>\n **optimization method**\n </td>\n <td>\n **accuracy**\n </td>\n <td>\n **cost shape**\n </td>\n\n </tr>\n <td>\n Gradient descent\n </td>\n <td>\n 79.7%\n </td>\n <td>\n oscillations\n </td>\n <tr>\n <td>\n Momentum\n </td>\n <td>\n 79.7%\n </td>\n <td>\n oscillations\n </td>\n </tr>\n <tr>\n <td>\n Adam\n </td>\n <td>\n 94%\n </td>\n <td>\n smoother\n </td>\n </tr>\n</table>\n\nMomentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligeable. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult thans others for the optimization algorithm.\nAdam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. 
If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.\nSome advantages of Adam include:\n- Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum) \n- Usually works well even with little tuning of hyperparameters (except $\\alpha$)\nReferences:\n\nAdam paper: https://arxiv.org/pdf/1412.6980.pdf" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/asl-ml-immersion
notebooks/kubeflow_pipelines/pipelines/labs/kfp_pipeline_vertex_automl_online_predictions.ipynb
apache-2.0
[ "Continuous Training with AutoML Vertex Pipelines\nLearning Objectives:\n1. Learn how to use Vertex AutoML pre-built components\n1. Learn how to build a Vertex AutoML pipeline with these components using BigQuery as a data source\n1. Learn how to compile, upload, and run the Vertex AutoML pipeline\nIn this lab, you will build, deploy, and run a Vertex AutoML pipeline that orchestrates the Vertex AutoML AI services to train, tune, and deploy a model. \nSetup", "from google.cloud import aiplatform\n\nREGION = \"us-central1\"\nPROJECT = !(gcloud config get-value project)\nPROJECT = PROJECT[0]\n\n# Set `PATH` to include the directory containing KFP CLI\nPATH = %env PATH\n%env PATH=/home/jupyter/.local/bin:{PATH}", "Understanding the pipeline design\nThe workflow implemented by the pipeline is defined using a Python based Domain Specific Language (DSL). The pipeline's DSL is in the pipeline_vertex/pipeline_vertex_automl.py file that we will generate below.\nThe pipeline's DSL has been designed to avoid hardcoding any environment specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables.\nBuilding and deploying the pipeline\nExercise\nComplete the pipeline below:", "%%writefile ./pipeline_vertex/pipeline_vertex_automl.py\n# Copyright 2021 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"Kubeflow Covertype Pipeline.\"\"\"\n\nimport os\n\nfrom google_cloud_pipeline_components.aiplatform import (\n AutoMLTabularTrainingJobRunOp,\n EndpointCreateOp,\n ModelDeployOp,\n TabularDatasetCreateOp,\n)\nfrom kfp.v2 import dsl\n\nPIPELINE_ROOT = os.getenv(\"PIPELINE_ROOT\")\nPROJECT = os.getenv(\"PROJECT\")\nDATASET_SOURCE = os.getenv(\"DATASET_SOURCE\")\nPIPELINE_NAME = os.getenv(\"PIPELINE_NAME\", \"covertype\")\nDISPLAY_NAME = os.getenv(\"MODEL_DISPLAY_NAME\", PIPELINE_NAME)\nTARGET_COLUMN = os.getenv(\"TARGET_COLUMN\", \"Cover_Type\")\nSERVING_MACHINE_TYPE = os.getenv(\"SERVING_MACHINE_TYPE\", \"n1-standard-16\")\n\n\n@dsl.pipeline(\n name=f\"{PIPELINE_NAME}-vertex-automl-pipeline\",\n description=f\"AutoML Vertex Pipeline for {PIPELINE_NAME}\",\n pipeline_root=PIPELINE_ROOT,\n)\ndef create_pipeline():\n\n dataset_create_task = TabularDatasetCreateOp(\n # TODO\n )\n\n automl_training_task = AutoMLTabularTrainingJobRunOp(\n # TODO\n )\n\n endpoint_create_task = EndpointCreateOp(\n # TODO\n )\n\n model_deploy_task = ModelDeployOp( # pylint: disable=unused-variable\n # TODO\n )\n", "Compile the pipeline\nLet's start by defining the environment variables that will be passed to the pipeline compiler:", "ARTIFACT_STORE = f\"gs://{PROJECT}-kfp-artifact-store\"\nPIPELINE_ROOT = f\"{ARTIFACT_STORE}/pipeline\"\nDATASET_SOURCE = f\"bq://{PROJECT}.covertype_dataset.covertype\"\n\n%env PIPELINE_ROOT={PIPELINE_ROOT}\n%env PROJECT={PROJECT}\n%env REGION={REGION}\n%env DATASET_SOURCE={DATASET_SOURCE}", "Let us make sure that the ARTIFACT_STORE has been created, and let us create it if not:", "!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}", "Use the CLI compiler to compile the pipeline\nWe compile the pipeline from the Python file we generated into a JSON description using the following command:", "PIPELINE_JSON = 
\"covertype_automl_vertex_pipeline.json\"", "Exercise\nCompile the pipeline with the dsl-compile-v2 command line:", "# TODO", "Note: You can also use the Python SDK to compile the pipeline:\n```python\nfrom kfp.v2 import compiler\ncompiler.Compiler().compile(\n pipeline_func=create_pipeline, \n package_path=PIPELINE_JSON,\n)\n```\nThe result is the pipeline file.", "!head {PIPELINE_JSON}", "Deploy the pipeline package\nExercise\nUpload and run the pipeline to Vertex AI using aiplatform.PipelineJob:", "# TODO", "Copyright 2021 Google LLC\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttps://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
dwhswenson/openpathsampling
examples/tests/langevin_integrator_check.ipynb
mit
[ "Langevin Integrator Check\nThe toy_dynamics subpackage provides an integrator called LangevinBAOABIntegrator, which is based on a paper by Leimkuhler and Matthews. This notebook uses the toy_dynamics package to check that the integrator gives the correct position and velocity distribution for a harmonic oscillator.\nNote that this particular test does not make use of the trajectory storage tools. It is mainly to show how to use the toy_dynamics subpackage, and has little connection to the main package. The trajectory generated here is extremely long, so in this case we choose not to store it. For an example using toy_dynamics with the storage tools, see ???(to be added later)???.\nImports", "import openpathsampling.engines.toy as toys\nimport openpathsampling as paths\nimport numpy as np", "Set up the simulation\nThis the potential energy surface is $V(x,y) = \\frac{A[0]}{2}m[0] \\omega[0]^2 (x-x_0[0])^2 + \\frac{A[1]}{2}m[1] \\omega[1]^2 (y-x_0[1])^2$", "my_pes = toys.HarmonicOscillator(A=[1.0, 1.0], omega=[2.0, 1.0], x0=[0.0, 0.0])\n\ntopology=toys.Topology(n_spatial=2, masses=[1.0,2.0], pes=my_pes)\n\n\nmy_integ = toys.LangevinBAOABIntegrator(dt=0.02, temperature=0.5, gamma=1.0)\n\nsim = toys.Engine(options={'integ' : my_integ, 'n_steps_per_frame' : 10}, topology=topology)\n\ntemplate = toys.Snapshot(coordinates=np.array([[0.0, 0.0]]), \n velocities=np.array([[0.1, 0.0]]), \n engine=sim)\n\n\nnframes = 250000", "Set the initial conditions for the system, and initialize the sample storage.", "sim.current_snapshot = template\n\nx1 = []\nx2 = []\nv1 = []\nv2 = []", "Run the simulation\nThis might take a while...", "for i in range(nframes):\n # generate the next frame (which is sim.n_steps_per_frame timesteps)\n snap = sim.generate_next_frame()\n # sample the information desired to check distributions\n pos = snap.coordinates[0]\n vel = snap.velocities[0]\n x1.append(pos[0])\n x2.append(pos[1])\n v1.append(vel[0])\n v2.append(vel[1])", "Run analysis 
calculation\nBuild the 1D histograms we'll use:", "nbins = 50\nrrange = (-2.5, 2.5)\nrrangex1 = ((min(x1)), (max(x1)))\nrrangev1 = ((min(v1)), (max(v1)))\nrrangex2 = (min(x2), max(x2))\nrrangev2 = (min(v2), max(v2))\ndens = True\n(x1hist, binsx1) = np.histogram(x1, bins=nbins, range=rrange, density=dens)\n(x2hist, binsx2) = np.histogram(x2, bins=nbins, range=rrange, density=dens)\n(v1hist, binsv1) = np.histogram(v1, bins=nbins, range=rrange, density=dens)\n(v2hist, binsv2) = np.histogram(v2, bins=nbins, range=rrange, density=dens)", "Build the 2D histograms:", "(hist1, xb1, yb1) = np.histogram2d(x1, v1, [nbins/2, nbins/2], [rrangex1, rrangev1])\n(hist2, xb2, yb2) = np.histogram2d(x2, v2, [nbins/2, nbins/2], [rrangex2, rrangev2])", "Run the analysis of the kinetic energy:", "instantaneous_ke = []\ncumulative_ke_1 = []\ncumulative_ke_2 = []\ntot_ke_1 = 0.0\ntot_ke_2 = 0.0\nfor v in zip(v1, v2):\n local_ke_1 = 0.5*sim.mass[0]*v[0]*v[0]\n local_ke_2 = 0.5*sim.mass[1]*v[1]*v[1]\n instantaneous_ke.append(local_ke_1+local_ke_2)\n tot_ke_1 += local_ke_1\n tot_ke_2 += local_ke_2\n cumulative_ke_1.append(tot_ke_1 / (len(cumulative_ke_1)+1))\n cumulative_ke_2.append(tot_ke_2 / (len(cumulative_ke_2)+1))", "Plot our results\nImports for the plots we'll use, as well as some parameter adjustment.", "%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\nfrom matplotlib.legend_handler import HandlerLine2D\nimport numpy as np\n\npylab.rcParams['figure.figsize'] = 12, 4\nmatplotlib.rcParams.update({'font.size' : 18})", "Now we plot the distributions of the positions and velocities. 
These should match the exact Gaussians they're paired with.", "# Boltzmann info as a in exp(-ax^2)\nboltzmann_vel1 = 0.5*sim.integ.beta*sim.mass[0]\nboltzmann_pos1 = 0.5*sim.integ.beta*sim.mass[0]*sim.pes.omega[0]**2\nplotbinsx1 = [0.5*(binsx1[i]+binsx1[i+1]) for i in range(len(binsx1)-1)]\nplotbinsx2 = [0.5*(binsx2[i]+binsx2[i+1]) for i in range(len(binsx2)-1)]\nplotbinsv1 = [0.5*(binsv1[i]+binsv1[i+1]) for i in range(len(binsv1)-1)]\nplotbinsv2 = [0.5*(binsv2[i]+binsv2[i+1]) for i in range(len(binsv2)-1)]\nlx1 = np.linspace(min(plotbinsx1), max(plotbinsx1), 5*len(plotbinsx1))\nlx2 = np.linspace(min(plotbinsx2), max(plotbinsx2), 5*len(plotbinsx2))\nlv1 = np.linspace(min(plotbinsv1), max(plotbinsv1), 5*len(plotbinsv1))\nlv2 = np.linspace(min(plotbinsv2), max(plotbinsv2), 5*len(plotbinsv2))\nf, (ax1, av1) = plt.subplots(1,2, sharey=True)\npx1 = ax1.plot(lx1, np.sqrt(boltzmann_pos1/np.pi)*np.exp(-boltzmann_pos1*lx1**2), 'k-', plotbinsx1, x1hist, 'r-')\npx1 = ax1.set_xlabel('$x$')\npv1 = av1.plot(lv1, np.sqrt(boltzmann_vel1/np.pi)*np.exp(-boltzmann_vel1*lv1**2), 'k-', plotbinsv1, v1hist, 'r-')\npv1 = av1.set_xlabel('$v_x$')", "In the above, you should see that the exact answer (black line) matches up reasonably well with the calculated results (red line). You might notice that the left graph, for position, doesn't match quite as well as the right graph, for velocities. 
This is as expected: the integrator should impose the correct velocity distribution, but sampling space correctly requires more time to converge.\nThe plots above check the $x$ degree of freedom; the plots below do the same for $y$.", "boltzmann_vel2 = 0.5*sim.integ.beta*sim.mass[1]\nboltzmann_pos2 = 0.5*sim.integ.beta*sim.mass[1]*sim.pes.omega[1]**2\nf, (ax2, av2) = plt.subplots(1,2, sharey=True)\npx2 = ax2.plot(lx2, np.sqrt(boltzmann_pos2/np.pi)*np.exp(-boltzmann_pos2*lx2**2), 'k-', plotbinsx2, x2hist, 'r-')\npx2 = ax2.set_xlabel('$y$')\npv2 = av2.plot(lv2, np.sqrt(boltzmann_vel2/np.pi)*np.exp(-boltzmann_vel2*lv2**2), 'k-', plotbinsv2, v2hist, 'r-')\npv2 = av2.set_xlabel('$v_y$')", "Next we plot the 2D histograms for each degree of freedom. These should be reasonably circular.", "f, (ah1, ah2) = plt.subplots(1,2)\nah1.set_xlabel('$x$')\nah1.set_ylabel('$v_x$')\nah2.set_xlabel('$y$')\nah2.set_ylabel('$v_y$')\nhist1plt = ah1.imshow(hist1.T, extent=[xb1[0],xb1[-1],yb1[0],yb1[-1]], interpolation='nearest')\nhist2plt = ah2.imshow(hist2.T, extent=[xb2[0],xb2[-1],yb2[0],yb2[-1]], interpolation='nearest')", "The two plots above should look reasonably similar to each other, although the axes will depend on your choice of $m$ and $\\omega$.\nThe final plot is of the kinetic energy information:", "timeseries = [sim.integ.dt*sim.n_steps_per_frame*i for i in range(nframes)]\ninst_KE, = plt.plot(timeseries[::nframes//1000], instantaneous_ke[::nframes//1000], 'ko', label='instantaneous KE',markersize=2)\n\nke_1 = plt.plot(timeseries, cumulative_ke_1, 'r-', label='cumulative KE, x', linewidth=3)\nke_2 = plt.plot(timeseries, cumulative_ke_2, 'b-', label='cumulative KE, y', linewidth=3)\nleg = plt.legend(prop={'size' : 12}, handler_map={inst_KE: HandlerLine2D(numpoints=1)})\nplt.xlabel('time');\nplt.ylabel('kinetic energy');", "The cumulative kinetic energy for each degree of freedom should converge to the same value, which should be half the temperature (since we have 
$k_\\text{B}=1$). The instantaneous values of the kinetic energy should suggest a longer tail." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ealogar/curso-python
sysadmin/1_Gathering_system_data.ipynb
apache-2.0
[ "Gathering system data\nGoals:\n- Gathering System Data with multiplatform and platform-dependent tools\n- Get infos from files, /proc, /sys\n- Capture command output\n- Use psutil to get IO, CPU and memory data\n- Parse files with a strategy\n\nNon-goals for this lesson:\n- use with, yield or pipes\n\nModules", "import psutil\nimport glob\nimport sys\nimport subprocess\n\n\n#\n# Our code is p3-ready\n#\nfrom __future__ import print_function, unicode_literals\n\ndef grep(needle, fpath):\n \"\"\"A simple grep implementation\n\n goal: open() is iterable and doesn't\n need splitlines()\n goal: comprehension can filter lists\n \"\"\"\n return [x for x in open(fpath) if needle in x]\n\n# Do we have localhost?\nprint(grep(\"localhost\", \"/etc/hosts\"))\n\n#The psutil module is very nice\nimport psutil\n\n#Works on Windows, Linux and MacOS\npsutil.cpu_percent() \n\n#And its output is very easy to manage\nret = psutil.disk_io_counters()\nprint(ret)\n\n# Exercise: Which other informations \n# does psutil provide? 
\n# Use this cell and the tab-completion jupyter functionalities.\n\n# Exercise\ndef multiplatform_vmstat(count):\n # Write a vmstat-like function printing every second:\n # - cpu usage%\n # - bytes read and written in the given interval\n # Hint: use psutil and time.sleep(1)\n # Hint: use this cell or try on ipython and *then* write the function\n # using %edit vmstat.py\n for i in range(count):\n raise NotImplementedError\n print(cpu_usage, bytes_rw)\n\nmultiplatform_vmstat(5)\n\n!python -c \"from solutions import multiplatform_vmstat;multiplatform_vmstat(3)\"\n\n#\n# subprocess\n#\n# The check_output function returns the command stdout\nfrom subprocess import check_output\n\n# It takes a *list* as an argument!\nout = check_output(\"ping -c5 www.google.com\".split())\n\n# and returns a string\nprint(out)\nprint(type(out))", "If you want to stream command output, use subprocess.Popen and check carefully subprocess documentation!", "def sh(cmd, shell=False, timeout=0):\n \"\"\"\"Returns an iterable output of a command string\n checking...\n \"\"\"\n from sys import version_info as python_version\n if python_version < (3, 3): # ..before using..\n if timeout:\n raise ValueError(\"Timeout not supported until Python 3.3\")\n output = check_output(cmd.split(), shell=shell)\n else:\n output = check_output(cmd.split(), shell=shell, timeout=timeout)\n return output.splitlines()\n\n\n# Exercise:\n# implement a multiplatform pgrep-like function.\ndef pgrep(program):\n \"\"\"\n A multiplatform pgrep-like function.\n Prints a list of processes executing 'program'\n @param program - eg firefox, explorer.exe\n \n Hint: use subprocess, os and list-comprehension\n eg. 
items = [x for x in a_list if 'firefox' in x] \n \"\"\"\n raise NotImplementedError\npgrep('firefox')\n\nfrom solutions import pgrep as sol_pgrep\nsol_pgrep(\"firefox\")", "Parsing /proc\nLinux /proc filesystem is a cool place to get data\nIn the next example we'll see how to get:\n - thread informations;\n - disk statistics;", "# Parsing /proc - 1\ndef linux_threads(pid):\n \"\"\"Retrieving data from /proc\n \"\"\"\n from glob import glob\n # glob emulates shell expansion of * and ?\n # Change to /proc the base path if you run on linux machine\n path = \"proc/{}/task/*/status\".format(pid)\n \n \n # pick a set of fields to gather\n t_info = ('Pid', 'Tgid', 'voluntary') # this is a tuple!\n for t in glob(path):\n # ... and use comprehension to get \n # intersting data.\n t_info = [x \n for x in open(t) \n if x.startswith(t_info)] # startswith accepts tuples!\n print(t_info)\n\n# If you're on linux try linux_threads\npid_of_init = 1 # or systemd ?\nlinux_threads(pid_of_init)\n\n# On linux /proc/diskstats is the source of I/O infos\ndisk_l = grep(\"vda1\", \"proc/diskstats\")\nprint(''.join(disk_l))\n\n# To gather that data we put the header in a multiline string\nfrom solutions import diskstats_headers as headers\nprint(*headers, sep='\\n')\n\n#Take the 1st entry (sda), split the data...\ndisk_info = disk_l[0].split()\n# ... and tie them with the header\nret = zip(headers, disk_info)\n\n# On py3 we need to iterate over the generators\nprint(list(ret))\n\n# Try to mangle ret\nprint('\\n'.join(str(x) for x in ret))\n# Exercise: trasform ret in a dict.\n\n# We can create a reusable commodity class with\nfrom collections import namedtuple\n\n# using the imported `headers` as attributes\n# like the one provided by psutil\nDiskStats = namedtuple('DiskStat', headers)\n\n# ... 
and disk_info as values\ndstat = DiskStats(*disk_info)\nprint(dstat.device, dstat.writes_ms)\n\n# Exercise\n# Write the following function \ndef linux_diskstats(partition):\n \"\"\"Print every second I/O information from /proc/diskstats\n \n @param: partition - eg sda1 or vdx1\n \n Hint: use the above `grep` function\n Hint: use zip, time.sleep, print() and *magic\n \"\"\"\n diskstats_headers = ('reads reads_merged reads_sectors reads_ms'\n ' writes writes_merged writes_sectors writes_ms'\n ' io_in_progress io_ms_weight').split()\n \n while True:\n raise NotImplementedError\n print(values, sep=\"\\t\")\n\n!python -c \"from solutions import linux_diskstats;linux_diskstats('vda1')\"\n\n\n# Using check_output with split() doesn't always work\nfrom os import makedirs\nmakedirs('/tmp/course/b l a n k s') # , exist_ok=True) this on py3\n \ncheck_output('ls \"/tmp/course/b l a n k s\"'.split())\n\n# You can use\nfrom shlex import split\n# and\ncmd = split('dir -a \"/tmp/course/b l a n k s\"')\ncheck_output(cmd)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
esumitra/minecraft-programming
notebooks/Adventure1.ipynb
mit
[ "Welcome to Programming Adventures in Minecraft\n\nProgramming software is fun and what better way to learn than to do things in Minecraft!\nHere are some things you can do with Minecraft programming\n\nSend a chat message to Alex \n Programmers call this \"Hello World\" \nTeleport Steve to a secret location\n...", "import sys\n#sys.path.append('/Users/esumitra/workspaces/mc/mcpipy')", "Send a chat message\nLets write our first program to send a message to the Player\nStart up minecraft and type the following in a new cell\nimport mcpi.minecraft as minecraft\nimport time\nmc = minecraft.Minecraft.create()\nmc.postToChat(\"Hello kids\")\ntime.sleep(5)", "# Start typing below\n# once you are done typing, press (Ctrl+Enter) to run the code\nimport mcpi.minecraft as minecraft\nimport time\nmc = minecraft.Minecraft.create()\nmc.postToChat(\"Hello kids\")\ntime.sleep(5)", "If everthing went well, you saw a chat message in Minecraft. You have now written your first program for Minecraft. Jump up and down three times yelling \nI rock!\nNow let's see what the lines in your program do.\n\nIn order to work with minecraft we need other programs that help us connect to our minecraft game. These programs are called libraries. We need the minecraft and time libraries, so we tell our program that we need these libraries by typing\nimport mcpi.minecraft as minecraft\nimport time\n\nThere are many other libraries we can use in minecraft, some interesting ones being mcpi.blocks which has programs to build with blocks and mcpi.entity which has a list of things like bats, witherskull that you can use in your programs. \nA library has many functions to do things. There is a function to display a message, a function to play music files and so on. \n\nWe need a minecraft object to connect to our game so we create minecraft object by typing\nmc = minecraft.Minecraft.create()\n\nThe minecraft object has many functions. When a function is tied to an object is is also called a method. 
We need the postToChat to display a message, so we type mc.postToChat(...).\nNow try to complete the tasks below in this adventure.\n Good luck! \nTask 1\nWrite a program to display Hi Brooks School in Minecraft", "# Program for Adventure 1 - Task 1\n", "Task 2\nWrite a program to display a countdown from 5,4,3,2,1\nHint: use time.sleep(1) to pause for 1 second between displaying numbers", "# Program for Adventure 1 - Task 2\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tedunderwood/fiction
bert/logistic_regression_baselines.ipynb
mit
[ "Logistic models for blog post\nThis notebook works up some quick and dirty bag-of-words models, to see how much this approach suffers when we cut whole documents into 128- or 256-word chunks.\nWe're going to use LogisticRegression from scikit-learn, and apply it in three ways:\n\n\nTo whole documents.\n\n\nTo BERT-sized chunks.\n\n\nAggregating the votes from BERT-sized chunks to produce a document-level prediction.", "# Things that will come in handy\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom collections import Counter\nfrom scipy.stats import pearsonr\nimport random, glob, csv", "Modeling whole movie reviews from the IMDb dataset\n@InProceedings{maas-EtAl:2011:ACL-HLT2011,\n author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},\n title = {Learning Word Vectors for Sentiment Analysis},\n booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},\n month = {June},\n year = {2011},\n address = {Portland, Oregon, USA},\n publisher = {Association for Computational Linguistics},\n pages = {142--150},\n url = {http://www.aclweb.org/anthology/P11-1015}", "raw = pd.read_csv('sentimentdata.tsv', sep = '\\t')\n\nfullname = 'sentiment'\n\nraw = raw.sample(frac = 1)\n# that is in effect a shuffle\n\ncut = round(len(raw) * .75)\n\ntrain = raw.iloc[0: cut, : ]\ntest = raw.iloc[cut : , : ]\n\nlex = Counter()\n\ndelchars = ''.join(c for c in map(chr, range(256)) if not c.isalpha())\nspaces = ' ' * len(delchars)\npunct2space = str.maketrans(delchars, spaces)\n\ndef getwords(text):\n global punct2space\n text = text.replace('<br />', ' ')\n words = text.translate(punct2space).split()\n return words\n\ndef get_dataset(rootfolder):\n \n negpaths = glob.glob(rootfolder + 
'/neg/*.txt')\n pospaths = glob.glob(rootfolder + '/pos/*.txt')\n paths = [(0, x) for x in negpaths] + [(1, x) for x in pospaths]\n \n index = 0\n lines = []\n lex = Counter()\n labels = []\n texts = []\n \n for label, p in paths:\n \n with open(p) as f:\n text = f.read().strip().lower()\n words = getwords(text)\n for w in words:\n lex[w] += 1\n labels.append(label)\n texts.append(text)\n\n vocab = [x[0] for x in lex.most_common()]\n print(vocab[0:10])\n \n df = pd.DataFrame.from_dict({'sent': labels, 'text': texts})\n df = df.sample(frac = 1)\n # shuffle\n \n return vocab, df\n\ndef make_matrix(df, vocab, cut):\n \n lexicon = dict()\n for i in range(cut):\n lexicon[vocab[i]] = i\n \n y = []\n x = []\n \n for i, row in df.iterrows():\n y.append(int(row['sent']))\n x_row = np.zeros(cut)\n words = getwords(row.text)\n for w in words:\n if w in lexicon:\n idx = lexicon[w]\n x_row[idx] = x_row[idx] + 1\n \n x_row = x_row / np.sum(len(words))\n \n x.append(x_row)\n \n x = np.array(x)\n \n return x, y\n \n\ntriplets = []\n\nvocab, train_df = get_dataset('/Volumes/TARDIS/aclImdb/train')\nprint('got training')\ndummy, test_df = get_dataset('/Volumes/TARDIS/aclImdb/test')\nprint('got test')\n\nfor cut in range(3200, 5200, 200):\n\n for reg_const in [.00001, .0001, .0003, .001, .01, .1]:\n \n trainingset, train_y = make_matrix(train_df, vocab, cut)\n testset, test_y = make_matrix(test_df, vocab, cut)\n \n model = LogisticRegression(C = reg_const)\n stdscaler = StandardScaler()\n stdscaler.fit(trainingset)\n scaledtraining = stdscaler.transform(trainingset)\n model.fit(scaledtraining, train_y)\n\n scaledtest = stdscaler.transform(testset)\n predictions = [x[1] for x in model.predict_proba(scaledtest)]\n predictions = np.round(predictions)\n accuracy = accuracy_score(predictions, test_y)\n f1 = f1_score(predictions, test_y)\n print(cut, reg_const, f1, accuracy)\n triplets.append((accuracy, cut, reg_const))\n\nrandom.shuffle(triplets)\ntriplets.sort(key = lambda x: 
x[0])\nprint(triplets[-1])", "Cut down the reviews to 128-word chunks; how does it perform?\nHere I'm using the same data files that were given to BERT.", "def get_datachunks(filepath):\n \n data = pd.read_csv(filepath, sep = '\\t', header = None, names = ['idx', 'sent', 'dummy', 'text'], quoting = csv.QUOTE_NONE)\n \n lex = Counter()\n \n for i, row in data.iterrows():\n text = row['text'].strip().lower()\n words = getwords(text)\n for w in words:\n lex[w] += 1\n\n vocab = [x[0] for x in lex.most_common()]\n print(vocab[0:10])\n \n df = data.loc[ : , ['sent', 'text']]\n \n return vocab, df\n\ntriplets = []\n\nvocab, train_df = get_datachunks('/Users/tunder/Dropbox/fiction/bert/bertdata/train_sentiment.tsv')\nprint('got training')\ndummy, test_df = get_datachunks('/Users/tunder/Dropbox/fiction/bert/bertdata/dev_sentiment.tsv')\nprint('got test')\n\nfor cut in range(2200, 6200, 400):\n\n for reg_const in [.00001, .00005, .0001, .0003, .001]:\n \n trainingset, train_y = make_matrix(train_df, vocab, cut)\n testset, test_y = make_matrix(test_df, vocab, cut)\n \n model = LogisticRegression(C = reg_const)\n stdscaler = StandardScaler()\n stdscaler.fit(trainingset)\n scaledtraining = stdscaler.transform(trainingset)\n model.fit(scaledtraining, train_y)\n\n scaledtest = stdscaler.transform(testset)\n predictions = [x[1] for x in model.predict_proba(scaledtest)]\n predictions = np.round(predictions)\n accuracy = accuracy_score(predictions, test_y)\n f1 = f1_score(predictions, test_y)\n print(cut, reg_const, f1, accuracy)\n triplets.append((accuracy, cut, reg_const))\n\nrandom.shuffle(triplets)\ntriplets.sort(key = lambda x: x[0])\nprint(triplets[-1])", "How much can we improve our chunk-level results by aggregating them?", "trainingset, train_y = make_matrix(train_df, vocab, 5200)\ntestset, test_y = make_matrix(test_df, vocab, 5200)\nmodel = LogisticRegression(C = .0001)\nstdscaler = StandardScaler()\nstdscaler.fit(trainingset)\nscaledtraining = 
stdscaler.transform(trainingset)\nmodel.fit(scaledtraining, train_y)\n\nscaledtest = stdscaler.transform(testset)\npredictions = [x[1] for x in model.predict_proba(scaledtest)]\n\n# make a dataframe\nmeta = pd.read_csv('bertmeta/dev_rows_sentiment.tsv', sep = '\\t')\npred = pd.DataFrame.from_dict({'idx': meta['idx'], 'pred': predictions, 'real': test_y})\npred = pred.set_index('idx')\npred.head()\n\nright = 0\n\nfor idx, row in pred.iterrows():\n if row['pred'] >= 0.5:\n predclass = 1\n else:\n predclass = 0\n \n if predclass == row['real']:\n right += 1\n\nprint(right / len(pred))\n\nbyvol = meta.groupby('docid')\nrightvols = 0\nallvols = 0\nbertprobs = dict()\n\nfor vol, df in byvol:\n total = 0\n right = 0\n positive = 0\n df.set_index('idx', inplace = True)\n predicted = []\n for idx, row in df.iterrows():\n predict = pred.loc[idx, 'pred']\n predicted.append(predict)\n true_class = row['class']\n \n volmean = sum(predicted) / len(predicted)\n if volmean >= 0.5:\n predicted_class = 1\n else:\n predicted_class = 0\n \n if true_class == predicted_class:\n rightvols += 1\n allvols += 1\n\nprint()\nprint('Overall accuracy:', rightvols / allvols)", "What about the parallel problem for genre?\nWe use the same data that was passed to BERT.", "triplets = []\n\nvocab, train_df = get_datachunks('/Users/tunder/Dropbox/fiction/bert/bertdata/train_Mystery256.tsv')\nprint('got training')\ndummy, test_df = get_datachunks('/Users/tunder/Dropbox/fiction/bert/bertdata/dev_Mystery256.tsv')\nprint('got test')\n\nfor cut in range(2000, 6200, 400):\n\n for reg_const in [.00001, .00005, .0001, .0003, .001]:\n \n trainingset, train_y = make_matrix(train_df, vocab, cut)\n testset, test_y = make_matrix(test_df, vocab, cut)\n \n model = LogisticRegression(C = reg_const)\n stdscaler = StandardScaler()\n stdscaler.fit(trainingset)\n scaledtraining = stdscaler.transform(trainingset)\n model.fit(scaledtraining, train_y)\n\n scaledtest = stdscaler.transform(testset)\n predictions = [x[1] for x 
in model.predict_proba(scaledtest)]\n predictions = np.round(predictions)\n accuracy = accuracy_score(predictions, test_y)\n f1 = f1_score(predictions, test_y)\n print(cut, reg_const, f1, accuracy)\n triplets.append((accuracy, cut, reg_const))\n\nrandom.shuffle(triplets)\ntriplets.sort(key = lambda x: x[0])\nprint(triplets[-1])", "and now aggregating the genre chunks", "# best model\n\ntrainingset, train_y = make_matrix(train_df, vocab, 6000)\ntestset, test_y = make_matrix(test_df, vocab, 6000)\nmodel = LogisticRegression(C = .00001)\nstdscaler = StandardScaler()\nstdscaler.fit(trainingset)\nscaledtraining = stdscaler.transform(trainingset)\nmodel.fit(scaledtraining, train_y)\n\nscaledtest = stdscaler.transform(testset)\npredictions = [x[1] for x in model.predict_proba(scaledtest)]\n\n# make a dataframe\nmeta = pd.read_csv('bertmeta/dev_rows_Mystery256.tsv', sep = '\\t')\npred = pd.DataFrame.from_dict({'idx': meta['idx'], 'pred': predictions, 'real': test_y})\npred = pred.set_index('idx')\npred.head()\n\nbyvol = meta.groupby('docid')\nrightvols = 0\nallvols = 0\nbertprobs = dict()\n\nfor vol, df in byvol:\n total = 0\n right = 0\n positive = 0\n df.set_index('idx', inplace = True)\n predicted = []\n for idx, row in df.iterrows():\n predict = pred.loc[idx, 'pred']\n predicted.append(predict)\n true_class = row['class']\n \n volmean = sum(predicted) / len(predicted)\n if volmean >= 0.5:\n predicted_class = 1\n else:\n predicted_class = 0\n \n if true_class == predicted_class:\n rightvols += 1\n allvols += 1\n\nprint()\nprint('Overall accuracy:', rightvols / allvols)", "Aside: It's really remarkable how powerful binary voting can be. In this case models of genre at 256-word scale are pretty awful (75.5% accuracy) but aggregate up to 87.7% accuracy. But that's still not quite in the same league with models that can see whole novels; in that case the detective/mystery genre can be modeled with more than 91% accuracy. 
Something is lost when we can't see the whole elephant at once." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
DB2-Samples/db2jupyter
Db2 Time and Date Functions.ipynb
apache-2.0
[ "<a id=\"top\"></a>\nDb2 11 Time and Date Functions\nThere are plenty of new date and time functions found in Db2 11. These functions allow you to extract portions from a date\nand format the date in a variety of different ways. While Db2 already has a number of date and time functions, these new\nfunctions allow for greater compatibility with other database implementations, making it easier to port to DB2.ion.", "%run db2.ipynb", "Table of Contents\n\nExtract Function\nDATE_PART Function\nDATE_TRUNC Function\nExtracting Specific Days from a Month\nDate Addition\nExtracting Weeks, Months, Quarters, and Years\nNext Day Function\nBetween Date/Time Functions\nMonths Between\nDate Duration\nOverlaps Predicate\nUTC Time Conversions\n\nBack to Top\n<a id='extract'></a>\nExtract Function\nThe EXTRACT function extracts and element from a date/time value. The syntax of the EXTRACT command is:\nPython\nEXTRACT( element FROM expression )\nThis is a slightly different format from most functions that you see in the DB2. Element must be one of the following values:\n|Element Name | Description\n|:---------------- | :-----------------------------------------------------------------------------------------\n|EPOCH | Number of seconds since 1970-01-01 00:00:00.00. The value can be positive or negative.\n|MILLENNIUM(S) | The millennium is to be returned.\n|CENTURY(CENTURIES)| The number of full 100-year periods represented by the year.\n|DECADE(S) | The number of full 10-year periods represented by the year.\n|YEAR(S) | The year portion is to be returned. \n|QUARTER | The quarter of the year (1 - 4) is to be returned.\n|MONTH | The month portion is to be returned. \n|WEEK | The number of the week of the year (1 - 53) that the specified day is to be returned.\n|DAY(S) | The day portion is to be returned. \n|DOW | The day of the week that is to be returned. Note that \"1\" represents Sunday. 
\n|DOY | The day (1 - 366) of the year that is to be returned.\n|HOUR(S) | The hour portion is to be returned. \n|MINUTE(S) | The minute portion is to be returned. \n|SECOND(S) | The second portion is to be returned. \n|MILLISECOND(S) | The second of the minute, including fractional parts to one thousandth of a second\n|MICROSECOND(S) | The second of the minute, including fractional parts to one millionth of a second\nThe synonym NOW is going to be used in the next example. NOW is a synonym for CURRENT TIMESTAMP.", "%sql VALUES NOW", "This SQL will return every possible extract value from the current date.the SQL standard.", "%%sql -a\nWITH DATES(FUNCTION, RESULT) AS (\n VALUES\n ('EPOCH', EXTRACT( EPOCH FROM NOW )), \n ('MILLENNIUM(S)', EXTRACT( MILLENNIUM FROM NOW )),\n ('CENTURY(CENTURIES)', EXTRACT( CENTURY FROM NOW )), \n ('DECADE(S)', EXTRACT( DECADE FROM NOW )),\n ('YEAR(S)', EXTRACT( YEAR FROM NOW )), \n ('QUARTER', EXTRACT( QUARTER FROM NOW )), \n ('MONTH', EXTRACT( MONTH FROM NOW )),\n ('WEEK', EXTRACT( WEEK FROM NOW )),\n ('DAY(S)', EXTRACT( DAY FROM NOW )), \n ('DOW', EXTRACT( DOW FROM NOW )), \n ('DOY', EXTRACT( DOY FROM NOW )), \n ('HOUR(S)', EXTRACT( HOURS FROM NOW )), \n ('MINUTE(S)', EXTRACT( MINUTES FROM NOW )),\n ('SECOND(S)', EXTRACT( SECONDS FROM NOW )),\n ('MILLISECOND(S)', EXTRACT( MILLISECONDS FROM NOW )), \n ('MICROSECOND(S)', EXTRACT( MICROSECONDS FROM NOW ))\n )\nSELECT FUNCTION, CAST(RESULT AS BIGINT) FROM DATES", "Back to Top\n<a id='part'></a>\nDATE_PART Function\nDATE_PART is similar to the EXTRACT function but it uses the more familiar syntax:\nPython\nDATE_PART(element, expression)\nIn the case of the function, the element must be placed in quotes, rather than as a keyword in the EXTRACT function. in addition, the DATE_PART always returns a BIGINT, while the EXTRACT function will return a different data type depending on the element being returned. For instance, compare the SECONDs option for both functions. 
In the case of EXTRACT you get a DECIMAL result while for the DATE_PART you get a truncated BIGINT.", "%%sql -a\nWITH DATES(FUNCTION, RESULT) AS (\n VALUES\n ('EPOCH', DATE_PART('EPOCH' ,NOW )), \n ('MILLENNIUM(S)', DATE_PART('MILLENNIUM' ,NOW )),\n ('CENTURY(CENTURIES)', DATE_PART('CENTURY' ,NOW )), \n ('DECADE(S)', DATE_PART('DECADE' ,NOW )),\n ('YEAR(S)', DATE_PART('YEAR' ,NOW )), \n ('QUARTER', DATE_PART('QUARTER' ,NOW )), \n ('MONTH', DATE_PART('MONTH' ,NOW )),\n ('WEEK', DATE_PART('WEEK' ,NOW )),\n ('DAY(S)', DATE_PART('DAY' ,NOW )), \n ('DOW', DATE_PART('DOW' ,NOW )), \n ('DOY', DATE_PART('DOY' ,NOW )), \n ('HOUR(S)', DATE_PART('HOURS' ,NOW )), \n ('MINUTE(S)', DATE_PART('MINUTES' ,NOW )),\n ('SECOND(S)', DATE_PART('SECONDS' ,NOW )),\n ('MILLISECOND(S)', DATE_PART('MILLISECONDS' ,NOW )), \n ('MICROSECOND(S)', DATE_PART('MICROSECONDS' ,NOW ))\n )\nSELECT FUNCTION, CAST(RESULT AS BIGINT) FROM DATES;", "Back to Top\n<a id='trunc'></a>\nDATE_TRUNC Function\nDATE_TRUNC computes the same results as the DATE_PART function but then truncates the value down. Note that not all values can be truncated. The function syntax is:\nPython\nDATE_TRUNC(element, expression)\nThe element must be placed in quotes, rather than as a keyword in the EXTRACT function.\nNote that DATE_TRUNC always returns a BIGINT.\nThe elements that can be truncated are:\n|Element Name |Description\n|:---------------- |:------------------------------------------------------------------------------\n|MILLENNIUM(S) |The millennium is to be returned.\n|CENTURY(CENTURIES) |The number of full 100-year periods represented by the year.\n|DECADE(S) |The number of full 10-year periods represented by the year.\n|YEAR(S) |The year portion is to be returned. \n|QUARTER |The quarter of the year (1 - 4) is to be returned.\n|MONTH |The month portion is to be returned. 
\n|WEEK |The number of the week of the year (1 - 53) that the specified day is to be returned.\n|DAY(S) |The day portion is to be returned.\n|HOUR(S) |The hour portion is to be returned. \n|MINUTE(S) |The minute portion is to be returned. \n|SECOND(S) |The second portion is to be returned. \n|MILLISECOND(S) |The second of the minute, including fractional parts to one thousandth of a second\n|MICROSECOND(S) |The second of the minute, including fractional parts to one millionth of a secondry data types.", "%%sql -a\nWITH DATES(FUNCTION, RESULT) AS (\n VALUES \n ('MILLENNIUM(S)', DATE_TRUNC('MILLENNIUM' ,NOW )),\n ('CENTURY(CENTURIES)', DATE_TRUNC('CENTURY' ,NOW )), \n ('DECADE(S)', DATE_TRUNC('DECADE' ,NOW )),\n ('YEAR(S)', DATE_TRUNC('YEAR' ,NOW )), \n ('QUARTER', DATE_TRUNC('QUARTER' ,NOW )), \n ('MONTH', DATE_TRUNC('MONTH' ,NOW )),\n ('WEEK', DATE_TRUNC('WEEK' ,NOW )),\n ('DAY(S)', DATE_TRUNC('DAY' ,NOW )), \n ('HOUR(S)', DATE_TRUNC('HOURS' ,NOW )), \n ('MINUTE(S)', DATE_TRUNC('MINUTES' ,NOW )),\n ('SECOND(S)', DATE_TRUNC('SECONDS' ,NOW )),\n ('MILLISECOND(S)', DATE_TRUNC('MILLISECONDS' ,NOW )), \n ('MICROSECOND(S)', DATE_TRUNC('MICROSECONDS' ,NOW ))\n )\nSELECT FUNCTION, RESULT FROM DATES", "Back to Top\n<a id='month'></a>\nExtracting Specfic Days from a Month\nThere are three functions that retrieve day information from a date. These functions include:\n\nDAYOFMONTH - returns an integer between 1 and 31 that represents the day of the argument\nFIRST_DAY - returns a date or timestamp that represents the first day of the month of the argument\nDAYS_TO_END_OF_MONTH - returns the number of days to the end of the month\n\nThis is the current date so that you know what all of the calculations are based on.", "%sql VALUES NOW", "This expression (DAYOFMONTH) returns the day of the month.", "%sql VALUES DAYOFMONTH(NOW)", "FIRST_DAY will return the first day of the month. 
You could probably compute this with standard SQL date functions, but it is a lot easier just to use this builtin function.", "%sql VALUES FIRST_DAY(NOW)", "Finally, DAYS_TO_END_OF_MOTNH will return the number of days to the end of the month. A Zero would be returned if you are on the last day of the month.", "%sql VALUES DAYS_TO_END_OF_MONTH(NOW)", "Back to Top\n<a id='add'></a>\nDate Addition Functions\nThe date addition functions will add or subtract days from a current timestamp. The functions that \nare available are:\n\nADD_YEARS - Add years to a date\nADD_MONTHS - Add months to a date\nADD_DAYS - Add days to a date\nADD_HOURS - Add hours to a date\nADD_MINUTES - Add minutes to a date\nADD_SECONDS - Add seconds to a date\n\nThe format of the function is:\nPython \nADD_DAYS ( expression, numeric expression )\nThe following SQL will add one \"unit\" to the current date.", "%%sql\nWITH DATES(FUNCTION, RESULT) AS \n ( \n VALUES\n ('CURRENT DATE ',NOW),\n ('ADD_YEARS ',ADD_YEARS(NOW,1)),\n ('ADD_MONTHS ',ADD_MONTHS(NOW,1)),\n ('ADD_DAYS ',ADD_DAYS(NOW,1)),\n ('ADD_HOURS ',ADD_HOURS(NOW,1)),\n ('ADD_MINUTES ',ADD_MINUTES(NOW,1)),\n ('ADD_SECONDS ',ADD_SECONDS(NOW,1))\n )\nSELECT * FROM DATES", "A negative number can be used to subtract values from the current date.", "%%sql\nWITH DATES(FUNCTION, RESULT) AS \n ( \n VALUES\n ('CURRENT DATE ',NOW),\n ('ADD_YEARS ',ADD_YEARS(NOW,-1)),\n ('ADD_MONTHS ',ADD_MONTHS(NOW,-1)),\n ('ADD_DAYS ',ADD_DAYS(NOW,-1)),\n ('ADD_HOURS ',ADD_HOURS(NOW,-1)),\n ('ADD_MINUTES ',ADD_MINUTES(NOW,-1)),\n ('ADD_SECONDS ',ADD_SECONDS(NOW,-1))\n )\nSELECT * FROM DATES", "Back to Top\n<a id='extract'></a>\nExtracting Weeks, Months, Quarters, and Years from a Date\nThere are four functions that extract different values from a date. 
These functions include:\n\nTHIS_QUARTER - returns the first day of the quarter\nTHIS_WEEK - returns the first day of the week (Sunday is considered the first day of that week)\nTHIS_MONTH - returns the first day of the month\nTHIS_YEAR - returns the first day of the year", "%%sql\nWITH DATES(FUNCTION, RESULT) AS \n ( \n VALUES\n ('CURRENT DATE ',NOW),\n ('THIS_WEEK ',THIS_WEEK(NOW)),\n ('THIS_MONTH ',THIS_MONTH(NOW)),\n ('THIS_QUARTER ',THIS_QUARTER(NOW)),\n ('THIS_YEAR ',THIS_YEAR(NOW))\n )\nSELECT * FROM DATES", "There is also a NEXT function for each of these. The NEXT function will return the next week, month, quarter,\nor year given a current date.", "%%sql\nWITH DATES(FUNCTION, RESULT) AS \n ( \n VALUES\n ('CURRENT DATE ',NOW),\n ('NEXT_WEEK ',NEXT_WEEK(NOW)),\n ('NEXT_MONTH ',NEXT_MONTH(NOW)),\n ('NEXT_QUARTER ',NEXT_QUARTER(NOW)),\n ('NEXT_YEAR ',NEXT_YEAR(NOW))\n )\nSELECT * FROM DATES", "Back to Top\n<a id='nextday'></a>\nNext Day Function\nThe previous set of functions returned a date value for the current week, month, quarter, or year (or the next one\nif you used the NEXT function). The NEXT_DAY function returns the next day (after the date you supply) \nbased on the string representation of the day. 
The date string will be dependent on the codepage that you are using for the database.\nThe date (from an English perspective) can be:\n|Day |Short form\n|:-------- |:---------\n|Monday |MON\n|Tuesday |TUE\n|Wednesday |WED\n|Thursday |THU\n|Friday |FRI\n|Saturday |SAT\n|Sunday |SUN\nThe following SQL will show you the \"day\" after the current date that is Monday through Sunday.", "%%sql\nWITH DATES(FUNCTION, RESULT) AS \n ( \n VALUES\n ('CURRENT DATE ',NOW),\n ('Monday ',NEXT_DAY(NOW,'Monday')),\n ('Tuesday ',NEXT_DAY(NOW,'TUE')),\n ('Wednesday ',NEXT_DAY(NOW,'Wednesday')),\n ('Thursday ',NEXT_DAY(NOW,'Thursday')),\n ('Friday ',NEXT_DAY(NOW,'FRI')),\n ('Saturday ',NEXT_DAY(NOW,'Saturday')),\n ('Sunday ',NEXT_DAY(NOW,'Sunday')) \n )\nSELECT * FROM DATES", "Back to Top\n<a id='between'></a>\nBetween Date/Time Functions\nThese date functions compute the number of full seconds, minutes, hours, days, weeks, and years between\ntwo dates. If there isn't a full value between the two objects (like less than a day), a zero will be\nreturned. 
These new functions are:\n\nHOURS_BETWEEN - returns the number of full hours between two arguments\nMINUTES_BETWEEN - returns the number of full minutes between two arguments\nSECONDS_BETWEEN - returns the number of full seconds between two arguments\nDAYS_BETWEEN - returns the number of full days between two arguments\nWEEKS_BETWEEN - returns the number of full weeks between two arguments\nYEARS_BETWEEN - returns the number of full years between two arguments\n\nThe format of the function is:\nPython\nDAYS_BETWEEN( expression1, expression2 )\nThe following SQL will use a date that is in the future with exactly one extra second, minute, hour, day,\nweek and year added to it.", "%%sql -q\nDROP VARIABLE FUTURE_DATE;\nCREATE VARIABLE FUTURE_DATE TIMESTAMP DEFAULT(NOW + 1 SECOND + 1 MINUTE + 1 HOUR + 8 DAYS + 1 YEAR);\n\nWITH DATES(FUNCTION, RESULT) AS (\n VALUES \n ('SECONDS_BETWEEN',SECONDS_BETWEEN(FUTURE_DATE,NOW)),\n ('MINUTES_BETWEEN',MINUTES_BETWEEN(FUTURE_DATE,NOW)), \n ('HOURS_BETWEEN ',HOURS_BETWEEN(FUTURE_DATE,NOW)), \n ('DAYS BETWEEN ',DAYS_BETWEEN(FUTURE_DATE,NOW)), \n ('WEEKS_BETWEEN ',WEEKS_BETWEEN(FUTURE_DATE,NOW)), \n ('YEARS_BETWEEN ',YEARS_BETWEEN(FUTURE_DATE,NOW))\n )\nSELECT * FROM DATES; ", "Back to Top\n<a id='mbetween'></a>\nMONTHS_BETWEEN Function\nYou may have noticed that the MONTHS_BETWEEN function was not in the previous list of functions. The\nreason for this is that the value returned for MONTHS_BETWEEN is different from the other functions. The MONTHS_BETWEEN\nfunction returns a DECIMAL value rather than an integer value. The reason for this is that the duration of a\nmonth is not as precise as a day, week or year. The following example will show how the duration is \na decimal value rather than an integer. 
You could always truncate the value if you want an integer.", "%%sql\nWITH DATES(FUNCTION, RESULT) AS (\n VALUES \n ('0 MONTH ',MONTHS_BETWEEN(NOW, NOW)),\n ('1 MONTH ',MONTHS_BETWEEN(NOW + 1 MONTH, NOW)), \n ('1 MONTH + 1 DAY',MONTHS_BETWEEN(NOW + 1 MONTH + 1 DAY, NOW)), \n ('LEAP YEAR ',MONTHS_BETWEEN('2016-02-01','2016-03-01')),\n ('NON-LEAP YEAR ',MONTHS_BETWEEN('2015-02-01','2015-03-01'))\n )\nSELECT * FROM DATES", "Back to Top\n<a id='duration'></a>\nDate Duration Functions\nAn alternate way of representing date durations is through the use of an integer with the format YYYYMMDD where\nthe YYYY represents the year, MM for the month and DD for the day. Date durations are easier to manipulate than\ntimestamp values and take up substantially less storage.\nThere are two new functions.\n\nYMD_BETWEEN returns a numeric value that specifies the number of full years, full months, and full days between two datetime values\nAGE returns a numeric value that represents the number of full years, full months, and full days between the current timestamp and the argument\n\nThis SQL statement will return various AGE calculations based on the current timestamp.", "%%sql\nWITH DATES(FUNCTION, RESULT) AS (\n VALUES \n ('AGE + 1 DAY ',AGE(NOW - 1 DAY)),\n ('AGE + 1 MONTH ',AGE(NOW - 1 MONTH)),\n ('AGE + 1 YEAR ',AGE(NOW - 1 YEAR)),\n ('AGE + 1 DAY + 1 MONTH ',AGE(NOW - 1 DAY - 1 MONTH)),\n ('AGE + 1 DAY + 1 YEAR ',AGE(NOW - 1 DAY - 1 YEAR)),\n ('AGE + 1 DAY + 1 MONTH + 1 YEAR',AGE(NOW - 1 DAY - 1 MONTH - 1 YEAR))\n )\nSELECT * FROM DATES", "The YMD_BETWEEN function is similar to the AGE function except that it takes two date arguments. 
We can\nsimulate the AGE function by supplying the NOW function to the YMD_BETWEEN function.", "%%sql\nWITH DATES(FUNCTION, RESULT) AS (\n VALUES \n ('1 DAY ',YMD_BETWEEN(NOW,NOW - 1 DAY)),\n ('1 MONTH ',YMD_BETWEEN(NOW,NOW - 1 MONTH)),\n ('1 YEAR ',YMD_BETWEEN(NOW,NOW - 1 YEAR)),\n ('1 DAY + 1 MONTH ',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 MONTH)),\n ('1 DAY + 1 YEAR ',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 YEAR)),\n ('1 DAY + 1 MONTH + 1 YEAR',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 MONTH - 1 YEAR))\n )\nSELECT * FROM DATES ", "Back to Top\n<a id='overlaps'></a>\nOVERLAPS Predicate\nThe OVERLAPS predicate is used to determine whether two chronological periods overlap. This is not a \nfunction within DB2, but rather a special SQL syntax extension. \nA chronological period is specified by a pair of date-time expressions. The first expression specifies\nthe start of a period; the second specifies its end.\nPython\n(start1,end1) OVERLAPS (start2, end2)\nThe beginning and end values are not included in the periods. The following \nsummarizes the overlap logic. For example, the periods 2016-10-19 to 2016-10-20 \nand 2016-10-20 to 2016-10-21 do not overlap. 
\nFor instance, the following interval does not overlap.", "%%sql\nVALUES\n CASE\n WHEN \n (NOW, NOW + 1 DAY) OVERLAPS (NOW + 1 DAY, NOW + 2 DAYS) THEN 'Overlaps'\n ELSE\n 'No Overlap'\n END", "If the first date range is extended by one day then the range will overlap.", "%%sql\nVALUES\n CASE\n WHEN \n (NOW, NOW + 2 DAYS) OVERLAPS (NOW + 1 DAY, NOW + 2 DAYS) THEN 'Overlaps'\n ELSE\n 'No Overlap'\n END", "Identical date ranges will overlap.", "%%sql\nVALUES\n CASE\n WHEN \n (NOW, NOW + 1 DAY) OVERLAPS (NOW, NOW + 1 DAY) THEN 'Overlaps'\n ELSE\n 'No Overlap'\n END", "Back to Top\n<a id='utc'></a>\nUTC Time Conversions\nDb2 has two functions that allow you to translate timestamps to and from UTC (Coordinated Universal Time).\nThe FROM_UTC_TIMESTAMP scalar function returns a TIMESTAMP that is converted from Coordinated Universal Time \nto the time zone specified by the time zone string. \nThe TO_UTC_TIMESTAMP scalar function returns a TIMESTAMP that is converted to Coordinated Universal Time \nfrom the timezone that is specified by the timezone string. \nThe format of the two functions is:\nPython\nFROM_UTC_TIMESTAMP( expression, timezone )\nTO_UTC_TIMESTAMP( expression, timezone)\nThe return value from each of these functions is a timestamp. The \"expression\" is a timestamp that\nyou want to convert to the local timezone (or convert to UTC). The timezone is \nan expression that specifies the time zone that the expression is to be adjusted to. \nThe value of the timezone-expression must be a time zone name from the Internet Assigned Numbers Authority (IANA)\ntime zone database. 
The standard format for a time zone name in the IANA database is Area/Location, where:\n\nArea is the English name of a continent, ocean, or the special area 'Etc'\nLocation is the English name of a location within the area; usually a city, or small island\n\nExamples:\n\n\"America/Toronto\"\n\"Asia/Sakhalin\"\n\"Etc/UTC\" (which represents Coordinated Universal Time)\n\nFor complete details on the valid set of time zone names and the rules that are associated with those time zones,\nrefer to the IANA time zone database. The database server uses version 2010c of the IANA time zone database. \nThe result is a timestamp, adjusted from/to the Coordinated Universal Time time zone to the time zone \nspecified by the timezone-expression. If the timezone-expression returns a value that is not a time zone \nin the IANA time zone database, then the value of expression is returned without being adjusted.\nThe timestamp adjustment is done by first applying the raw offset from Coordinated Universal Time of the \ntimezone-expression. If Daylight Saving Time is in effect at the adjusted timestamp for the time zone \nthat is specified by the timezone-expression, then the Daylight Saving Time offset is also applied \nto the timestamp.\nTime zones that use Daylight Saving Time have ambiguities at the transition dates. When a time zone \nchanges from standard time to Daylight Saving Time, a range of time does not occur as it is skipped \nduring the transition. When a time zone changes from Daylight Saving Time to standard time, \na range of time occurs twice. Ambiguous timestamps are treated as if they occurred when standard time \nwas in effect for the time zone. \nConvert the Coordinated Universal Time timestamp '2011-12-25 09:00:00.123456' to the 'Asia/Tokyo' time zone. 
\nThe following returns a TIMESTAMP with the value '2011-12-25 18:00:00.123456'.", "%%sql\nVALUES FROM_UTC_TIMESTAMP(TIMESTAMP '2011-12-25 09:00:00.123456', 'Asia/Tokyo');", "Convert the Coordinated Universal Time timestamp '2014-11-02 06:55:00' to the 'America/Toronto' time zone. \nThe following returns a TIMESTAMP with the value '2014-11-02 01:55:00'.", "%%sql\nVALUES FROM_UTC_TIMESTAMP(TIMESTAMP'2014-11-02 06:55:00', 'America/Toronto');", "Convert the Coordinated Universal Time timestamp '2015-03-02 06:05:00' to the 'America/Toronto' \ntime zone. The following returns a TIMESTAMP with the value '2015-03-02 01:05:00'.", "%%sql\nVALUES FROM_UTC_TIMESTAMP(TIMESTAMP'2015-03-02 06:05:00', 'America/Toronto');", "Convert the timestamp '1970-01-01 00:00:00' to the Coordinated Universal Time timezone from the 'America/Denver'\ntimezone. The following returns a TIMESTAMP with the value '1970-01-01 07:00:00'.", "%%sql\nVALUES TO_UTC_TIMESTAMP(TIMESTAMP'1970-01-01 00:00:00', 'America/Denver');", "Using UTC Functions\nOne of the applications for using the UTC is to take the transaction timestamp and normalize it across\nall systems that access the data. You can convert the timestamp to UTC on insert and then when it is \nretrieved, it can be converted to the local timezone.\nThis example will use a number of techniques to hide the complexity of changing timestamps to local timezones.\nThe following SQL will create our base transaction table (TXS_BASE) that will be used throughout the\nexample.", "%%sql -q\nDROP TABLE TXS_BASE;\nCREATE TABLE TXS_BASE\n (\n ID INTEGER NOT NULL,\n CUSTID INTEGER NOT NULL,\n TXTIME_UTC TIMESTAMP NOT NULL\n );", "The UTC functions will be written to take advantage of a local timezone variable called TIME_ZONE. This\nvariable will contain the timezone of the server (or user) that is running the transaction. 
In this \ncase we are using the timezone in Toronto, Canada.", "%%sql\nCREATE OR REPLACE VARIABLE TIME_ZONE VARCHAR(255) DEFAULT('America/Toronto');", "The SET Command can be used to update the TIME_ZONE to the current location we are in.", "%sql SET TIME_ZONE = 'America/Toronto'", "In order to retrieve the value of the current timezone, we take advantage of a simple user-defined function\ncalled GET_TIMEZONE. It just retrieves the contents of the current TIME_ZONE variable that we set up.", "%%sql\nCREATE OR REPLACE FUNCTION GET_TIMEZONE()\n RETURNS VARCHAR(255)\nLANGUAGE SQL CONTAINS SQL\n RETURN (TIME_ZONE)", "The TXS view is used by all SQL statements rather than the TXS_BASE table. The reason for this is to \ntake advantage of INSTEAD OF triggers that can manipulate the UTC without modifying the original SQL.\nNote that when the data is returned from the view that the TXTIME field is converted from UTC to the current\nTIMEZONE that we are in.", "%%sql \nCREATE OR REPLACE VIEW TXS AS\n (\n SELECT\n ID,\n CUSTID,\n FROM_UTC_TIMESTAMP(TXTIME_UTC,GET_TIMEZONE()) AS TXTIME\n FROM\n TXS_BASE\n )", "An INSTEAD OF trigger (INSERT, UPDATE, and DELETE) is created against the TXS view so that any insert or \nupdate on a TXTIME column will be converted back to the UTC value. 
From an application perspective, \nwe are using the local time, not the UTC time.", "%%sql -d\nCREATE OR REPLACE TRIGGER I_TXS\n INSTEAD OF INSERT ON TXS\n REFERENCING NEW AS NEW_TXS\n FOR EACH ROW MODE DB2SQL\nBEGIN ATOMIC\n INSERT INTO TXS_BASE VALUES (\n NEW_TXS.ID,\n NEW_TXS.CUSTID,\n TO_UTC_TIMESTAMP(NEW_TXS.TXTIME,GET_TIMEZONE())\n );\nEND\n@\n\nCREATE OR REPLACE TRIGGER U_TXS\n INSTEAD OF UPDATE ON TXS\n REFERENCING NEW AS NEW_TXS OLD AS OLD_TXS\n FOR EACH ROW MODE DB2SQL\nBEGIN ATOMIC\n UPDATE TXS_BASE \n SET (ID, CUSTID, TXTIME_UTC) = \n (NEW_TXS.ID,\n NEW_TXS.CUSTID,\n TO_UTC_TIMESTAMP(NEW_TXS.TXTIME,TIME_ZONE)\n )\n WHERE \n TXS_BASE.ID = OLD_TXS.ID\n ;\nEND\n@\n\nCREATE OR REPLACE TRIGGER D_TXS\n INSTEAD OF DELETE ON TXS\n REFERENCING OLD AS OLD_TXS\n FOR EACH ROW MODE DB2SQL\nBEGIN ATOMIC\n DELETE FROM TXS_BASE \n WHERE \n TXS_BASE.ID = OLD_TXS.ID\n ;\nEND\n@", "At this point in time(!) we can start inserting records into our table. We have already set the timezone\nto be Toronto, so the next insert statement will take the current time (NOW) and insert it into the table. \nFor reference, here is the current time.", "%sql VALUES NOW", "We will insert one record into the table and immediately retrieve the result.", "%%sql\nINSERT INTO TXS VALUES(1,1,NOW);\n\nSELECT * FROM TXS;", "Note that the timsstamp appears to be the same as what we insert (plus or minus a few seconds). What actually\nsits in the base table is the UTC time.", "%sql SELECT * FROM TXS_BASE", "We can modify the time that is returned to us by changing our local timezone. 
The statement will make \nthe system think we are in Vancouver.", "%sql SET TIME_ZONE = 'America/Vancouver'", "Retrieving the results will show that the timestamp has shifted by 3 hours (Vancouver is 3 hours behind\nToronto).", "%sql SELECT * FROM TXS", "So what happens if we insert a record into the table now that we are in Vancouver?", "%%sql\nINSERT INTO TXS VALUES(2,2,NOW);\nSELECT * FROM TXS;", "The data retrieved reflects the fact that we are now in Vancouver from an application perspective. Looking at the\nbase table and you will see that everything has been converted to UTC time.", "%sql SELECT * FROM TXS_BASE", "Finally, we can switch back to Toronto time and see when the transactions were done. You will see that from a\nToronto perspetive tht the transactions were done three hours later because of the timezone differences.", "%%sql\nSET TIME_ZONE = 'America/Toronto';\nSELECT * FROM TXS;", "Back to Top\nCredits: IBM 2018, George Baklarz [baklarz@ca.ibm.com]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PMEAL/OpenPNM
examples/contrib/maze_solver.ipynb
mit
[ "Maze Solver\nIn this notebook, we write a maze solver by solving the Poisson equation with two Dirichlet boundary conditions imposed on the two faces that correspond to the start and end of the maze, respectively. \nThe logic is pretty simple: once we have the solution, we just need to start off from one face and follow the gradient. Since the gradient in the deadends is almost close to 0, following the nonzero gradient should guide us toward the other side of the maze.\nWe implement two different approaches:\n\n\nDirect numerical simulation\nHere, we first convert the image into a Cubic network, trim the pores that correspond to the walls, and finally run a basic OhmicConduction (or FickianDiffusion) on the resulting trimmed network.\n\n\nNetwork extraction\nHere, we first use the SNOW algorithm to extract the equivalent network of the maze. Note that the nodes in the equivalent network will not exactly give us the corners of the maze, but at least it gives us a rough idea, enough for solving the maze! Then, like the first approach, we run a basic OhmicConduction on the extracted network. 
The advantage of this approach is that it's way faster due to much fewer unknowns.\n\n\nNote: Inspired by this post by Jeremy Theler https://www.linkedin.com/posts/jeremytheler_how-to-solve-a-maze-without-ai-use-laplaces-activity-6831291311832760320-x9d5", "# Install the required pmeal packages in the current Jupyter kernel\nimport sys\ntry:\n import openpnm as op\nexcept:\n !{sys.executable} -m pip install openpnm\n import openpnm as op\ntry:\n import porespy as ps\nexcept:\n !{sys.executable} -m pip install porespy\n import porespy as ps\n\nimport requests\nimport numpy as np\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\nimport porespy as ps\nimport openpnm as op\nfrom openpnm.utils import tic, toc\nfrom PIL import Image\nfrom io import BytesIO\n%config InlineBackend.figure_formats = ['svg']\nws = op.Workspace()\nws.settings[\"loglevel\"] = 60", "Load maze samples", "im_size = 'medium'\n\nif im_size == 'small':\n url = 'https://imgur.com/ZLbV4eh.png'\nelif im_size == 'medium':\n url = 'https://imgur.com/A3Jx8SJ.png'\nelse:\n url = 'https://imgur.com/FLJ21e5.png'\n\nresponse = requests.get(url)\nimg = Image.open(BytesIO(response.content))\nim = np.array(img.getdata()).reshape(img.size[0], img.size[1], 4)[:, :, 0]\nim = im == 255\n\nNx, Ny, = im.shape\n\nfig, ax = plt.subplots(figsize=(5, 5))\nax.imshow(im, cmap='Blues', interpolation=\"none\")\nax.axis(\"off\");", "Approach A: Direct numerical simulation\nThicken the walls to reduce number of unknowns", "# Structuring element for thickening walls\nstrel = np.array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\n\n# Save some computation by thickening the walls\ndef thicken_wall(im):\n return ~ndimage.morphology.binary_dilation(~im, structure=strel)\n\nfor _ in range(5):\n im = thicken_wall(im)\n\nfig, ax = plt.subplots(figsize=(5, 5))\nax.imshow(im, cmap='Blues', interpolation=\"none\")\nax.axis(\"off\")", "Convert the maze into a Cubic network", "# Get top and bottom boundaries\nBP_top = 
np.zeros_like(im)\nBP_bot = np.zeros_like(im)\nBP_top[0, :] = True\nBP_bot[-1, :] = True\nBP_top *= im\nBP_bot *= im\n\n# Make a cubis network with same dimensions as image and assign the props\nnet = op.network.Cubic(shape=[Nx, Ny, 1])\nnet['pore.index'] = np.arange(0, net.Np)\nnet['pore.BP_top'] = BP_top.flatten()\nnet['pore.BP_bot'] = BP_bot.flatten()\n\n# Trim wall pores\nop.topotools.trim(network=net, pores=~im.flatten())", "Solve the Poisson equation ($\\nabla^2 \\phi = 0$) on the maze", "# Set up a dummy phase and apply uniform arbitrary conductance\nphase = op.phases.GenericPhase(network=net)\nphase['throat.electrical_conductance'] = 1.0\n\n# Run algorithm\nalg = op.algorithms.OhmicConduction(network=net, phase=phase)\nalg.set_value_BC(pores=net.pores('BP_top'), values=0.0)\nalg.set_value_BC(pores=net.pores('BP_bot'), values=1.0)\ntic()\nalg.run()\ndt = toc(quiet=True);\nprint(f'Solve time: {dt:.3f} s')", "Follow the gradient!", "# Calculate flux in throats and show in pores\n# Note: No need to calculate pore.rate as it auto interpolates from throat values\nphase['throat.rate'] = alg.rate(throats=net.Ts, mode='single')\nrate_im = np.ones([Nx, Ny]).flatten() * np.nan\nrate_im[net['pore.index']] = phase['pore.rate']\nrate_im = rate_im.reshape([Nx, Ny])\n\n# Plot the maze solution\nfig, ax = plt.subplots(figsize=(5, 5))\nax.imshow(rate_im, cmap='jet', interpolation=\"none\")\nax.axis(\"off\");", "Approach B: Network extraction\nNetwork extraction using SNOW algorithm", "# We need to pass image transpose since matrix xy coords is inverted\n# i.e., row is x and col is y, whereas in Cartesian convention, it's the opposite.\nout = ps.networks.snow2(im.T) \nproj = op.io.PoreSpy.import_data(out.network)\nnet = proj.network", "Solve the Poisson equation ($\\nabla^2 \\phi = 0$) on the extracted network", "# Set up a dummy phase and apply uniform arbitrary conductance\nphase = op.phases.GenericPhase(network=net)\nphase['throat.electrical_conductance'] = 1.0\n\n# Run 
algorithm\nalg = op.algorithms.OhmicConduction(network=net, phase=phase)\nalg.set_value_BC(pores=net.pores('ymin'), values=0.0)\nalg.set_value_BC(pores=net.pores('ymax'), values=1.0)\ntic()\nalg.run()\ndt = toc(quiet=True);\nprint(f'Solve time: {dt:.3f} s')", "Follow the gradient!", "# Get throat rate values\nphase['throat.rate'] = alg.rate(throats=net.Ts, mode='single')\n\n# Plot the maze solution (i.e., throat rates!)\nfig, ax = plt.subplots(figsize=(5, 5))\nop.topotools.plot_connections(net, ax=ax,\n color_by=phase[\"throat.rate\"],\n linewidth=2, cmap=\"Wistia\")\nax.imshow(im, interpolation=\"none\", cmap='Blues');\nax.axis(\"off\");" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
zommiommy/Norm-Approximation
Norm Approximation.ipynb
mit
[ "Norm approximation on restricted quantized domain\nFast approximation of the norm over the value of a 10bit unsigned int \nusing batch gradient descent to minimize the squared error\nApproximation formula:\n$$\\gamma (x+y) \\approx \\sqrt{x^2+y^2} \\ x,y \\in [0,1023] \\subseteq \\mathbb{N}$$\nwhere $$\\gamma = 0.7531854654594905$$\nimport needed modules", "#numerical library\nimport numpy as np\n#plot library\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\nfrom pprint import pprint", "inizialization of the variable", "x,y = np.indices([1024,1024])", "Guessing the speed gain if this work", "%timeit (x**2+y**2)**0.5\n\n%timeit 0.7531854654594905*(x+y)", "So if this approximation work we can stimate the norm over x7 time faster\n\nhyper parameters initialization", "epoch_of_training = 1000\nlearning_rate = 1e-8\ngamma = 1", "Initial Error\nError formula:\n$$E = \\frac{1}{2}\\sum_{1}^{n} (\\gamma (x+y)-\\sqrt{x^2+y^2})^2$$", "init_sq_err = np.sum(0.5*((gamma*(x+y) - (x**2+y**2)**0.5)**2))\ninit_sq_err", "the partial derivative of the error\nPartial derivative formula:\n$$\\frac{\\partial }{\\partial \\gamma}E =\\sum_{1}^{n} (\\gamma (x+y)-\\sqrt{x^2+y^2})(x+y)$$\nThe stocastic grdient descent\n$$\\gamma^{i} = \\gamma^{i-1} - \\eta \\frac{\\partial }{\\partial \\gamma}E$$", "for i in range(epoch_of_training):\n gamma -= learning_rate * np.mean((gamma*(x+y)-(x**2+y**2)**0.5)*(x+y))", "Results", "gamma\n\nfin_sq_err = np.sum(0.5*((gamma*(x+y) - (x**2+y**2)**0.5)**2))\nfin_sq_err\n\ndelta_sq_err = init_sq_err - fin_sq_err\ndelta_sq_err\n\nError = abs(gamma*(x+y) - (x**2+y**2)**0.5)\nprint(np.max(Error))\nprint(np.mean(Error))\nprint(np.min(Error))", "Percentual Error Plot", "fig = plt.figure(figsize=[12,12])\nax = fig.gca(projection='3d')\n\nX = np.arange(1, 1024, 8)\nY = np.arange(1, 1024, 8)\nX, Y = np.meshgrid(X, Y)\nZ = np.sqrt(X**2 + Y**2)\nF = 
abs(gamma*(X+Y)-Z)/(Z)\n\nsurf = ax.plot_surface(X, Y, F, rstride=2, cstride=2, cmap=cm.jet,linewidth=1)\n\nax.set_xlabel('X')\nax.set_xlim(-10, 1034)\nax.set_ylabel('Y')\nax.set_ylim(-10, 1034)\nax.set_zlabel('Z')\nax.set_zlim(0, 0.30)\n\nax.invert_yaxis()\n\nax.zaxis.set_major_locator(LinearLocator(10))\nax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\nfig.colorbar(surf)\n\nplt.show()\n", "Study of the error\nAlong the axis the error is :\nes $x = 0$\n$$Error = |\\gamma - 1| y$$\nwitch is where there is the biggest relative error\nAlong the bisecting rect $y = x$ the error is:\n$$Error = | 2\\gamma - \\sqrt{2} |y$$\n0 Error points:\n$$\\gamma (x+y) = \\sqrt{x^2+y^2}$$\n$$\\gamma^2 (x+y)^2 = x^2+y^2$$\n$$(\\gamma^2-1)x^2 + (\\gamma^2-1)y^2 + 2\\gamma^2 xy = 0$$\nif $\\gamma^2 = 1$ then $xy = 0$ so the equality hold on the axis\nelse $x^2 + y^2 + 2\\frac{\\gamma^2}{(\\gamma^2-1)} xy = 0$ is a conic\nthe charateristic martixs are :\n$$B = \\begin{pmatrix}1 & \\frac{\\gamma^2}{\\gamma^2 -1}\\ \\frac{\\gamma^2}{\\gamma^2 -1} & 1\\end{pmatrix}\\A = \\begin{pmatrix} 0 & 0 & 0 \\\n0 &\n1\n & \\frac{\\gamma^2}{\\gamma^2 -1}\\ \n0 &\n\\frac{\\gamma^2}{\\gamma^2 -1} & \n1\n\\end{pmatrix}$$\n$det(A) = 0$ so it's a degenerate conic\n$det(B) = 1 - (\\frac{\\gamma^2}{\\gamma^2-1})^2 = \\frac{1-2\\gamma^2}{(\\gamma^2 -1)^2}$\n$det(B) > 0 \\Rightarrow 1-2\\gamma^2 > 0 \\Rightarrow \\left | \\gamma \\right | < \\frac{1}{\\sqrt{2}}$\nso if $\\left | \\gamma \\right | > \\frac{1}{\\sqrt{2}} \\approx 0.707$ the conic is two real distint rect\nelse the conic is two immaginiary coniugate rect\nso if they ar real the equation of the rect are:\n$$x_{1,2} = -y\\frac{\\gamma^2}{\\gamma^2-1}\\pm \\sqrt{y^2\\frac{\\gamma^4}{(\\gamma^2-1)^2} - 1}$$" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
serge-sans-paille/talks
pyconfr2015/PythranFR 2015.ipynb
mit
[ "We don't need your loop\nby serge_sans_paille\nQuarkslab / Télécom Bretagne / Namek\nPyConFR 2015\nUne brève histoire de la boucle\nAssembleur\n.L3:\n addsd (%rsi), %xmm0\n addq $8, %rsi\n cmpq %rax, %rsi\n jne .L3\nFORTRAN\nfortran\n do 10 i = 1, n\n sum = sum + A(i)\n 10 continue\nC\nc\nfor(unsigned i = 0; i &lt; n ; ++i)\n sum += A[i];\nAPL\napl\n+/ A\nPython", "import numpy\nn = 10\nA = numpy.random.random(n)\nprint(A)", "Fortran style", "s = 0\nfor i in range(n):\n s += A[i]\nprint(s)", "APL style", "s = numpy.sum(A)\nprint(s)", "Une préférence ?\nQuestion de performances\nBoucles explicites", "n = 1000000\nA = numpy.random.random(n)\n\ndef explicit_sum(seq):\n s = 0\n for elem in seq:\n s += elem ** 2\n return s\n\n%timeit explicit_sum(A)\n\n%timeit numpy.sum(A**2)\n\n### Cython\n\n%load_ext Cython\n\n%%cython\ncimport numpy as np\ncimport cython\n\n@cython.boundscheck(False)\n@cython.wraparound(False)\ndef cython_sum(np.ndarray[double, ndim=1] A):\n cdef double s = 0\n cdef int i, n\n n = len(A)\n for i in range(n):\n s += A[i] * A[i]\n return s\n\n%timeit cython_sum(A)\n\n%%file pythran_sum.py\n\n#pythran export pythran_sum(float64[])\n\nimport numpy\ndef pythran_sum(A):\n return numpy.sum(A**2)\n\n!python -m pythran.run pythran_sum.py -DUSE_BOOST_SIMD -O3 -march=native\n\n!python -m timeit -s 'from pythran_sum import pythran_sum; import numpy; n = 1000000 ; A = numpy.random.random(n)' 'pythran_sum(A)'", "We don't need your loops!\n\nUn appel de fonction a plus de sens qu'une boucle pour un humain\nUn appel de fonction a plus de sens qu'une boucle pour un compilateur\n\nExemples de boucles amenées à disparaitre", "n, m = 100, 200\nA = numpy.random.random((n,m))\n\ns = 0.\nfor i in range(1, n-1):\n for j in range(1, m-1):\n s += A[i,j]\nprint(s)\n\nprint(numpy.sum(A[1:-1, 1:-1]))\n\ns = 0\nfor i in range(n):\n for j in range(m):\n if A[i,j] < .5:\n s += 1\nprint(s)\n\nprint(numpy.sum(A < .5))\n\ns = 0\nB = numpy.empty_like(A)\nfor i in range(n):\n for j 
in range(m):\n if A[i,j] < .5:\n B[i,j] = A[i,j]\n else:\n B[i,j] = 0.\nprint(B)\n\nprint(numpy.where(A<.5,A,0.))\n\n### Piège!\n\nn = 100\nB = numpy.arange(100)\nshift = 3\nfor i in range(shift, n):\n B[i] = 1 + B[i - shift]\nprint(B)\n\nB = numpy.arange(100)\nB[shift:] = 1 + B[:-shift]\nprint(B)", "Les scientifiquent codent en Numpy de haut niveau !", "%%file grayscott.py\n#pythran export GrayScott(int, float, float, float, float)\n\nimport numpy as np\ndef GrayScott(counts, Du, Dv, F, k):\n n = 300\n U = np.zeros((n+2,n+2), dtype=np.float32)\n V = np.zeros((n+2,n+2), dtype=np.float32)\n u, v = U[1:-1,1:-1], V[1:-1,1:-1]\n\n r = 20\n u[:] = 1.0\n U[n/2-r:n/2+r,n/2-r:n/2+r] = 0.50\n V[n/2-r:n/2+r,n/2-r:n/2+r] = 0.25\n\n u += 0.15*np.random.random((n,n))\n v += 0.15*np.random.random((n,n))\n\n for i in range(counts):\n Lu = ( U[0:-2,1:-1] +\n U[1:-1,0:-2] - 4*U[1:-1,1:-1] + U[1:-1,2:] +\n U[2: ,1:-1] )\n Lv = ( V[0:-2,1:-1] +\n V[1:-1,0:-2] - 4*V[1:-1,1:-1] + V[1:-1,2:] +\n V[2: ,1:-1] )\n uvv = u*v*v\n u += Du*Lu - uvv + F*(1 - u)\n v += Dv*Lv + uvv - (F + k)*v\n\n return V\n\n\nfrom grayscott import GrayScott\n\n%timeit GrayScott(40, 0.16, 0.08, 0.04, 0.06)\n\n%%cython\ncimport cython\nimport numpy as np\ncimport numpy as np\n\ncpdef CythonGrayScott(int counts, double Du, double Dv, double F, double k):\n cdef int n = 300\n cdef np.ndarray U = np.zeros((n+2,n+2), dtype=np.float_)\n cdef np.ndarray V = np.zeros((n+2,n+2), dtype=np.float_)\n cdef np.ndarray u = U[1:-1,1:-1]\n cdef np.ndarray v = V[1:-1,1:-1]\n\n cdef int r = 20\n u[:] = 1.0\n U[n/2-r:n/2+r,n/2-r:n/2+r] = 0.50\n V[n/2-r:n/2+r,n/2-r:n/2+r] = 0.25\n u += 0.15*np.random.random((n,n))\n v += 0.15*np.random.random((n,n))\n\n cdef np.ndarray Lu = np.zeros_like(u)\n cdef np.ndarray Lv = np.zeros_like(v)\n cdef int i, c, r1, c1, r2, c2\n cdef double uvv\n\n cdef double[:, ::1] bU = U\n cdef double[:, ::1] bV = V\n cdef double[:, ::1] bLu = Lu\n cdef double[:, ::1] bLv = Lv\n\n for i in range(counts):\n for 
r in range(n):\n r1 = r + 1\n r2 = r + 2\n for c in range(n):\n c1 = c + 1\n c2 = c + 2\n bLu[r,c] = bU[r1,c2] + bU[r1,c] + bU[r2,c1] + bU[r,c1] - 4*bU[r1,c1]\n bLv[r,c] = bV[r1,c2] + bV[r1,c] + bV[r2,c1] + bV[r,c1] - 4*bV[r1,c1]\n\n for r in range(n):\n r1 = r + 1\n for c in range(n):\n c1 = c + 1\n uvv = bU[r1,c1]*bV[r1,c1]*bV[r1,c1]\n bU[r1,c1] += Du*bLu[r,c] - uvv + F*(1 - bU[r1,c1])\n bV[r1,c1] += Dv*bLv[r,c] + uvv - (F + k)*bV[r1,c1]\n return V\n\n\n%timeit GrayScott(40, 0.16, 0.08, 0.04, 0.06)\n%timeit CythonGrayScott(40, 0.16, 0.08, 0.04, 0.06)\n\n!python -m pythran.run -O3 -march=native grayscott.py -o pythran_grayscott.so\n\n! python -m timeit -s 'from pythran_grayscott import GrayScott' 'GrayScott(40, 0.16, 0.08, 0.04, 0.06)'", "Optimisations faites par Pythran\n\nFusion de boucle (expression template + forward subsitution)\nPropagation de constantes interprocédurales\nÉlimination de code mort\nÉvaluation paresseuse\nsubstitution de motifs\nDéroulage de boucles étendues\nVectorisation (génération de code SIMD)\n\nFonctionnalités supportées par Pythran\n\nPython2.7\nException\nList/set/dict comprehension\nGénérateurs, generator expression\nRécursion\nlambda, fonctions imbriquées, fermetures\ntype destructuring\n\nMais pas\n\nPython3 (plus compliqué qu'il n'y parait!)\nClasses utilisateurs\nCode implicitement non statiquement typé\nGestionnaires with \n\nPaquets, modules et fonctions supportés\n\n__builtin__, math, cmath\nbisect, functools (juste partial en fait), itertools\noperator, random, string, time\nnumpy (dont numpy.random mais pas numpy.linalg)\n\nSéduits ? Pas convaincus ? Essayez le !\n\npip install pythran\nhttp://pythonhosted.org/pythran/" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
dsacademybr/PythonFundamentos
Cap02/Notebooks/DSA-Python-Cap02-05-Dicionarios.ipynb
gpl-3.0
[ "<font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 2</font>\nDownload: http://github.com/dsacademybr", "# Versão da Linguagem Python\nfrom platform import python_version\nprint('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())", "Dicionários", "# Isso é uma lista\nestudantes_lst = [\"Mateus\", 24, \"Fernanda\", 22, \"Tamires\", 26, \"Cristiano\", 25] \n\nestudantes_lst\n\n# Isso é um dicionário\nestudantes_dict = {\"Mateus\":24, \"Fernanda\":22, \"Tamires\":26, \"Cristiano\":25}\n\nestudantes_dict \n\nestudantes_dict[\"Mateus\"]\n\nestudantes_dict[\"Pedro\"] = 23\n\nestudantes_dict[\"Pedro\"]\n\nestudantes_dict[\"Tamires\"]\n\nestudantes_dict.clear()\n\nestudantes_dict\n\ndel estudantes_dict\n\nestudantes_dict\n\nestudantes = {\"Mateus\":24, \"Fernanda\":22, \"Tamires\":26, \"Cristiano\":25}\n\nestudantes\n\nlen(estudantes)\n\nestudantes.keys()\n\nestudantes.values()\n\nestudantes.items()\n\nestudantes2 = {\"Maria\":27, \"Erika\":28, \"Milton\":26}\n\nestudantes2\n\nestudantes.update(estudantes2)\n\nestudantes\n\ndic1 = {}\n\ndic1\n\ndic1[\"key_one\"] = 2\n\nprint(dic1)\n\ndic1[10] = 5\n\ndic1\n\ndic1[8.2] = \"Python\"\n\ndic1\n\ndic1[\"teste\"] = 5\n\ndic1\n\ndict1 = {}\n\ndict1\n\ndict1[\"teste\"] = 10\n\ndict1[\"key\"] = \"teste\"\n\n# Atenção, pois chave e valor podem ser iguais, mas representam coisas diferentes.\ndict1\n\ndict2 = {}\n\ndict2[\"key1\"] = \"Big Data\"\n\ndict2[\"key2\"] = 10\n\ndict2[\"key3\"] = 5.6\n\ndict2\n\na = dict2[\"key1\"]\n\nb = dict2[\"key2\"]\n\nc = dict2[\"key3\"]\n\na, b, c\n\n# Dicionário de listas\ndict3 = {'key1':1230,'key2':[22,453,73.4],'key3':['leite','maça','batata']}\n\ndict3\n\ndict3['key2']\n\n# Acessando um item da lista, dentro do dicionário\ndict3['key3'][0].upper()\n\n# Operações com itens da lista, dentro do dicionário\nvar1 = dict3['key2'][0] - 2\n\nvar1\n\n# Duas operações no mesmo comando, para atualizar um item dentro da lista\ndict3['key2'][0] -= 2\n\ndict3", 
"Criando dicionários aninhados", "# Criando dicionários aninhados\ndict_aninhado = {'key1':{'key2_aninhada':{'key3_aninhada':'Dict aninhado em Python'}}}\n\ndict_aninhado\n\ndict_aninhado['key1']['key2_aninhada']['key3_aninhada']", "Fim\nObrigado\nVisite o Blog da Data Science Academy - <a href=\"http://blog.dsacademy.com.br\">Blog DSA</a>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PyladiesMx/Empezando-con-Python
Pandas Introduction/.ipynb_checkpoints/Untitled-checkpoint.ipynb
mit
[ "Introducción a pandas, una librería para analizar datos\nA petición de la comunidad, vamos a empezar a ver cómo podemos usar herramientas para analizar datos!!!\nPandas es una librería muy usada en \"data science\" ya que te permite cargar y operar rápidamente en python. En lo que se enfoca es en procesar arreglos de datos que tengan la forma n x m en donde \"n\" sean el número de filas (rows) y \"m\" el número de columnas. Cada columna puede contener más de un tipo de datos (no es como en numpy que todo tiene que ser lo mismo).\nAhora sin más preámbulos empecemos a usar pandas!!\nLo primero que hay que hacer es importar la librería. Para ello necesitamos que esté instalada. Todas las personas que tienen anaconda no deben de preocuparse por instalar ninguna otra cosa. El resto debe de hacerlo de acuerdo a como instaló python en un principio.", "import pandas as pd\n#Este comando lo que hace básicamente es decir que quiere accedeer a todas las clases\n#que la librería pandas tiene\nimport numpy as np", "Ahora que ya tenemos pandas podemos empezar a explorar que cosas podemos hacer con esta librería.\nEn pandas hay dos tipos nuevos de estructura de datos. Pregunta rápida, recuerdas los otros tipos de datos que hemos visto??\nDe regreso al punto :P los nuevos tipos de datos se separan en dos grupos: las series y los \"data frames\".\nLas series son el elemento más simple en pandas ya que es una arreglo de n x 1 es decir n número de filas con una sola columna. Veamos un ejemplo:", "lista = ['hola', 'mi', 'nombre', 'es', 'pandas',0.18 ]\nserie = pd.Series(lista)\nprint(lista)\nprint(serie)", "Pregunta 1\n¿Qué diferencias notas entre la lista y la serie?\nAjá!! Exactemente!! esos pequeños numerillos a la izquierda, ¿Qué te imaginas que son?\nPues esos pequeños se llaman índices y son un concepto muy útil que hay que entender porque pandas se maneja principalmente con índices. 
Ahora veamos como podemos nombrar como queramos a esos índices.", "serie2 = pd.Series(lista, index=['H', 'm', 'n', 'e', 'p', '0'])\nserie2", "Te recuerda esto a alguna estructura de datos que ya hemos visto?\nejem, ejem, dic..., ejem, ejem...\nPues si se podría pensar en una serie como una especie de diccionarios, de hecho para crear una serie también puedes partir de un diccionario...", "diccionario = {'Ciudad de México':'Tacos al pastor', 'Puebla':'Chile en nogada', 'Yucatán':'Cochinita'}\n\nserie_dic = pd.Series(diccionario)\nserie_dic\n\nserie_dic.loc['Puebla':'Yucatán']\n\nlista2 = [1,1,1,1,1,1]", "Pregunta 2\n¿Qué crees que pase si queremos hacer una serie con la lista 1 pero que tenga como índices la lista 2?", "ejemplo=pd.Series(lista, lista2)\n\nejemplo[1]", "Como pudiste darte cuenta, en realidad las series sí se parecen a los diccionarios pero no son exactamente lo mismo... Si quieres saber de qué otras maneras puedes crear series, puedes visitar la página de documentación oficial, está super completa!!\nAhora que ya conoces la materia prima de pandas podemos empezar a trabajar con data frames que si lo piensas, serían como un montón de series amarradas entre sí...\nPara crear un data frame hay varias formas, puedes escribirlo todo de la misma forma que hicimos con las series pero también puedes importar una set de datos con el que quieras trabajar.\nVeremos rápidamente como podemos crear un data frame y luego como podemos cargar uno que ya tengamos.", "#Crear un data frame de un arreglo de numpy\narreglo = np.random.randn(7,3)\nprint(arreglo)\ncolumnas = list('ABC')\ndata_frame = pd.DataFrame(arreglo, columns=columnas )\n\ndata_frame", "Si quieres saber mas detalles de lo que la función random.randn de numpy hace, puedes checar esta página\nAsí de simple se puede generar un data frame, pero también podemos hacerlo con diccionarios...", "df_diccionario = pd.DataFrame({'A':list(range(5)),\n 'B': np.random.random_sample(5),\n 
'C':'Jueves'})\n\ndf_diccionario", "Así de simple es crear un data frame. Ahora veamos lo que todos estaban esperando!! Crear data frames a partir de datos que esten en hojas de excel o en csv o txt. Que justamente son los que se generan en los experimentos que algunas de ustedes lleguen a hacer o que otras personas hagan para que ustedes los analicen.\nVamos a ver un ejemplo con un set de datos pequeño.", "mi_df = pd.read_csv('FreeFattyAcids.csv', header=0)\nmi_df", "Este set de datos es de un perfil lipídico obtenido con espectrometría de masas. Son tres condiciones, una control y dos tratamientos, para cada una de ellas se puede ver la concentración de ácidos grasos de cadenas de 12 hasta 26 carbonos. \nPor suerte este data frame es pequeño y podemos visualizar casi todo, pero imaginemos que tenemos 1000000 de columnas, en realidad no necesitamos verlo todo, pero queremos saber más o menos como está organizado nuestro data frame. Para esto podemos acceder a un método de mi_df para ver nada más la \"cabeza\" (o la \"cola\") de nuestros datos.", "mi_df.head()\n\n#Por default nos va a mostras las primeras 5 filas después del encabezado,\n#pero podemos cambiarlo pasando un argumento a head()\nmi_df.head(3)\n\n#Para ver la \"cola\"\nmi_df.tail(3)", "Pero una vez que tenemos los datos ¿qué hacemos con ellos? Pues bien en realidad eso depende mucho de las preguntas que queramos responder empecemos con unas preguntas fáciles.\nPregunta 3\nDigamos que a mí sólo me interesa saber cómo cambia el total, ¿Cómo puedo hacer que en un data frame sólo me muestre la columna \"Sample\" y la columna \"Total\"?\nPara responder esta cuestión, necesitamos saber como hacer \"slicing\" de los data frames, esto es, sacar subgrupos de un data frame para analizarlo más fácilmente. Alguien tiene alguna idea que podamos probar??", "mi_df[0:5]", "En pandas una de las cosas más confusas (especialmente si tienes tiempo trabajando con numpy) es sacar subgrupos de datos. 
La forma en la que pandas está diseñado para esto es a través de índices y encabezados.", "mi_df[\"Sample\"]\n\n#o algo padre que también es posible en pandas es acceder a columnas con puntos\nmi_df.Sample", "Pero si queremos seleccionar múltiples columnas separadas podemos hacer lo siguiente", "mi_df[[\"Sample\",\"16:0\"]][0:4].plot()\n\nimport matplotlib.pyplot as plt\n\nmi_df[0:3]", "Selección por índice\nPara seleccionas sólo por índice tenemos que agregar la terminación .loc después del nombre de nuestro data frame y luego poner los índices que queramos con los nombres de las columnas que nos interesen.\nEsta sección es de la documentación de pandas y dice lo siguiente acerca del atributo .loc.\n\nEste atributos es el método primario de acceso (a los datos). Los siguientes son inputs válidos:\n1. Una etiqueta (que es estrictamente de los índices)\n2. Una lista o arreglo de etiquetas\n3. Un slice de etiquetas (nota, al contrario de lo que se hace en python, el número inicial y final están incluídos!)\n4. Un arreglo booleano\n5. Un \"callable\"", "mi_df.loc[0:4,'Sample']\n\n#¿qué pasa si sólo pongo el nombre de la columna?\nmi_df.loc['Sample']\n\nmi_df.loc[:, 'Sample']\n\nmi_df.loc[mi_df.Sample=='C']#Esto es un slicing dado un arreglo booleano", "Selección por posición.\nEs la más parecida a como opera numpy, y aquí sólo tienes que darle las posiciones que quieres (en números) de filas y columnas. Para esto usamos el atributo .iloc después de nuestro data frame.\nAquí los argumentos válidos son los siguientes:\n\ninteger e.g. 
5\nLista o arreglo de enteros [4, 3, 0]\nUn objeto slice con ints 1:7\nUn arreglo booleano\nUn \"callable\"", "mi_df.iloc[0:3, -1]", "Ahora si hagamos unos ejercicios pa' que amarre\nEjercicio 1\nHas un df que tenga sólo las columnas Sample y Total\nEjercicio 2\nMuestrame que Sample tiene el mayor número de ácidos grasos de cadena 18:0\nEjercicio 3\nCuáles son los tres samples con mayor número de ácidos grasos libres\nPues esto es todo por hoy, espero que no haya sido tan complicado, podemos seguir aprendiendo de pandas si es que hay interes en el grupo y nos vemos en la siguiente reunión.\nGracias por asistir!!!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rcrehuet/Python_for_Scientists_2017
notebooks/2_1_Looping_over_lists.ipynb
gpl-3.0
[ "Looping over lists\nIterating over lists can be confusing. You can mistake looping over the list elements for list items. This simple notbooks aims at pointing the differences.\nLet there be the following list:", "week = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']", "The following for structure cycles over the elements of the list:", "for weekday in week:\n print(\"Today is \",weekday)", "Alternatively we loop over the indices of the list:", "for i in range(len(week)):\n print(\"This is the value of the index, \", i)\n weekday = week[i] #once we have the index we can obtain the corresponding list element\n print(\"Today is \",weekday)", "Third possibility, use enumerate to generate a sequence with pairs of indices and elements:", "for i, weekday in enumerate(week):\n print(\"This is the index \",i)\n print(\"This is the element \",weekday)", "Iterating over elements is more general than iterating based on indices. The reason is that several collections do not have a predefined order and cannot be accessed via indices. As a simple example, if we convert the previous list into a set, we can still iterate over its elements, but we cannot loop over the indices.", "week_set = set(week)\n\nfor weekday in week_set: #Remark that order is arbitrary!\n print(\"Today is \",weekday)\n\n#Try this and read the error:\n\nfor i in range(len(week_set)):\n print(\"This is the value of the index, \", i)\n weekday = week_set[i] \n print(\"Today is \",weekday)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tanmay987/deepLearning
intro-to-tflearn/TFLearn_Digit_Recognition_Solution.ipynb
mit
[ "Handwritten Number Recognition with TFLearn and MNIST\nIn this notebook, we'll be building a neural network that recognizes handwritten numbers 0-9. \nThis kind of neural network is used in a variety of real-world applications including: recognizing phone numbers and sorting postal mail by address. To build the network, we'll be using the MNIST data set, which consists of images of handwritten numbers and their correct labels 0-9.\nWe'll be using TFLearn, a high-level library built on top of TensorFlow to build the neural network. We'll start off by importing all the modules we'll need, then load the data, and finally build the network.", "# Import Numpy, TensorFlow, TFLearn, and MNIST data\nimport numpy as np\nimport tensorflow as tf\nimport tflearn\nimport tflearn.datasets.mnist as mnist", "Retrieving training and test data\nThe MNIST data set already contains both training and test data. There are 55,000 data points of training data, and 10,000 points of test data.\nEach MNIST data point has:\n1. an image of a handwritten digit and \n2. a corresponding label (a number 0-9 that identifies the image)\nWe'll call the images, which will be the input to our neural network, X and their corresponding labels Y.\nWe're going to want our labels as one-hot vectors, which are vectors that holds mostly 0's and one 1. It's easiest to see this in a example. As a one-hot vector, the number 0 is represented as [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], and 4 is represented as [0, 0, 0, 0, 1, 0, 0, 0, 0, 0].\nFlattened data\nFor this example, we'll be using flattened data or a representation of MNIST images in one dimension rather than two. So, each handwritten number image, which is 28x28 pixels, will be represented as a one dimensional array of 784 pixel values. 
\nFlattening the data throws away information about the 2D structure of the image, but it simplifies our data so that all of the training data can be contained in one array whose shape is [55000, 784]; the first dimension is the number of training images and the second dimension is the number of pixels in each image. This is the kind of data that is easy to analyze using a simple neural network.", "# Retrieve the training and test data\ntrainX, trainY, testX, testY = mnist.load_data(one_hot=True)", "Visualize the training data\nProvided below is a function that will help you visualize the MNIST data. By passing in the index of a training example, the function show_digit will display that training image along with it's corresponding label in the title.", "# Visualizing the data\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Function for displaying a training image by it's index in the MNIST set\ndef display_digit(index):\n label = trainY[index].argmax(axis=0)\n # Reshape 784 array into 28x28 image\n image = trainX[index].reshape([28,28])\n plt.title('Training data, index: %d, Label: %d' % (index, label))\n plt.imshow(image, cmap='gray_r')\n plt.show()\n \n# Display the first (index 0) training image\ndisplay_digit(0)", "Building the network\nTFLearn lets you build the network by defining the layers in that network. \nFor this example, you'll define:\n\nThe input layer, which tells the network the number of inputs it should expect for each piece of MNIST data. \nHidden layers, which recognize patterns in data and connect the input to the output layer, and\nThe output layer, which defines how the network learns and outputs a label for a given image.\n\nLet's start with the input layer; to define the input layer, you'll define the type of data that the network expects. For example,\nnet = tflearn.input_data([None, 100])\nwould create a network with 100 inputs. The number of inputs to your network needs to match the size of your data. 
For this example, we're using 784 element long vectors to encode our input data, so we need 784 input units.\nAdding layers\nTo add new hidden layers, you use \nnet = tflearn.fully_connected(net, n_units, activation='ReLU')\nThis adds a fully connected layer where every unit (or node) in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call, it designates the input to the hidden layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeated calling tflearn.fully_connected(net, n_units). \nThen, to set how you train the network, use:\nnet = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')\nAgain, this is passing in the network you've been building. The keywords: \n\noptimizer sets the training method, here stochastic gradient descent\nlearning_rate is the learning rate\nloss determines how the network error is calculated. In this example, with categorical cross-entropy.\n\nFinally, you put all this together to create the model with tflearn.DNN(net).", "# Define the neural network\ndef build_model():\n # This resets all parameters and variables, leave this here\n tf.reset_default_graph()\n \n # Inputs\n net = tflearn.input_data([None, trainX.shape[1]])\n\n # Hidden layer(s)\n net = tflearn.fully_connected(net, 128, activation='ReLU')\n net = tflearn.fully_connected(net, 32, activation='ReLU')\n \n # Output layer and training model\n net = tflearn.fully_connected(net, 10, activation='softmax')\n net = tflearn.regression(net, optimizer='sgd', learning_rate=0.01, loss='categorical_crossentropy')\n \n model = tflearn.DNN(net)\n return model\n\n# Build the model\nmodel = build_model()", "Training the network\nNow that we've constructed the network, saved as the variable model, we can fit it to the data. 
Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively.\nToo few epochs don't effectively train your network, and too many take a long time to execute. Choose wisely!", "# Training\nmodel.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=100, n_epoch=100)", "Testing\nAfter you're satisified with the training output and accuracy, you can then run the network on the test data set to measure it's performance! Remember, only do this after you've done the training and are satisfied with the results.\nA good result will be higher than 95% accuracy. Some simple models have been known to get up to 99.7% accuracy!", "# Compare the labels that our model predicts with the actual labels\n\n# Find the indices of the most confident prediction for each item. That tells us the predicted digit for that sample.\npredictions = np.array(model.predict(testX)).argmax(axis=1)\n\n# Calculate the accuracy, which is the percentage of times the predicated labels matched the actual labels\nactual = testY.argmax(axis=1)\ntest_accuracy = np.mean(predictions == actual, axis=0)\n\n# Print out the result\nprint(\"Test accuracy: \", test_accuracy)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
tpin3694/tpin3694.github.io
sql/delete_a_table.ipynb
mit
[ "Title: Delete A Table\nSlug: delete_a_table\nSummary: Delete an entire table in SQL. \nDate: 2016-05-01 12:00\nCategory: SQL\nTags: Basics\nAuthors: Chris Albon \nNote: This tutorial was written using Catherine Devlin's SQL in Jupyter Notebooks library. If you have not using a Jupyter Notebook, you can ignore the two lines of code below and any line containing %%sql. Furthermore, this tutorial uses SQLite's flavor of SQL, your version might have some differences in syntax.\nFor more, check out Learning SQL by Alan Beaulieu.", "# Ignore\n%load_ext sql\n%sql sqlite://\n%config SqlMagic.feedback = False", "Create Data", "%%sql\n\n-- Create a table of criminals\nCREATE TABLE criminals (pid, name, age, sex, city, minor);\nINSERT INTO criminals VALUES (412, 'James Smith', 15, 'M', 'Santa Rosa', 1);\nINSERT INTO criminals VALUES (234, 'Bill James', 22, 'M', 'Santa Rosa', 0);\nINSERT INTO criminals VALUES (632, 'Stacy Miller', 23, 'F', 'Santa Rosa', 0);\nINSERT INTO criminals VALUES (621, 'Betty Bob', NULL, 'F', 'Petaluma', 1);\nINSERT INTO criminals VALUES (162, 'Jaden Ado', 49, 'M', NULL, 0);\nINSERT INTO criminals VALUES (901, 'Gordon Ado', 32, 'F', 'Santa Rosa', 0);\nINSERT INTO criminals VALUES (512, 'Bill Byson', 21, 'M', 'Santa Rosa', 0);\nINSERT INTO criminals VALUES (411, 'Bob Iton', NULL, 'M', 'San Francisco', 0);", "Delete A Table", "%%sql\n\n-- Delete the table called 'criminals'\nDROP TABLE criminals", "View Table", "%%sql\n\n-- Select everything\nSELECT *\n\n-- From the table 'criminals'\nFROM criminals", "Note: We get an error because the table doesn't exist anymore." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tonordon/WoWAH
WoWAH-Max-Level-Prediction.ipynb
mit
[ "Predicting World of Warcraft Avatar Leveling Behavior\nThis project utilizes the publicly available World of Warcraft Avatar History dataset to garner insight into the game itself as well as its player base. In this case, I have focused on one particular problem: the leveling behavior of avatars. \nProblem\nCan we predict whether or not an avatar on World of Warcraft will be leveled to the max based upon simple metrics describing play behavior? In particular, we would like to explore whether avatar location and guild preferences, as well as play behavior, can be used to predeict whether or not an avatar will reach the maximum level allowable in WoW.\nData\nThis dataset represents one year of observations in 2008 for ~30,000 avatars from the Horde faction in WoW. These observations include the level, location, guild, race, and class of each avatar at a given instance in time. Our goal here is to explore this data for any interesting relationships with our main problem in mind. Ideally, we would like to be able to boil down this raw dataset to a few useful metrics that correlate with whether or not an avatar reaches max level.", "import pandas as pd\n#We don't like infinities, so set those to null\npd.set_option('mode.use_inf_as_null', True)\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport cPickle\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split, ShuffleSplit\nfrom sklearn.metrics import precision_recall_fscore_support\n\nfrom functions import plot_learning_curve, distill_wowah\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n#Constants\n#Max level before WLK\nMAX_LEVEL1 = 70\n#Max level after\nMAX_LEVEL2 = 80\n#Release of the Wrath of the Lich King expansion\nWLK_RD = pd.to_datetime('11/18/2008')", "Load and wrangle data", "#Load data\nwow_df = pd.read_csv('wowah_data.csv')", "Get some 
basic info from the data", "wow_df.info()\n\nwow_df.columns.values\n\n#Note that some column names have added whitespace. Strip that whitespace for clarity\nfor c in wow_df.columns:\n c = c.lstrip()\n \nwow_df.columns = [x.lstrip() for x in wow_df.columns.values] \n\n#Having all the unique character names will be useful later\nchars = np.sort(wow_df.char.unique())\nnav = np.size(chars)\n#Calculate the average number of data points per avatar\nprint(10826733 / nav)\n\n#Also, let's find the number of unique guilds\nguilds = np.sort(wow_df.guild.unique())\nng = np.size(guilds)\n\n#Also, let's find the number of unique locations\nlocs = np.sort(wow_df.zone.unique())\nnl = np.size(locs)", "So we have ~300 data points per avatar. Now let's explore the properties of the time sampling. For this, we need to convert the timestamp column to a numerical value.", "#Now let's convert the timestamp column to numerical values\n#First check if it's already been done\ntry:\n with open('timestamps.pkl', 'rb') as f:\n #Use cPickle for past serialization\n dts = cPickle.load(f)\nexcept IOError:\n dts = pd.to_datetime(wow_df['timestamp'])\n\n with open('timestamps.pkl', 'wb') as f:\n cPickle.dump(dts, f)\n \n#Just replace old timestamp column since it's no longer necessary \nwow_df['timestamp'] = dts", "Let's try plotting a level curve for one player to examine how they progressed.", "av = wow_df.loc[wow_df['char'] == chars[1]]\n\n#Find where max level is reached since curve is pretty boring after that\nprog = av[av['level'] < MAX_LEVEL2]\n\nplt.plot(prog['timestamp'], prog['level'])\nplt.xticks(['02-2008', '04-2008', '06-2008', '08-2008', '10-2008', '12-2008'], \n ['Feb 2008', 'Apr 2008', 'Jun 2008', 'Aug 2008', 'Oct 2008', 'Dec 2008'])\nplt.xlabel('Time')\nplt.ylabel('Player Level')", "It looks like this avatar was level ~54 when the observations began, and progressed to close to the max level in about a year with a long hiatus. 
This hiatus corresponds to the original WoW level cap, and the progression from this level begins when Wrath of the Lich King was released. It's also clear that the expansion release during this timeframe will need to be properly accounted for in order to make accurate predictions. \nDistilling the data on an avatar-by-avatar basis\nSince we are looking for variations in level progression across a broad range of levels and times, we want to come up with some features that describe the progression of level and play over time. So, let's go through each avatar and see if it reached max level. We will also create a new dataframe called av_df to store other potantially useful metrics for each avatar. These will include such things as average level, max level, level range, whether or not the avatar changed guilds at all, most frequented guild, and most frequented location.\nAt the end of the day, we want to characterize the variations in avatar level and see if these variations can help us predict whether each avatar reached the max level or not.", "#Use a function to distill raw WoWAH data into quantities grouped by character ID. This script essentially \n# uses the pandas groupby and agg functions to calculate metrics for each character over the time baseline\nav_df = distill_wowah(wow_df, chars)\nav_df.info()", "So now we have a new dataframe that contains one row per avatar with some aggregate properties for each. We now want to clean the data up. As other explorations of this data have shown, there are some problematic avatars. First, a significant fraction of them logged on for only one play. Given that we want to explore time evolution with this dataset, singly observed avatars will not tell us much. Thus, we want to get rid of these. Additionally, many avatars show mysterious race/class swaps even though these data predate race swapping in WoW. It is unclear whether these race and class swaps are real, or whether they reflect inconsistencies in the data. 
In any case, such swaps will likely muddy the waters for some insights to gain in terms of race/class dependencies, so we will also remove all characters which show more than one race or class.", "#Remove singly observed avatars\nav_df = av_df[av_df['nplays'] > 1]\n#Remove race changing avatars\nav_df = av_df[av_df['nrace'] == 1]\n#Remove class changing avatars\nav_df = av_df[av_df['ncharclass'] == 1]\n\nprint(len(av_df))", "So there were almost 10,000 problematic avatars. That effectively reduces our dataset by 1/3. Still, we should still be able to look for interesting relationships with this cleaned dataset.\nRace and Class\nNow let's see if there are any trends in terms of max leveling with race or class. Each race and class has its unqiue abilities which we expect should affect the level progression of avatars. Let us explore the existence of these correlations with some plots. The following plots will show the fraction of avatars in a given class-race combination that reached max level (level 80).", "grid = sns.FacetGrid(av_df, row='race', size=4, aspect=3)\ngrid.map(sns.barplot, 'charclass','maxlvld', alpha=.5, ci=None)", "So it does indeed look like whether or not an avatar reached max level depends on both the race and class. Death Knights particularly appear to prefer reaching max level for several different races. This probably reflects the fact that the Wrath of the Lich King was released during this dataset, and thus this class had the 'new' factor causing many players to level their first Death Knight. We also see race specific preferences, such as the Troll Rogue, Tauren Mage, and Orc Paladin. In any case, both race and class will probably be useful \nPlay rate\nWe can combine the number of plays and total playtime features to calculate an average 'rate' of play, plrate. This newly engineered feature will measure the amount of play independent of the window function of the observations. 
Essentially, we will divide the number of plays by the total time baseline. This will give us an idea how frequently each avatar was played. We expect that the play rate should be correlated with max leveling.", "# Construct play rate\n# Construct the play rate from the number of plays observed and the total time baseline for which\n# the avatar was observed\nav_df['plrate'] = np.log10(av_df['nplays'] / av_df['baseline_td'])\n\ng = sns.FacetGrid(av_df, col='maxlvld')\ng.map(sns.distplot, 'plrate', bins=10, kde=False, norm_hist=True)", "It appears that our intuition was correct. The distributions of max leveling appear starkly different between the two sets of avatars. Avatars that reached max level (post WLK) exhibit a somewhat lognormal distribution peaking at around 10 plays/day. In contrast, the non-max-leveled avatars illustrate a much broader distribution with less frequent play rates on average. The exception is the lonesome peak at play rates >100/day. This would seem to represent avatars that started to level quickly after WLK was released, but did not succeed before these observations ended.\nProgression rate\nNow we will construct another feature: the progression rate, prate. For this, we want to quantify how quickly an avatar was leveled up over time. This will employ the lvlrng and prog_baseline_td features. The former is the range of levels observed for each avatar, and the latter measures the time each avatar was played before reaching the max level. 
Again, we expect this feature to correlate with max leveling, but we will investigate this using some plots.", "#Construct progression rate\nav_df['prate'] = np.log10(av_df['lvlrng'] / av_df['prog_baseline_td'])\n#So pandas seems to be unable to properly handle NANs, so I am just going to drop rows with prate=NaN\nav_df_npr = av_df[np.isfinite(av_df['prate'])]\ng = sns.FacetGrid(av_df_npr, col='maxlvld')\ng.map(sns.distplot, 'prate', bins=10, kde=False, norm_hist=True)", "Again by combining the two features decribing the avatar level progression, we obtain a new feature correlated with max leveling. This tells us that avatars that reach max level progress at different rates than those who do not. Thus, we conclude to include this feature in our final model.\nGuild\nDoes the guild affect the likelihood of max leveling?\nAnother feature we would like to explore is the guild behavior the avatar. Its relation to max leveling may not be as initially straightforward as other features, but different cultures and membership of different guilds may certainly affect the likelihood that its constituent avatars reach max level. Note that an avatar can have been in multiple guilds throughout these observations, so we are forced to deal with some summary statistics. Namely, the number of guilds an avatar was found in a given time and the guild it was observed in most frequently. \nFirst, we examine the former. For this, we use the nguild and baseline_td features. The former represents the number of unique guilds an avatar was part of, while the latter again measures the total time the avatar spent playing. 
Combining these gives us the grate feature, measuring the number of unique guilds an avatar inhabited throughout the time observed.", "#Number of guilds per unit time\nav_df['grate'] = np.log10(av_df['nguild'] / av_df['baseline_td'])\ng = sns.FacetGrid(av_df.loc[np.isfinite(av_df['grate'])], col='maxlvld')\ng.map(sns.distplot, 'grate', bins=20, kde=False, norm_hist=True)", "It appears that those avatars who fluctuate between guilds less rapidly achieve max level more frequently than those who float back and forth between many guilds quickly. The stark difference between the two distributions indicates that this will likely be an important feature to consider in our model.\nWe will now investigate whether or not specific guilds promote max leveling more than others. For this, we show the distribution of avatars that did and didn't reach max level separated by the modguild feature, where modguild is simply a integer value corresponding to a unique guild ID.", "#Most frequented guild\ng = sns.FacetGrid(av_df, col='maxlvld')\ng.map(sns.distplot, 'modguild', bins=ng, kde=False, norm_hist=True)", "In line with our initial assumption, different guilds tend to promote max leveling more. There also appears to be one guild that tends to disfavor max leveling significantly more than the others. Finally, an avatar is overwhelmingly less likely to reach max level if it did not belong to a guild (strong peak at -1). Thus, the social aspect of guilds is an immensely important one to WoW, and we should try to include occupied guild in our model.\nLocation\nFinally, let us explore another categorical feature on whether or not an avatar reached max level: locations frequented. First, we take a look at the number of locations frequented by avatars from the two sets. As with the guilds, it may be important to factor in how many locations an avatar vists in a timespan. 
Thus, we will create a location rate feature, lrate, which measures how frequently an avatar moves between different locations. For this we again use the baseline_td feature in addition to nzon, the number of unique locations visited by an avatar.", "#Number of locations per unit time\nav_df['lrate'] = np.log10(av_df['nzon'] / av_df['baseline_td'])\ng = sns.FacetGrid(av_df.loc[np.isfinite(av_df['lrate'])], col='maxlvld')\ng.map(sns.distplot, 'lrate', bins=20, kde=False, norm_hist=True)", "Again, we see that when we control for the time baseline of observations, the location change rate correlates fairly strongly with max leveling. The distributions are markedly different for avatars that reached max level compared with those that did not.\nAs with specific guilds, we now want to see if specific locations visited tend to correlate more strongly with leveling to the max. For this, we need to transform the most frequented location feature, modzon, to a numerical values. For this, we use make dummy variables for each location and use those to convert modzon to modzonkey, which is just an integer ID corresponding to a unique location.", "#Most frequented location\n#For this, we need to convert the categorical modzon feature to a numerical one\n#Use get_dummies for this\nav_df_dum = pd.get_dummies(av_df['modzon'] , columns=['modzon'])\n\nav_df['modzonkey'] = av_df_dum.values.argmax(1)\n\ng = sns.FacetGrid(av_df, col='maxlvld')\ng.map(sns.distplot, 'modzonkey', bins=nl, kde=False, norm_hist=True)", "Finally, we do again see clear indications that visiting certain locations is strongly correlated with reaching max level. Certain locations contain quests, beasts, other game features that cater more towards certain level bands. 
Thus, we will want to include modzon in the features for the final model.\nFit model to predict whether avatars max leveled or not", "#First, we need to convert the categorical variables to dummy variables for compatibility with standard algorithms\n#Make dummy variables\nav_df_dum = pd.get_dummies(av_df, columns=['race', 'charclass', 'modzon', 'modguild'])\n\n#Get rid of NaN rates\nav_df_dum = av_df_dum.loc[(np.isfinite(av_df_dum['plrate'])) & (np.isfinite(av_df_dum['prate']))\n & (np.isfinite(av_df_dum['grate'])) & (np.isfinite(av_df_dum['lrate']))]\n\n#We also want to get ridof many features. We want to remove all features that are cumulative over the time coverage \n# because the model loses its predictive power if it needs to know all these details over the specific time baseline\n#We also remove the number of races and classes, since these have been cleaned to all be identically 1\nX = av_df_dum.drop(['nrace', 'ncharclass', 'char', 'lastplay', 'firstplay', 'baseline', 'prog_baseline', \n 'nzon', 'nguild', 'baseline_td', 'prog_baseline_td', 'nplays', 'lvlrng', \n 'maxlvl', 'modzonkey',\n 'maxlvld', 'maxlvld_preWLK'\n ], axis=1).copy()\nX['preWLK'] = X['preWLK'].astype(int)\nX['postWLK'] = X['postWLK'].astype(int)\ny = av_df_dum['maxlvld']\n\n#Next, split dataset up into training and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)", "Thus, we are left with race, class, and most frequented guilds and locations as features for the model. Additionally, we include whether or not an avatar was created before or after the WLK expansion since this obviosuly has a huge bearing on the max level. Finally, we include only time baseline-independent numerical features: the average level and the play, progression, guild, and location rates. 
We do not want our model dependent upon how long an avatar has been observed for in order to increase the generality of the model.\nNow we fit a logistic regression model to the avatar data. Such a model produces an average classification accuracy of ~98.4%.", "#Fit a logistic regression model to the maxlvld data\nlogreg = LogisticRegression()\nlogreg.fit(X_train, y_train)\ny_pred = logreg.predict(X_test)\nacc_log = round(logreg.score(X_train, y_train) * 100, 2)\nacc_log", "We can also see which features correlate most and least strongly with max leveling. The following table displays features and their correlations with max leveling.", "coeff_df = pd.DataFrame(X_train.columns)\ncoeff_df.columns = ['Feature']\ncoeff_df[\"Correlation\"] = pd.Series(logreg.coef_[0])\n\ncoeff_df.sort_values(by='Correlation', ascending=False)", "Evaluating the model\nSo evidently, this model works pretty well. It has extremely high scores when predicting max leveling on the test set. Some interesting things to note. \n1) The top few rows show the features most correlated with max leveling. Most of the most strongly correlated features are locations and guilds. Aside from those, we see the play rate, progression rate, and location rate to all be important factors in determining whether one of these avatars reaches max level or not.\n2) On the other side of things, we see certain locations strongly correlated with not achieving max level. Few guilds are negatively correlated with max leveling.\n3) The pre and post-WLK flags are the strongest anti-correlated with max leveling. \nBut wait... is this score too good to be true? Let's investigate for possible overfitting using learning curves.", "cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=42)\n\nplot_learning_curve(LogisticRegression(), 'Logistic Regression', X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)", "This learning curve shows that the model is being pretty well fit for both the training and cross-validation sets. 
The fact that it does equally as well on the CV set indicates that overfitting may actually not be a problem.\nLet's also examine the precision and recall of the model.", "pr, rec, fsc, sup = precision_recall_fscore_support(y_test, y_pred, average=None)\nprint(pr)\nprint(rec)", "It also appears that the model is quite precise and recalls accurately when an avatar did not reach max level. However, it appears less precise when predicting max leveling, and recalls those avatars even worse (although in all cases, >3/4 of the labels are correctly recalled). \nRemoving Wrath of the Lich King\nEvidently, our chosen model does not perform great in terms of precisely and accurately recalling whether avatars reached max level or not. Thus, maybe we need to tweak our model. In particular, it is likely that the effects of WLK on this dataset are discontinuous in time, and thus are probably mucking up things. Therefore, we will try the above again using only pre-WLK avatars.", "#Recalculate progression metrics restricting to pre-WLK\n#In this case, we will just reconstruct the avatar dataframe using only observations from the original wow_df pre-WLK\nwow_df = wow_df.loc[wow_df['timestamp'] < WLK_RD]\n\n#Now re-distill using only pre-WLK data\nav_prewlk_df = distill_wowah(wow_df, chars)\n\n#Now calculate rates\n#Number of guilds per unit time\nav_prewlk_df['grate'] = np.log10(av_prewlk_df['nguild'] / av_prewlk_df['baseline_td'])\n#Number of locations per unit time\nav_prewlk_df['lrate'] = np.log10(av_prewlk_df['nzon'] / av_prewlk_df['baseline_td'])\n#Construct play rate\nav_prewlk_df['plrate'] = np.log10(av_prewlk_df['nplays'] / av_prewlk_df['baseline_td'])\n#Construct progression rate\nav_prewlk_df['prate'] = np.log10(av_prewlk_df['lvlrng'] / av_prewlk_df['prog_baseline_td'])\n\n#Make dummy variables\nav_df_dum = pd.get_dummies(av_prewlk_df, columns=['race', 'charclass', 'modzon', 'modguild'])\n\n#Get rid of NaN rates\nav_df_dum = 
av_df_dum.loc[(np.isfinite(av_df_dum['plrate'])) & (np.isfinite(av_df_dum['prate']))\n & (np.isfinite(av_df_dum['grate'])) & (np.isfinite(av_df_dum['lrate']))]\n#We also want to get ridof many features. We want to remove all features that are cumulative over the time coverage \n# because the model loses its predictive power if it needs to know all these details over the specific time baseline\n#We also remove the number of races and classes, since these have been cleaned to all be identically 1\nX = av_df_dum.drop(['nrace', 'ncharclass', 'char', 'lastplay', 'firstplay', 'baseline', 'prog_baseline', \n 'nzon', 'nguild', 'baseline_td', 'prog_baseline_td', 'nplays', 'lvlrng', \n 'maxlvl',\n 'maxlvld', 'maxlvld_preWLK', 'preWLK', 'postWLK',\n ], axis=1).copy()\n\ny = av_df_dum['maxlvld_preWLK'].astype(bool)\n\n#Next, split dataset up into training and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n#Fit a logistic regression model to the maxlvld data\nlogreg = LogisticRegression()\nlogreg.fit(X_train, y_train)\ny_pred = logreg.predict(X_test)\nacc_log = round(logreg.score(X_train, y_train) * 100, 2)\nacc_log\n\ncv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=42)\n\nplot_learning_curve(LogisticRegression(), 'Logistic Regression', X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)\n\npr, rec, fsc, sup = precision_recall_fscore_support(y_test, y_pred, average=None)\nprint(pr)\nprint(rec)", "We see that after removing WLK data from this set, the predictive power of this model is greatly imporoved. With precisions and recalls >90% for both classes of avatars and no apparent overfitting, we conclude that this model should do a decent job at predicting whether or not an avatar will reach max level given some time-series observations. 
The above analysis indicates that each expansion era needs to be treated completely separately in order to make a strong model.\nConclusions\nWe found that this model can do a good job at predicting whether or not an avatar reaches max level. Essentially, this provides a way to accurately predict whether or not an avatar will reach max level based upon the locations it visits, the guilds it occupies, its race and class, and play behavior. The accuracy, precision, and recall of this model are all >90%. In terms of real-world applicability, such a model could potentially be used to track individual avatars and estimate whether or not they will be played to max level. Developers could then trace these avatars to individual players and use this information to better understand why some players might stop playing (e.g. stop max leveling?). Modifications to gameplay, UI, and pay structure might then be implemented considering these results." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
learn1do1/learn1do1.github.io
python_notebooks/Sorting Revisited.ipynb
mit
[ "Sorting functions in python\nPython is useful for exploring algorithms because of its terseness and large set of libraries.\nThis post will be focused on sorting functions, using a set of shuffled cards (integers) as input and looking at measured runtime.", "import random\n\ncards = range(52)\nrandom.shuffle(cards)\nprint cards", "Before we begin, let's define a function for checking that our sorting function even works. We can call it assert_sorted:", "def assert_sorted(cards):\n current_min = -1\n for card in cards:\n if card < current_min:\n raise AssertionError('Sort Failed')\n current_min = card\n return True", "Selection Sort", "def selection_sort(cards):\n sorted = []\n search_space = list(cards)\n while (len(sorted) < len(cards)):\n current_min_index = 0\n for i in range(len(search_space)):\n if search_space[i] < search_space[current_min_index]:\n current_min_index = i\n sorted.append(search_space.pop(current_min_index))\n return sorted \n\nprint selection_sort(cards)", "Let's look at the performance of selection sort. I looked into cProfile as a way to profile, but I only needed performance in the time dimension, not in the variety of execution functions cProfile was giving me. 
So I decided instead to use a very simple way of profiling execution time: using the python time library.\nI can then plot the runtime in seconds as the number of cards I input grows.", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport time\n\nsizes = [x for x in range(2000) if x % 100 == 0]\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n random.shuffle(cards)\n start_time = time.time()\n sorted_cards = selection_sort(cards)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(sizes, durations)\nplt.title('Time in seconds to run selection sort based on input size')\nplt.show()", "Insertion Sort", "def insertion_sort(cards):\n sorted = []\n for candidate in cards:\n # by default, insert at the end of the sorted list\n index = len(sorted)\n for i in range(len(sorted)):\n if candidate < sorted[i]:\n index = i\n break\n sorted.insert(index, candidate)\n return sorted\n\nsizes = [x for x in range(2000) if x % 100 == 0]\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n random.shuffle(cards)\n start_time = time.time()\n sorted_cards = insertion_sort(cards)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(sizes, durations)\nplt.title('Time in seconds to run insertion sort based on input size')\nplt.show()", "Insertion sort with a tree structure\nThis is how non-computer scientist friends of mine describe how they like to sort their cards. It's insertion sort, but we store the sorted cards in a tree so that inserts can happen in log(N) instead of N.", "import bintrees\ndef insertion_sort_with_trees(cards, tree):\n for candidate in cards:\n # by default, insert only the key into the bintree. 
Value is None\n tree.insert(candidate, None)\n return [x for x in tree.keys()]\n\nsizes = [x for x in range(2000) if x % 100 == 0]\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n random.shuffle(cards)\n redblack_tree = bintrees.RBTree()\n start_time = time.time()\n sorted_cards = insertion_sort_with_trees(cards, redblack_tree)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(sizes, durations)\nplt.title('Time in seconds to run insertion sort based on input size')\nplt.show()\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n random.shuffle(cards)\n binaryTree = bintrees.BinaryTree()\n start_time = time.time()\n sorted_cards = insertion_sort_with_trees(cards, binaryTree)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n \nplt.plot(sizes, durations, color = 'yellow')\n", "Downsides of binary tree: it performs well in the best case, but because it isn't balanced, it's worst case performance takes you back to performance as bad as insertion sort, O(N^2)", "sizes = [x for x in range(2000) if x % 100 == 0]\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n redblack_tree = bintrees.RBTree()\n start_time = time.time()\n sorted_cards = insertion_sort_with_trees(cards, redblack_tree)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(sizes, durations)\nplt.title('Time in seconds to run insertion sort based on input size')\nplt.show()\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n binaryTree = bintrees.BinaryTree()\n start_time = time.time()\n sorted_cards = insertion_sort_with_trees(cards, binaryTree)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n \nplt.plot(sizes, durations, color = 'yellow')\n", "You can see that because the rbtree remains balanced, insertion into it is stabler (blue line). 
Whereas the binary tree (yellow line) has O(N^2) performance. It looks very similar to our insertion sort from earlier, because we have lost the benefits of storing the sorted array in a tree.\nQuicksort", "def quicksort(cards):\n if len(cards) < 2:\n return cards\n pivot = random.randint(0, len(cards) - 1)\n upper_half = [x for x in cards if x >= cards[pivot]]\n lower_half = [x for x in cards if x < cards[pivot]]\n return quicksort(lower_half) + quicksort(upper_half)\n \n\nsizes = [x for x in range(10000) if x % 100 == 0]\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n random.shuffle(cards)\n start_time = time.time()\n sorted_cards = quicksort(cards)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(sizes, durations)\nplt.title('Time in seconds to run quick sort based on input size')\nplt.show()\n", "Mergesort\nMergesort is similar to quicksort, in that it uses recursion to only have to do NlogN compares. First divide the list into the smallest unit, then compare each element with the adjacent list to sort and merge the adjacent list. 
\nI found that the code for mergesort is a little bit more complex that I would like, so I have taken this gif from wikipedia to demonstrate what's really going on here:", "def mergesort(cards):\n if len(cards) <= 1:\n return cards\n else:\n return merge(mergesort(cards[:len(cards)/2]), mergesort(cards[len(cards)/2:]))\n\ndef merge(list1, list2):\n final_list = []\n i = 0\n j = 0\n while len(final_list) != (len(list1)) + len(list2):\n if i == len(list1):\n final_list.append(list2[j])\n j = j + 1\n elif j == len(list2) or list1[i] < list2[j]:\n final_list.append(list1[i])\n i = i + 1\n elif list1[i] >= list2[j]:\n final_list.append(list2[j])\n j = j + 1\n return final_list\n\nsizes = [x for x in range(10000) if x % 100 == 0]\ndurations = []\n\nfor size in sizes:\n cards = range(size)\n random.shuffle(cards)\n start_time = time.time()\n sorted_cards = mergesort(cards)\n durations.append(time.time() - start_time)\n assert_sorted(sorted_cards)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(sizes, durations)\nplt.title('Time in seconds to run mergesort based on input size')\nplt.show()", "Notice that quicksort barely defeats mergesort, and that using a btree with insertion isn't much slower. \nThis has been an incomplete and much too fast exploration through the world of sorting algorithms! Read more on sorting functions on wikipedia, or download the python notebook files so you can run it on your own computer." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
zkbt/mosasaurus
OrganizationOutline.ipynb
mit
[ "Determine the basic properties of this target.\nThe Target class handles the properties of the star, and (if relevant) the planet or periodic signal associated with that star. I've", "from mosasaurus.Target import Target\n\n# load a target from SIMBAD\nt = Target(starname='GJ1132', name='GJ1132b')\nt.summarize()\nt.star.summarize()\n\n# create a target from values (in case you want to work offline)\nimport astropy.units as u\nt = Target(starname='GJ1132', name='GJ1132b', \n ra=153.71622708*u.deg, dec=-47.15684639*u.deg,\n pmra=-1046., pmdec=416., epoch=2000.0)\nt.summarize()\nt.star.summarize()", "Determine the basic properties of this instrument.\nClasses derived from the Instrument class handle all instrument-specific information, including\n- the reduction of CCD frames\n- the extraction of key information from headers\n- the mapping of reference positions to apertures\n- ...", "from mosasaurus.instruments import LDSS3C\ni = LDSS3C(grism='vph-red')\ni.summarize()\n\ni.keysforlogheader", "Determine the basic properties of this night, including a nightly observing log.\nThis creates a summary observing log that contains all the files from that night. This can be useful for identifying which exposures should be used for various calibration and analysis steps.", "from mosasaurus.Night import Night\nn = Night('ut160227_28', instrument=i)\nn.createNightlyLog(remake=False)\n\nn.summarize()", "Determine the basic properties of this observation, including the data associated with it.\n-identify data associated with this observation", "from mosasaurus.Observation import Observation\no = Observation(t, i, n)\no.summarize()\n\no.loadHeaders()", "-\n-calculate auxiliary information (e.g. 
barycentric correction)\n-create master calibration data relevant to this observation\n-identify likely cosmic rays in this image\n-define reference positions from which spectra should be extracted\n-identify relevant pixels, define extraction apertures\n-extract spectra, and visualize the process", "col = n.log['object']\ncol." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/structured/solutions/5a_train_keras_ai_platform_babyweight.ipynb
apache-2.0
[ "LAB 5a: Training Keras model on Cloud AI Platform\nLearning Objectives\n\nSet up the environment\nCreate trainer module's task.py to hold hyperparameter argparsing code\nCreate trainer module's model.py to hold Keras model code\nRun trainer module package locally\nSubmit training job to Cloud AI Platform\nSubmit hyperparameter tuning job to Cloud AI Platform\n\nIntroduction\nAfter having tested our training pipeline both locally and in the cloud on a subset of the data, we can submit another (much larger) training job to the cloud. It is also a good idea to run a hyperparameter tuning job to make sure we have optimized the hyperparameters of our model. \nIn this notebook, we'll be training our Keras model at scale using Cloud AI Platform.\nIn this lab, we will set up the environment, create the trainer module's task.py to hold hyperparameter argparsing code, create the trainer module's model.py to hold Keras model code, run the trainer module package locally, submit a training job to Cloud AI Platform, and submit a hyperparameter tuning job to Cloud AI Platform.\nSet up environment variables and load necessary libraries\nFirst we will install the cloudml-hypertune package on our local machine. This is the package which we will use to report hyperparameter tuning metrics to Cloud AI Platform. Installing the package will allow us to test our trainer package locally.", "!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst\n\n!pip3 install cloudml-hypertune", "Import necessary libraries.", "import os", "Set environment variables.\nSet environment variables so that we can use them throughout the entire lab. 
We will be using our project name for our bucket, so you only need to change your project and region.", "%%bash\nexport PROJECT=$(gcloud config list project --format \"value(core.project)\")\necho \"Your current GCP Project Name is: \"${PROJECT}\n\n# TODO: Change these to try this notebook out\nPROJECT = \"your-project-name-here\" # Replace with your PROJECT\nBUCKET = PROJECT # defaults to PROJECT\nREGION = \"us-central1\" # Replace with your REGION\n\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"BUCKET\"] = BUCKET\nos.environ[\"REGION\"] = REGION\nos.environ[\"TFVERSION\"] = \"2.1\"\nos.environ[\"PYTHONVERSION\"] = \"3.7\"\n\n%%bash\ngcloud config set project ${PROJECT}\ngcloud config set compute/region ${REGION}\n\n%%bash\nif ! gsutil ls | grep -q gs://${BUCKET}; then\n gsutil mb -l ${REGION} gs://${BUCKET}\nfi", "Check data exists\nVerify that you previously created CSV files we'll be using for training and evaluation. If not, go back to lab 1b_prepare_data_babyweight to create them.", "%%bash\ngsutil ls gs://${BUCKET}/babyweight/data/*000000000000.csv", "Now that we have the Keras wide-and-deep code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform.\nTrain on Cloud AI Platform\nTraining on Cloud AI Platform requires:\n* Making the code a Python package\n* Using gcloud to submit the training code to Cloud AI Platform\nEnsure that the Cloud AI Platform API is enabled by going to this link.\nMove code into a Python package\nA Python package is simply a collection of one or more .py files along with an __init__.py file to identify the containing directory as a package. 
The __init__.py sometimes contains initialization code but for our purposes an empty file suffices.\nThe bash command touch creates an empty file in the specified location, the directory babyweight should already exist.", "%%bash\nmkdir -p babyweight/trainer\ntouch babyweight/trainer/__init__.py", "We then use the %%writefile magic to write the contents of the cell below to a file called task.py in the babyweight/trainer folder.\nCreate trainer module's task.py to hold hyperparameter argparsing code.\nThe cell below writes the file babyweight/trainer/task.py which sets up our training job. Here is where we determine which parameters of our model to pass as flags during training using the parser module. Look at how batch_size is passed to the model in the code below. Use this as an example to parse arguements for the following variables\n- nnsize which represents the hidden layer sizes to use for DNN feature columns\n- nembeds which represents the embedding size of a cross of n key real-valued parameters\n- train_examples which represents the number of examples (in thousands) to run the training job\n- eval_steps which represents the positive number of steps for which to evaluate model\nBe sure to include a default value for the parsed arguments above and specfy the type if necessary.", "%%writefile babyweight/trainer/task.py\nimport argparse\nimport json\nimport os\n\nfrom trainer import model\n\nimport tensorflow as tf\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--job-dir\",\n help=\"this model ignores this field, but it is required by gcloud\",\n default=\"junk\"\n )\n parser.add_argument(\n \"--train_data_path\",\n help=\"GCS location of training data\",\n required=True\n )\n parser.add_argument(\n \"--eval_data_path\",\n help=\"GCS location of evaluation data\",\n required=True\n )\n parser.add_argument(\n \"--output_dir\",\n help=\"GCS location to write checkpoints and export models\",\n required=True\n )\n 
parser.add_argument(\n \"--batch_size\",\n help=\"Number of examples to compute gradient over.\",\n type=int,\n default=512\n )\n parser.add_argument(\n \"--nnsize\",\n help=\"Hidden layer sizes for DNN -- provide space-separated layers\",\n nargs=\"+\",\n type=int,\n default=[128, 32, 4]\n )\n parser.add_argument(\n \"--nembeds\",\n help=\"Embedding size of a cross of n key real-valued parameters\",\n type=int,\n default=3\n )\n parser.add_argument(\n \"--num_epochs\",\n help=\"Number of epochs to train the model.\",\n type=int,\n default=10\n )\n parser.add_argument(\n \"--train_examples\",\n help=\"\"\"Number of examples (in thousands) to run the training job over.\n If this is more than actual # of examples available, it cycles through\n them. So specifying 1000 here when you have only 100k examples makes\n this 10 epochs.\"\"\",\n type=int,\n default=5000\n )\n parser.add_argument(\n \"--eval_steps\",\n help=\"\"\"Positive number of steps for which to evaluate model. Default\n to None, which means to evaluate until input_fn raises an end-of-input\n exception\"\"\",\n type=int,\n default=None\n )\n\n # Parse all arguments\n args = parser.parse_args()\n arguments = args.__dict__\n\n # Unused args provided by service\n arguments.pop(\"job_dir\", None)\n arguments.pop(\"job-dir\", None)\n\n # Modify some arguments\n arguments[\"train_examples\"] *= 1000\n\n # Append trial_id to path if we are doing hptuning\n # This code can be removed if you are not using hyperparameter tuning\n arguments[\"output_dir\"] = os.path.join(\n arguments[\"output_dir\"],\n json.loads(\n os.environ.get(\"TF_CONFIG\", \"{}\")\n ).get(\"task\", {}).get(\"trial\", \"\")\n )\n\n # Run the training job\n model.train_and_evaluate(arguments)", "In the same way we can write to the file model.py the model that we developed in the previous notebooks. \nCreate trainer module's model.py to hold Keras model code.\nTo create our model.py, we'll use the code we wrote for the Wide & Deep model. 
Look back at your 4c_keras_wide_and_deep_babyweight.ipynb notebook and copy/paste the necessary code from that notebook into its place in the cell below.", "%%writefile babyweight/trainer/model.py\nimport datetime\nimport os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport hypertune\n\n# Determine CSV, label, and key columns\nCSV_COLUMNS = [\"weight_pounds\",\n \"is_male\",\n \"mother_age\",\n \"plurality\",\n \"gestation_weeks\"]\nLABEL_COLUMN = \"weight_pounds\"\n\n# Set default values for each CSV column.\n# Treat is_male and plurality as strings.\nDEFAULTS = [[0.0], [\"null\"], [0.0], [\"null\"], [0.0]]\n\n\ndef features_and_labels(row_data):\n \"\"\"Splits features and labels from feature dictionary.\n\n Args:\n row_data: Dictionary of CSV column names and tensor values.\n Returns:\n Dictionary of feature tensors and label tensor.\n \"\"\"\n label = row_data.pop(LABEL_COLUMN)\n\n return row_data, label # features, label\n\n\ndef load_dataset(pattern, batch_size=1, mode='eval'):\n \"\"\"Loads dataset using the tf.data API from CSV files.\n\n Args:\n pattern: str, file pattern to glob into list of files.\n batch_size: int, the number of examples per batch.\n mode: 'train' | 'eval' to determine if training or evaluating.\n Returns:\n `Dataset` object.\n \"\"\"\n print(\"mode = {}\".format(mode))\n # Make a CSV dataset\n dataset = tf.data.experimental.make_csv_dataset(\n file_pattern=pattern,\n batch_size=batch_size,\n column_names=CSV_COLUMNS,\n column_defaults=DEFAULTS)\n\n # Map dataset to features and label\n dataset = dataset.map(map_func=features_and_labels) # features, label\n\n # Shuffle and repeat for training\n if mode == 'train':\n dataset = dataset.shuffle(buffer_size=1000).repeat()\n\n # Take advantage of multi-threading; 1=AUTOTUNE\n dataset = dataset.prefetch(buffer_size=1)\n\n return dataset\n\n\ndef create_input_layers():\n \"\"\"Creates dictionary of input layers for each feature.\n\n Returns:\n Dictionary of 
`tf.Keras.layers.Input` layers for each feature.\n \"\"\"\n deep_inputs = {\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"float32\")\n for colname in [\"mother_age\", \"gestation_weeks\"]\n }\n\n wide_inputs = {\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"string\")\n for colname in [\"is_male\", \"plurality\"]\n }\n\n inputs = {**wide_inputs, **deep_inputs}\n\n return inputs\n\n\ndef categorical_fc(name, values):\n \"\"\"Helper function to wrap categorical feature by indicator column.\n\n Args:\n name: str, name of feature.\n values: list, list of strings of categorical values.\n Returns:\n Categorical and indicator column of categorical feature.\n \"\"\"\n cat_column = tf.feature_column.categorical_column_with_vocabulary_list(\n key=name, vocabulary_list=values)\n ind_column = tf.feature_column.indicator_column(\n categorical_column=cat_column)\n\n return cat_column, ind_column\n\n\ndef create_feature_columns(nembeds):\n \"\"\"Creates wide and deep dictionaries of feature columns from inputs.\n\n Args:\n nembeds: int, number of dimensions to embed categorical column down to.\n Returns:\n Wide and deep dictionaries of feature columns.\n \"\"\"\n deep_fc = {\n colname: tf.feature_column.numeric_column(key=colname)\n for colname in [\"mother_age\", \"gestation_weeks\"]\n }\n wide_fc = {}\n is_male, wide_fc[\"is_male\"] = categorical_fc(\n \"is_male\", [\"True\", \"False\", \"Unknown\"])\n plurality, wide_fc[\"plurality\"] = categorical_fc(\n \"plurality\", [\"Single(1)\", \"Twins(2)\", \"Triplets(3)\",\n \"Quadruplets(4)\", \"Quintuplets(5)\", \"Multiple(2+)\"])\n\n # Bucketize the float fields. 
This makes them wide\n age_buckets = tf.feature_column.bucketized_column(\n source_column=deep_fc[\"mother_age\"],\n boundaries=np.arange(15, 45, 1).tolist())\n wide_fc[\"age_buckets\"] = tf.feature_column.indicator_column(\n categorical_column=age_buckets)\n\n gestation_buckets = tf.feature_column.bucketized_column(\n source_column=deep_fc[\"gestation_weeks\"],\n boundaries=np.arange(17, 47, 1).tolist())\n wide_fc[\"gestation_buckets\"] = tf.feature_column.indicator_column(\n categorical_column=gestation_buckets)\n\n # Cross all the wide columns, have to do the crossing before we one-hot\n crossed = tf.feature_column.crossed_column(\n keys=[age_buckets, gestation_buckets],\n hash_bucket_size=1000)\n deep_fc[\"crossed_embeds\"] = tf.feature_column.embedding_column(\n categorical_column=crossed, dimension=nembeds)\n\n return wide_fc, deep_fc\n\n\ndef get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units):\n \"\"\"Creates model architecture and returns outputs.\n\n Args:\n wide_inputs: Dense tensor used as inputs to wide side of model.\n deep_inputs: Dense tensor used as inputs to deep side of model.\n dnn_hidden_units: List of integers where length is number of hidden\n layers and ith element is the number of neurons at ith layer.\n Returns:\n Dense tensor output from the model.\n \"\"\"\n # Hidden layers for the deep side\n layers = [int(x) for x in dnn_hidden_units]\n deep = deep_inputs\n for layerno, numnodes in enumerate(layers):\n deep = tf.keras.layers.Dense(\n units=numnodes,\n activation=\"relu\",\n name=\"dnn_{}\".format(layerno+1))(deep)\n deep_out = deep\n\n # Linear model for the wide side\n wide_out = tf.keras.layers.Dense(\n units=10, activation=\"relu\", name=\"linear\")(wide_inputs)\n\n # Concatenate the two sides\n both = tf.keras.layers.concatenate(\n inputs=[deep_out, wide_out], name=\"both\")\n\n # Final output is a linear activation because this is regression\n output = tf.keras.layers.Dense(\n units=1, activation=\"linear\", 
name=\"weight\")(both)\n\n return output\n\n\ndef rmse(y_true, y_pred):\n \"\"\"Calculates RMSE evaluation metric.\n\n Args:\n y_true: tensor, true labels.\n y_pred: tensor, predicted labels.\n Returns:\n Tensor with value of RMSE between true and predicted labels.\n \"\"\"\n return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))\n\n\ndef build_wide_deep_model(dnn_hidden_units=[64, 32], nembeds=3):\n \"\"\"Builds wide and deep model using Keras Functional API.\n\n Returns:\n `tf.keras.models.Model` object.\n \"\"\"\n # Create input layers\n inputs = create_input_layers()\n\n # Create feature columns for both wide and deep\n wide_fc, deep_fc = create_feature_columns(nembeds)\n\n # The constructor for DenseFeatures takes a list of numeric columns\n # The Functional API in Keras requires: LayerConstructor()(inputs)\n wide_inputs = tf.keras.layers.DenseFeatures(\n feature_columns=wide_fc.values(), name=\"wide_inputs\")(inputs)\n deep_inputs = tf.keras.layers.DenseFeatures(\n feature_columns=deep_fc.values(), name=\"deep_inputs\")(inputs)\n\n # Get output of model given inputs\n output = get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units)\n\n # Build model and compile it all together\n model = tf.keras.models.Model(inputs=inputs, outputs=output)\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=[rmse, \"mse\"])\n\n return model\n\n\ndef train_and_evaluate(args):\n model = build_wide_deep_model(args[\"nnsize\"], args[\"nembeds\"])\n print(\"Here is our Wide-and-Deep architecture so far:\\n\")\n print(model.summary())\n\n trainds = load_dataset(\n args[\"train_data_path\"],\n args[\"batch_size\"],\n 'train')\n\n evalds = load_dataset(\n args[\"eval_data_path\"], 1000, 'eval')\n if args[\"eval_steps\"]:\n evalds = evalds.take(count=args[\"eval_steps\"])\n\n num_batches = args[\"batch_size\"] * args[\"num_epochs\"]\n steps_per_epoch = args[\"train_examples\"] // num_batches\n\n checkpoint_path = os.path.join(args[\"output_dir\"], 
\"checkpoints/babyweight\")\n cp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path, verbose=1, save_weights_only=True)\n\n history = model.fit(\n trainds,\n validation_data=evalds,\n epochs=args[\"num_epochs\"],\n steps_per_epoch=steps_per_epoch,\n verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch\n callbacks=[cp_callback])\n\n EXPORT_PATH = os.path.join(\n args[\"output_dir\"], datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n tf.saved_model.save(\n obj=model, export_dir=EXPORT_PATH) # with default serving function\n \n hp_metric = history.history['val_rmse'][-1]\n\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='rmse',\n metric_value=hp_metric,\n global_step=args['num_epochs'])\n \n print(\"Exported trained model to {}\".format(EXPORT_PATH))", "Train locally\nAfter moving the code to a package, make sure it works as a standalone. Note, we incorporated the --train_examples flag so that we don't try to train on the entire dataset while we are developing our pipeline. Once we are sure that everything is working on a subset, we can change it so that we can train on all the data. Even for this subset, this takes about 3 minutes in which you won't see any output ...\nRun trainer module package locally.\nWe can run a very small training job over a single file with a small batch size, 1 epoch, 1 train example, and 1 eval step.", "%%bash\nOUTDIR=babyweight_trained\nrm -rf ${OUTDIR}\nexport PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight\npython3 -m trainer.task \\\n --job-dir=./tmp \\\n --train_data_path=gs://${BUCKET}/babyweight/data/train*.csv \\\n --eval_data_path=gs://${BUCKET}/babyweight/data/eval*.csv \\\n --output_dir=${OUTDIR} \\\n --batch_size=10 \\\n --num_epochs=1 \\\n --train_examples=1 \\\n --eval_steps=1", "Training on Cloud AI Platform\nNow that we see everything is working locally, it's time to train on the cloud! 
\nTo submit to the Cloud we use gcloud ai-platform jobs submit training [jobname] and simply specify some additional parameters for AI Platform Training Service:\n- jobname: A unique identifier for the Cloud job. We usually append system time to ensure uniqueness\n- job-dir: A GCS location to upload the Python package to\n- runtime-version: Version of TF to use.\n- python-version: Version of Python to use. Currently only Python 3.7 is supported for TF 2.1.\n- region: Cloud region to train in. See here for supported AI Platform Training Service regions\nBelow the -- \\ we add in the arguments for our task.py file.", "%%bash\n\nOUTDIR=gs://${BUCKET}/babyweight/trained_model\nJOBID=babyweight_$(date -u +%y%m%d_%H%M%S)\n\ngcloud ai-platform jobs submit training ${JOBID} \\\n --region=${REGION} \\\n --module-name=trainer.task \\\n --package-path=$(pwd)/babyweight/trainer \\\n --job-dir=${OUTDIR} \\\n --staging-bucket=gs://${BUCKET} \\\n --master-machine-type=n1-standard-8 \\\n --scale-tier=CUSTOM \\\n --runtime-version=${TFVERSION} \\\n --python-version=${PYTHONVERSION} \\\n -- \\\n --train_data_path=gs://${BUCKET}/babyweight/data/train*.csv \\\n --eval_data_path=gs://${BUCKET}/babyweight/data/eval*.csv \\\n --output_dir=${OUTDIR} \\\n --num_epochs=10 \\\n --train_examples=10000 \\\n --eval_steps=100 \\\n --batch_size=32 \\\n --nembeds=8", "The training job should complete within 10 to 15 minutes. 
You do not need to wait for this training job to finish before moving forward in the notebook, but will need a trained model to complete our next lab.\nLab Summary:\nIn this lab, we set up the environment, created the trainer module's task.py to hold hyperparameter argparsing code, created the trainer module's model.py to hold Keras model code, ran the trainer module package locally, submitted a training job to Cloud AI Platform, and submitted a hyperparameter tuning job to Cloud AI Platform.\nExtra: Training on Cloud AI Platform using containers\nThough we can directly submit TensorFlow 2.1 models using the gcloud ai-platform jobs submit training command, we can also submit containerized models for training. One advantage of using this approach is that we can use frameworks not natively supported by Cloud AI Platform for training and have more control over the environment in which the training loop is running.\nThe rest of this notebook is dedicated to using the containerized model approach.\nCreate Dockerfile\nWe need to create a container with everything we need to be able to run our model. This includes our trainer module package, python3, as well as the libraries we use such as the most up to date TensorFlow 2.0 version.", "%%writefile babyweight/Dockerfile\nFROM gcr.io/deeplearning-platform-release/tf2-cpu\nCOPY trainer /babyweight/trainer\nRUN apt update && \\\n apt install --yes python3-pip && \\\n pip3 install --upgrade --quiet tensorflow==2.1 && \\\n pip3 install --upgrade --quiet cloudml-hypertune\n\nENV PYTHONPATH ${PYTHONPATH}:/babyweight\nENTRYPOINT [\"python3\", \"babyweight/trainer/task.py\"]", "Build and push container image to repo\nNow that we have created our Dockerfile, we need to build and push our container image to our project's container repo. 
To do this, we'll create a small shell script that we can call from the bash.", "%%writefile babyweight/push_docker.sh\nexport PROJECT_ID=$(gcloud config list project --format \"value(core.project)\")\nexport IMAGE_REPO_NAME=babyweight_training_container\nexport IMAGE_URI=gcr.io/${PROJECT_ID}/${IMAGE_REPO_NAME}\n\necho \"Building $IMAGE_URI\"\ndocker build -f Dockerfile -t ${IMAGE_URI} ./\necho \"Pushing $IMAGE_URI\"\ndocker push ${IMAGE_URI}", "Note: If you get a permissions/stat error when running push_docker.sh from Notebooks, do it from CloudShell:\nOpen CloudShell on the GCP Console\n* git clone https://github.com/GoogleCloudPlatform/training-data-analyst\n* cd training-data-analyst/courses/machine_learning/deepdive2/structured/solutions/babyweight\n* bash push_docker.sh\nThis step takes 5-10 minutes to run.", "%%bash\ncd babyweight\nbash push_docker.sh", "Kindly ignore the incompatibility errors.\nTest container locally\nBefore we submit our training job to Cloud AI Platform, let's make sure our container that we just built and pushed to our project's container repo works perfectly. We can do that by calling our container in bash and passing the necessary user_args for our task.py's parser.", "%%bash\nexport PROJECT_ID=$(gcloud config list project --format \"value(core.project)\")\nexport IMAGE_REPO_NAME=babyweight_training_container\nexport IMAGE_URI=gcr.io/${PROJECT_ID}/${IMAGE_REPO_NAME}\necho \"Running $IMAGE_URI\"\ndocker run ${IMAGE_URI} \\\n --train_data_path=gs://${BUCKET}/babyweight/data/train*.csv \\\n --train_data_path=gs://${BUCKET}/babyweight/data/train*.csv \\\n --eval_data_path=gs://${BUCKET}/babyweight/data/eval*.csv \\\n --output_dir=gs://${BUCKET}/babyweight/trained_model \\\n --batch_size=10 \\\n --num_epochs=10 \\\n --train_examples=1 \\\n --eval_steps=1", "Train on Cloud AI Platform\nOnce the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. 
The training run took about <b> two hours </b> for me. You can monitor the job from the GCP console in the Cloud AI Platform section.", "%%bash\nOUTDIR=gs://${BUCKET}/babyweight/trained_model\nJOBID=babyweight_$(date -u +%y%m%d_%H%M%S)\necho ${OUTDIR} ${REGION} ${JOBID}\n# gsutil -m rm -rf ${OUTDIR}\n\nIMAGE=gcr.io/${PROJECT}/babyweight_training_container\n\ngcloud ai-platform jobs submit training ${JOBID} \\\n --staging-bucket=gs://${BUCKET} \\\n --region=${REGION} \\\n --master-image-uri=${IMAGE} \\\n --master-machine-type=n1-standard-4 \\\n --scale-tier=CUSTOM \\\n -- \\\n --train_data_path=gs://${BUCKET}/babyweight/data/train*.csv \\\n --eval_data_path=gs://${BUCKET}/babyweight/data/eval*.csv \\\n --output_dir=${OUTDIR} \\\n --num_epochs=10 \\\n --train_examples=20000 \\\n --eval_steps=100 \\\n --batch_size=32 \\\n --nembeds=8", "When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word \"dict\" and saw that the last line was:\n<pre>\nSaving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186\n</pre>\nThe final RMSE was 1.03 pounds.\nHyperparameter tuning\nAll of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config hyperparam.yaml.\nThis step will take <b>up to 2 hours</b> -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. 
Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.\nNote that this is the same hyperparam.yaml file as above, but included here for convenience.", "%%writefile hyperparam.yaml\ntrainingInput:\n scaleTier: STANDARD_1\n hyperparameters:\n hyperparameterMetricTag: rmse\n goal: MINIMIZE\n maxTrials: 20\n maxParallelTrials: 5\n enableTrialEarlyStopping: True\n params:\n - parameterName: batch_size\n type: INTEGER\n minValue: 8\n maxValue: 512\n scaleType: UNIT_LOG_SCALE\n - parameterName: nembeds\n type: INTEGER\n minValue: 3\n maxValue: 30\n scaleType: UNIT_LINEAR_SCALE\n\n%%bash\nOUTDIR=gs://${BUCKET}/babyweight/hyperparam\nJOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)\necho ${OUTDIR} ${REGION} ${JOBNAME}\ngsutil -m rm -rf ${OUTDIR}\n\nIMAGE=gcr.io/${PROJECT}/babyweight_training_container\n\ngcloud ai-platform jobs submit training ${JOBNAME} \\\n --staging-bucket=gs://${BUCKET} \\\n --region=${REGION} \\\n --master-image-uri=${IMAGE} \\\n --master-machine-type=n1-standard-8 \\\n --scale-tier=CUSTOM \\\n --config=hyperparam.yaml \\\n -- \\\n --train_data_path=gs://${BUCKET}/babyweight/data/train*.csv \\\n --eval_data_path=gs://${BUCKET}/babyweight/data/eval*.csv \\\n --output_dir=${OUTDIR} \\\n --num_epochs=10 \\\n --train_examples=5000 \\\n --eval_steps=100", "Repeat training\nThis time with tuned parameters for batch_size and nembeds. Note that your best results may differ from below. 
So be sure to fill yours in!", "%%bash\nOUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned\nJOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)\necho ${OUTDIR} ${REGION} ${JOBNAME}\ngsutil -m rm -rf ${OUTDIR}\n\nIMAGE=gcr.io/${PROJECT}/babyweight_training_container\n\ngcloud ai-platform jobs submit training ${JOBNAME} \\\n --staging-bucket=gs://${BUCKET} \\\n --region=${REGION} \\\n --master-image-uri=${IMAGE} \\\n --master-machine-type=n1-standard-4 \\\n --scale-tier=CUSTOM \\\n -- \\\n --train_data_path=gs://${BUCKET}/babyweight/data/train*.csv \\\n --eval_data_path=gs://${BUCKET}/babyweight/data/eval*.csv \\\n --output_dir=${OUTDIR} \\\n --num_epochs=10 \\\n --train_examples=20000 \\\n --eval_steps=100 \\\n --batch_size=32 \\\n --nembeds=8", "Extra Summary:\nIn this lab, we set up the environment, created the trainer module's task.py to hold hyperparameter argparsing code, created the trainer module's model.py to hold Keras model code, built a container to run the trainer package ran the trainer module package locally, submitted a training job to Cloud AI Platform, and submitted a hyperparameter tuning job to Cloud AI Platform.\nCopyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.17/_downloads/99e8a7413b1277c668065b7630324d3b/plot_sensors_time_frequency.ipynb
bsd-3-clause
[ "%matplotlib inline", "Frequency and time-frequency sensors analysis\nThe objective is to show you how to explore the spectral content\nof your data (frequency and time-frequency). Here we'll work on Epochs.\nWe will use the somatosensory dataset that contains so-called\nevent related synchronizations (ERS) / desynchronizations (ERD) in\nthe beta band.", "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.time_frequency import tfr_morlet, psd_multitaper\nfrom mne.datasets import somato", "Set parameters", "data_path = somato.data_path()\nraw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'\n\n# Setup for reading the raw data\nraw = mne.io.read_raw_fif(raw_fname)\nevents = mne.find_events(raw, stim_channel='STI 014')\n\n# picks MEG gradiometers\npicks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)\n\n# Construct Epochs\nevent_id, tmin, tmax = 1, -1., 3.\nbaseline = (None, 0)\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),\n preload=True)\n\nepochs.resample(150., npad='auto') # resample to reduce computation time", "Frequency analysis\nWe start by exploring the frequence content of our epochs.\nLet's first check out all channel types by averaging across epochs.", "epochs.plot_psd(fmin=2., fmax=40.)", "Now let's take a look at the spatial distributions of the PSD.", "epochs.plot_psd_topomap(ch_type='grad', normalize=True)", "Alternatively, you can also create PSDs from Epochs objects with functions\nthat start with psd_ such as\n:func:mne.time_frequency.psd_multitaper and\n:func:mne.time_frequency.psd_welch.", "f, ax = plt.subplots()\npsds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=1)\npsds = 10. 
* np.log10(psds)\npsds_mean = psds.mean(0).mean(0)\npsds_std = psds.mean(0).std(0)\n\nax.plot(freqs, psds_mean, color='k')\nax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,\n color='k', alpha=.5)\nax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency',\n ylabel='Power Spectral Density (dB)')\nplt.show()", "Time-frequency analysis: power and inter-trial coherence\nWe now compute time-frequency representations (TFRs) from our Epochs.\nWe'll look at power and inter-trial coherence (ITC).\nTo this we'll use the function :func:mne.time_frequency.tfr_morlet\nbut you can also use :func:mne.time_frequency.tfr_multitaper\nor :func:mne.time_frequency.tfr_stockwell.", "# define frequencies of interest (log-spaced)\nfreqs = np.logspace(*np.log10([6, 35]), num=8)\nn_cycles = freqs / 2. # different number of cycle per frequency\npower, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,\n return_itc=True, decim=3, n_jobs=1)", "Inspect power\n<div class=\"alert alert-info\"><h4>Note</h4><p>The generated figures are interactive. 
In the topo you can click\n on an image to visualize the data for one sensor.\n You can also select a portion in the time-frequency plane to\n obtain a topomap for a certain time-frequency region.</p></div>", "power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')\npower.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])\n\nfig, axis = plt.subplots(1, 2, figsize=(7, 4))\npower.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,\n baseline=(-0.5, 0), mode='logratio', axes=axis[0],\n title='Alpha', show=False)\npower.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,\n baseline=(-0.5, 0), mode='logratio', axes=axis[1],\n title='Beta', show=False)\nmne.viz.tight_layout()\nplt.show()", "Joint Plot\nYou can also create a joint plot showing both the aggregated TFR\nacross channels and topomaps at specific times and frequencies to obtain\na quick overview regarding oscillatory effects across time and space.", "power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,\n timefreqs=[(.5, 10), (1.3, 8)])", "Inspect ITC", "itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')", "<div class=\"alert alert-info\"><h4>Note</h4><p>Baseline correction can be applied to power or done in plots.\n To illustrate the baseline correction in plots, the next line is\n commented power.apply_baseline(baseline=(-0.5, 0), mode='logratio')</p></div>\n\nExercise\n\nVisualize the inter-trial coherence values as topomaps as done with\n power." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
annarev/tensorflow
tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb
apache-2.0
[ "Copyright 2019 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Text classification with TensorFlow Lite Model Maker\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lite/tutorials/model_maker_text_classification\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nThe TensorFlow Lite Model Maker library simplifies the process of adapting and converting a TensorFlow model to particular input data when deploying this model for on-device ML applications.\nThis notebook shows an end-to-end example that utilizes the 
Model Maker library to illustrate the adaptation and conversion of a commonly-used text classification model to classify movie reviews on a mobile device. The text classification model classifies text into predefined categories.The inputs should be preprocessed text and the outputs are the probabilities of the categories. The dataset used in this tutorial are positive and negative movie reviews.\nPrerequisites\nInstall the required packages\nTo run this example, install the required packages, including the Model Maker package from the GitHub repo.\nIf you run this notebook on Colab, you may see an error message about tensorflowjs and tensorflow-hub version imcompatibility. It is safe to ignore this error as we do not use tensorflowjs in this workflow.", "!pip install -q tflite-model-maker", "Import the required packages.", "import numpy as np\nimport os\n\nfrom tflite_model_maker import configs\nfrom tflite_model_maker import ExportFormat\nfrom tflite_model_maker import model_spec\nfrom tflite_model_maker import text_classifier\nfrom tflite_model_maker import TextClassifierDataLoader\n\nimport tensorflow as tf\nassert tf.__version__.startswith('2')\ntf.get_logger().setLevel('ERROR')", "Download the sample training data.\nIn this tutorial, we will use the SST-2 (Stanford Sentiment Treebank) which is one of the tasks in the GLUE benchmark. It contains 67,349 movie reviews for training and 872 movie reviews for testing. The dataset has two classes: positive and negative movie reviews.", "data_dir = tf.keras.utils.get_file(\n fname='SST-2.zip',\n origin='https://dl.fbaipublicfiles.com/glue/data/SST-2.zip',\n extract=True)\ndata_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')", "The SST-2 dataset is stored in TSV format. The only difference between TSV and CSV is that TSV uses a tab \\t character as its delimiter instead of a comma , in the CSV format.\nHere are the first 5 lines of the training dataset. 
label=0 means negative, label=1 means positive.\n| sentence | label | | | |\n|-------------------------------------------------------------------------------------------|-------|---|---|---|\n| hide new secretions from the parental units | 0 | | | |\n| contains no wit , only labored gags | 0 | | | |\n| that loves its characters and communicates something rather beautiful about human nature | 1 | | | |\n| remains utterly satisfied to remain the same throughout | 0 | | | |\n| on the worst revenge-of-the-nerds clichés the filmmakers could dredge up | 0 | | | |\nNext, we will load the dataset into a Pandas dataframe and change the current label names (0 and 1) to a more human-readable ones (negative and positive) and use them for model training.", "import pandas as pd\n\ndef replace_label(original_file, new_file):\n # Load the original file to pandas. We need to specify the separator as\n # '\\t' as the training data is stored in TSV format\n df = pd.read_csv(original_file, sep='\\t')\n\n # Define how we want to change the label name\n label_map = {0: 'negative', 1: 'positive'}\n\n # Excute the label change\n df.replace({'label': label_map}, inplace=True)\n\n # Write the updated dataset to a new file\n df.to_csv(new_file)\n\n# Replace the label name for both the training and test dataset. Then write the\n# updated CSV dataset to the current folder.\nreplace_label(os.path.join(os.path.join(data_dir, 'train.tsv')), 'train.csv')\nreplace_label(os.path.join(os.path.join(data_dir, 'dev.tsv')), 'dev.csv')", "Quickstart\nThere are five steps to train a text classification model:\nStep 1. Choose a text classification model archiecture.\nHere we use the average word embedding model architecture, which will produce a small and fast model with decent accuracy.", "spec = model_spec.get('average_word_vec')", "Model Maker also supports other model architectures such as BERT. 
If you are interested to learn about other architecture, see the Choose a model architecture for Text Classifier section below.\nStep 2. Load the training and test data, then preprocess them according to a specific model_spec.\nModel Maker can take input data in the CSV format. We will load the training and test dataset with the human-readable label name that were created earlier.\nEach model architecture requires input data to be processed in a particular way. TextClassifierDataLoader reads the requirement from model_spec and automatically execute the necessary preprocessing.", "train_data = TextClassifierDataLoader.from_csv(\n filename='train.csv',\n text_column='sentence',\n label_column='label',\n model_spec=spec,\n is_training=True)\ntest_data = TextClassifierDataLoader.from_csv(\n filename='dev.csv',\n text_column='sentence',\n label_column='label',\n model_spec=spec,\n is_training=False)", "Step 3. Train the TensorFlow model with the training data.\nThe average word embedding model use batch_size = 32 by default. Therefore you will see that it takes 2104 steps to go through the 67,349 sentences in the training dataset. We will train the model for 10 epochs, which means going through the training dataset 10 times.", "model = text_classifier.create(train_data, model_spec=spec, epochs=10)", "Step 4. Evaluate the model with the test data.\nAfter training the text classification model using the sentences in the training dataset, we will use the remaining 872 sentences in the test dataset to evaluate how the model perform against new data it has never seen before.\nAs the default batch size is 32, it will take 28 steps to go through the 872 sentences in the test dataset.", "loss, acc = model.evaluate(test_data)", "Step 5. Export as a TensorFlow Lite model.\nLet's export the text classification that we have trained in the TensorFlow Lite format. 
We will specify which folder to export the model.\nYou may see an warning about vocab.txt file does not exist in the metadata but they can be safely ignore.", "model.export(export_dir='average_word_vec')", "You can download the TensorFlow Lite model file using the left sidebar of Colab. Go into the average_word_vec folder as we specified in export_dir parameter above, right-click on the model.tflite file and choose Download to download it to your local computer.\nThis model can be integrated into an Android or an iOS app using the NLClassifier API of the TensorFlow Lite Task Library.\nSee the TFLite Text Classification sample app for more details on how the model is used in an working app.\nNote 1: Android Studio Model Binding does not support text classification yet so please use the TensorFlow Lite Task Library.\nNote 2: There is a model.json file in the same folder with the TFLite model. It contains the JSON representation of the metadata bundled inside the TensorFlow Lite model. Model metadata helps the TFLite Task Library know what the model does and how to pre-process/post-process data for the model. You don't need to download the model.json file as it is only for informational purpose and its content is already inside the TFLite file.\nNote 3: If you train a text classification model using MobileBERT or BERT-Base architecture, you will need to use BertNLClassifier API instead to integrate the trained model into a mobile app.\nThe following sections walk through the example step by step to show more details.\nChoose a model architecture for Text Classifier\nEach model_spec object represents a specific model for the text classifier. 
TensorFlow Lite Model Maker currently supports MobileBERT, averaging word embeddings and BERT-Base models.\n| Supported Model | Name of model_spec | Model Description | Model size |\n|--------------------------|-------------------------|-----------------------------------------------------------------------------------------------------------------------|---------------------------------------------|\n| Averaging Word Embedding | 'average_word_vec' | Averaging text word embeddings with RELU activation. | <1MB |\n| MobileBERT | 'mobilebert_classifier' | 4.3x smaller and 5.5x faster than BERT-Base while achieving competitive results, suitable for on-device applications. | 25MB w/ quantization <br/> 100MB w/o quantization |\n| BERT-Base | 'bert_classifier' | Standard BERT model that is widely used in NLP tasks. | 300MB |\nIn the quick start, we have used the average word embedding model. Let's switch to MobileBERT to train a model with higher accuracy.", "mb_spec = model_spec.get('mobilebert_classifier')", "Load training data\nYou can upload your own dataset to work through this tutorial. Upload your dataset by using the left sidebar in Colab.\n<img src=\"https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_text_classification.png\" alt=\"Upload File\" width=\"800\" hspace=\"100\">\nIf you prefer not to upload your dataset to the cloud, you can also locally run the library by following the guide.\nTo keep it simple, we will reuse the SST-2 dataset downloaded earlier. 
Let's use the TestClassifierDataLoader.from_csv method to load the data.\nPlease be noted that as we have changed the model architecture, we will need to reload the training and test dataset to apply the new preprocessing logic.", "train_data = TextClassifierDataLoader.from_csv(\n filename='train.csv',\n text_column='sentence',\n label_column='label',\n model_spec=mb_spec,\n is_training=True)\ntest_data = TextClassifierDataLoader.from_csv(\n filename='dev.csv',\n text_column='sentence',\n label_column='label',\n model_spec=mb_spec,\n is_training=False)", "The Model Maker library also supports the from_folder() method to load data. It assumes that the text data of the same class are in the same subdirectory and that the subfolder name is the class name. Each text file contains one movie review sample. The class_labels parameter is used to specify which the subfolders.\nTrain a TensorFlow Model\nTrain a text classification model using the training data.\nNote: As MobileBERT is a complex model, each training epoch will takes about 10 minutes on a Colab GPU. Please make sure that you are using a GPU runtime.", "model = text_classifier.create(train_data, model_spec=mb_spec, epochs=3)", "Examine the detailed model structure.", "model.summary()", "Evaluate the model\nEvaluate the model that we have just trained using the test data and measure the loss and accuracy value.", "loss, acc = model.evaluate(test_data)", "Quantize the model\nIn many on-device ML application, the model size is an important factor. Therefore, it is recommended that you apply quantize the model to make it smaller and potentially run faster. 
Model Maker automatically applies the recommended quantization scheme for each model architecture but you can customize the quantization config as below.", "config = configs.QuantizationConfig.create_dynamic_range_quantization(optimizations=[tf.lite.Optimize.OPTIMIZE_FOR_LATENCY])\nconfig.experimental_new_quantizer = True", "Export as a TensorFlow Lite model\nConvert the trained model to TensorFlow Lite model format with metadata so that you can later use in an on-device ML application. The label file and the vocab file are embedded in metadata. The default TFLite filename is model.tflite.", "model.export(export_dir='mobilebert/', quantization_config=config)", "The TensorFlow Lite model file can be integrated in a mobile app using the BertNLClassifier API in TensorFlow Lite Task Library. Please note that this is different from the NLClassifier API used to integrate the text classification trained with the average word vector model architecture.\nThe export formats can be one or a list of the following:\n\nExportFormat.TFLITE\nExportFormat.LABEL\nExportFormat.VOCAB\nExportFormat.SAVED_MODEL\n\nBy default, it exports only the TensorFlow Lite model file containing the model metadata. You can also choose to export other files related to the model for better examination. For instance, exporting only the label file and vocab file as follows:", "model.export(export_dir='mobilebert/', export_format=[ExportFormat.LABEL, ExportFormat.VOCAB])", "You can evaluate the TFLite model with evaluate_tflite method to measure its accuracy. Converting the trained TensorFlow model to TFLite format and apply quantization can affect its accuracy so it is recommended to evaluate the TFLite model accuracy before deployment.", "accuracy = model.evaluate_tflite('mobilebert/model.tflite', test_data)\nprint('TFLite model accuracy: ', accuracy)", "Advanced Usage\nThe create function is the driver function that the Model Maker library uses to create models. 
The model_spec parameter defines the model specification. The AverageWordVecModelSpec and BertClassifierModelSpec classes are currently supported. The create function comprises of the following steps:\n\nCreates the model for the text classifier according to model_spec.\nTrains the classifier model. The default epochs and the default batch size are set by the default_training_epochs and default_batch_size variables in the model_spec object.\n\nThis section covers advanced usage topics like adjusting the model and the training hyperparameters.\nCustomize the MobileBERT model hyperparameters\nThe model parameters you can adjust are:\n\nseq_len: Length of the sequence to feed into the model.\ninitializer_range: The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\ntrainable: Boolean that specifies whether the pre-trained layer is trainable.\n\nThe training pipeline parameters you can adjust are:\n\nmodel_dir: The location of the model checkpoint files. If not set, a temporary directory will be used.\ndropout_rate: The dropout rate.\nlearning_rate: The initial learning rate for the Adam optimizer.\ntpu: TPU address to connect to.\n\nFor instance, you can set the seq_len=256 (default is 128). This allows the model to classify longer text.", "new_model_spec = model_spec.get('mobilebert_classifier')\nnew_model_spec.seq_len = 256", "Customize the average word embedding model hyperparameters\nYou can adjust the model infrastructure like the wordvec_dim and the seq_len variables in the AverageWordVecModelSpec class.\nFor example, you can train the model with a larger value of wordvec_dim. 
Note that you must construct a new model_spec if you modify the model.", "new_model_spec = model_spec.AverageWordVecModelSpec(wordvec_dim=32)", "Get the preprocessed data.", "new_train_data = TextClassifierDataLoader.from_csv(\n filename='train.csv',\n text_column='sentence',\n label_column='label',\n model_spec=new_model_spec,\n is_training=True)", "Train the new model.", "model = text_classifier.create(new_train_data, model_spec=new_model_spec)", "Tune the training hyperparameters\nYou can also tune the training hyperparameters like epochs and batch_size that affect the model accuracy. For instance,\n\nepochs: more epochs could achieve better accuracy, but may lead to overfitting.\nbatch_size: the number of samples to use in one training step.\n\nFor example, you can train with more epochs.", "model = text_classifier.create(new_train_data, model_spec=new_model_spec, epochs=20)", "Evaluate the newly retrained model with 20 training epochs.", "new_test_data = TextClassifierDataLoader.from_csv(\n filename='dev.csv',\n text_column='sentence',\n label_column='label',\n model_spec=new_model_spec,\n is_training=False)\n\nloss, accuracy = model.evaluate(new_test_data)", "Change the Model Architecture\nYou can change the model by changing the model_spec. The following shows how to change to BERT-Base model.\nChange the model_spec to BERT-Base model for the text classifier.", "spec = model_spec.get('bert_classifier')", "The remaining steps are the same." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rishuatgithub/MLPy
nlp/UPDATED_NLP_COURSE/01-NLP-Python-Basics/06-NLP-Basics-Assessment.ipynb
apache-2.0
[ "<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>\n\nNLP Basics Assessment\nFor this assessment we'll be using the short story An Occurrence at Owl Creek Bridge by Ambrose Bierce (1890). <br>The story is in the public domain; the text file was obtained from Project Gutenberg.", "# RUN THIS CELL to perform standard imports:\nimport spacy\nnlp = spacy.load('en_core_web_sm')", "1. Create a Doc object from the file owlcreek.txt<br>\n\nHINT: Use with open('../TextFiles/owlcreek.txt') as f:", "# Enter your code here:\n\nwith open('../TextFiles/owlcreek.txt') as f:\n doc = nlp(f.read())\n\ntype(doc)\n\n# Run this cell to verify it worked:\n\ndoc[:36]", "2. How many tokens are contained in the file?", "len(doc)", "3. How many sentences are contained in the file?<br>HINT: You'll want to build a list first!", "s = [sent for sent in doc.sents]\nlen(s)", "4. Print the second sentence in the document<br> HINT: Indexing starts at zero, and the title counts as the first sentence.", "print(s[2].text)", "5. For each token in the sentence above, print its text, POS tag, dep tag and lemma<br>\nCHALLENGE: Have values line up in columns in the print output.", "# NORMAL SOLUTION:\n\nfor tokens in s[2]:\n print(tokens,tokens.pos_,tokens.dep_, tokens.lemma_)\n\n# CHALLENGE SOLUTION:\n\nfor token in s[2]:\n print(f'{token.text:{15}} {token.pos_:{5}} {token.dep_:{10}} {token.lemma_:{15}}')", "6. 
Write a matcher called 'Swimming' that finds both occurrences of the phrase \"swimming vigorously\" in the text<br>\nHINT: You should include an 'IS_SPACE': True pattern between the two words!", "# Import the Matcher library:\n\nfrom spacy.matcher import Matcher\nmatcher = Matcher(nlp.vocab)\n\n# Create a pattern and add it to matcher:\n\npattern = [{'LOWER':'swimming'},{'IS_SPACE':True},{'LOWER':'vigorously'}]\n\nmatcher.add('swimmer',None,pattern)\n\n\n# Create a list of matches called \"found_matches\" and print the list:\n\nfound_matches = matcher(doc)\n\nprint(found_matches)\n", "7. Print the text surrounding each found match", "print(doc[1265:1290])\n\nprint(doc[3600:3617])", "EXTRA CREDIT:<br>Print the sentence that contains each found match", "sents = [sent for sent in doc.sents]\n\nfor s in sents:\n if found_matches[0][1] < s.end:\n print(s)\n break\n\n\nfor s in sents:\n if found_matches[1][1] < s.end:\n print(s)\n break\n\n\n", "Great Job!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/ml_ops/stage6/get_started_with_matching_engine.ipynb
apache-2.0
[ "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Notebook is a revised version of notebook from Sara Robinson and Ivan Cheung\nE2E ML on GCP: MLOps stage 6 : serving: get started with Vertex AI Matching Engine\n<table align=\"left\">\n\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage6/get_started_with_matching_engine.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage6/get_started_with_matching_engine.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\\\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/main/notebooks/community/ml_ops/stage6/get_started_with_matching_engine.ipynb\">\n <img src=\"https://lh3.googleusercontent.com/UiNooY4LUgW_oTvpsNhPpQzsstV5W8F7rYgxgGBD85cWJoLmrOzhVs_ksK_vgx40SHs7jCqkTkCk=e14-rj-sc0xffffff-h130-w32\" alt=\"Vertex AI logo\">\n Run in Vertex Workbench\n </a>\n </td>\n</table>\n\nOverview\nThis tutorial demonstrates how to use the Vertex AI Matching Engine service. 
This Cloud AI service is an approximate nearest neighbor (ANN) index and matching service for vectors (i.e., embeddings), with high scaling and low latency.\nthe GCP ANN Service. The service is built upon Approximate Nearest Neighbor (ANN) technology developed by Google Research.\nThere are several levels of using this service.\nno code\nDemonstrated in this tutorial. The user brings their own embeddings for indexing and querying.\nlow code\nThe user constructs embeddings using a Vertex AI pre-built algorithm: Swivel and TwoTowers.\nhigh code\nThe user configures how the serving binary generates embeddings from the model, for indexing and querying, using Vertex AI Explanations by Examples.\nLearn more about Vertex AI Matching Engine\nEmbeddings\nThe prebuilt embeddings used for this tutorial are from the GloVe dataset.\n\"GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and the resulting representations showcase interesting linear substructures of the word vector space.\"\n\nObjective\nIn this notebook, you will learn how to create an Approximate Nearest Neighbor (ANN) Index and query against indexes. 
\nThe steps performed include:\n\nCreate ANN Index.\nCreate an IndexEndpoint with VPC Network\nDeploy ANN Index\nPerform online query\nDeploy brute force Index.\nPerform calibration between ANN and brute force index.\n\nCosts\nThis tutorial uses billable components of Google Cloud:\n\nVertex AI\nCloud Storage\n\nLearn about Vertex AI\npricing and Cloud Storage\npricing, and use the Pricing\nCalculator\nto generate a cost estimate based on your projected usage.\nInstallation\nInstall the packages required for executing this notebook.", "import os\n\n# The Vertex AI Workbench Notebook product has specific requirements\nIS_WORKBENCH_NOTEBOOK = os.getenv(\"DL_ANACONDA_HOME\")\nIS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(\n \"/opt/deeplearning/metadata/env_version\"\n)\n\n# Vertex AI Notebook requires dependencies to be installed with '--user'\nUSER_FLAG = \"\"\nif IS_WORKBENCH_NOTEBOOK:\n USER_FLAG = \"--user\"\n\n! pip3 install --upgrade google-cloud-aiplatform {USER_FLAG} -q\n! pip3 install -U grpcio-tools {USER_FLAG} -q\n! pip3 install -U h5py {USER_FLAG} -q", "Restart the kernel\nAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.", "# Automatically restart kernel after installs\nimport os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)", "Before you begin\nGPU runtime\nMake sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU\nSet up your Google Cloud project\nThe following steps are required, regardless of your notebook environment.\n\n\nSelect or create a Google Cloud project. 
When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n\nMake sure that billing is enabled for your project.\n\n\nEnable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.\n\n\nEnable the Service Networking API.\n\n\nEnable the Cloud DNS API.\n\n\nIf you are running this notebook locally, you will need to install the Cloud SDK.\n\n\nEnter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n\nNote: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.\nSet your project ID\nIf you don't know your project ID, you may be able to get your project ID using gcloud.", "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n\nif PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)\n\n! gcloud config set project $PROJECT_ID", "Get your project number\nNow that the project ID is set, you get your corresponding project number.", "shell_output = ! gcloud projects list --filter=\"PROJECT_ID:'{PROJECT_ID}'\" --format='value(PROJECT_NUMBER)'\nPROJECT_NUMBER = shell_output[0]\nprint(\"Project Number:\", PROJECT_NUMBER)", "Region\nYou can also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.\n\nAmericas: us-central1\nEurope: europe-west4\nAsia Pacific: asia-east1\n\nYou may not use a multi-regional bucket for training with Vertex AI. 
Not all regions provide support for all Vertex AI services.\nLearn more about Vertex AI regions.", "REGION = \"[your-region]\" # @param {type: \"string\"}\n\nif REGION == \"[your-region]\":\n REGION = \"us-central1\"", "Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.", "from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")", "Authenticate your Google Cloud account\nIf you are using Vertex AI Workbench Notebooks, your environment is already authenticated. Skip this step.\nIf you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\nOtherwise, follow these steps:\nIn the Cloud Console, go to the Create service account key page.\nClick Create service account.\nIn the Service account name field, enter a name, and click Create.\nIn the Grant this service account access to project section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select Vertex Administrator. Type \"Storage Object Admin\" into the filter box, and select Storage Object Admin.\nClick Create. A JSON file that contains your key downloads to your local environment.\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.", "# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. 
This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\nimport os\nimport sys\n\n# If on Vertex AI Workbench, then don't execute this code\nIS_COLAB = False\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\") and not os.getenv(\n \"DL_ANACONDA_HOME\"\n):\n if \"google.colab\" in sys.modules:\n IS_COLAB = True\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''", "Create a Cloud Storage bucket\nThe following steps are required, regardless of your notebook environment.\nWhen you initialize the Vertex AI SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.", "BUCKET_NAME = \"[your-bucket-name]\" # @param {type:\"string\"}\nBUCKET_URI = f\"gs://{BUCKET_NAME}\"\n\nif BUCKET_URI == \"\" or BUCKET_URI is None or BUCKET_URI == \"gs://[your-bucket-name]\":\n BUCKET_NAME = PROJECT_ID + \"aip-\" + TIMESTAMP\n BUCKET_URI = \"gs://\" + BUCKET_NAME", "Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.", "! gsutil mb -l $REGION $BUCKET_URI", "Finally, validate access to your Cloud Storage bucket by examining its contents:", "! 
gsutil ls -al $BUCKET_URI", "Set up variables\nNext, set up some variables used throughout the tutorial.\nImport libraries and define constants", "import google.cloud.aiplatform as aiplatform\nimport h5py", "Initialize Vertex AI SDK for Python\nInitialize the Vertex AI SDK for Python for your project and corresponding bucket.", "aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)", "Download and prepare the prebuilt GloVe embeddings\nThe GloVe embeddings consists of a set of pre-trained embeddings. The embeddings are split into a \"train\" and \"test\" splits.\nYou create a Vertex AI Matching Engine index from the \"train\" split, and use the embedding vectors in the \"test\" split as query vectors to test the index.\nNote: While the data split uses the term \"train\", these are pre-trained embeddings and thus are ready to be indexed for search. The terms \"train\" and \"test\" split are used just to be consistent with usual machine learning terminology.", "! gsutil cp gs://cloud-samples-data/vertex-ai/matching_engine/glove-100-angular.hdf5 .", "Load the embeddings into memory\nLoad the GloVe embeddings into memory from a HDF5 storage format.", "h5 = h5py.File(\"glove-100-angular.hdf5\", \"r\")\ntrain = h5[\"train\"]\ntest = h5[\"test\"]\nprint(train)", "Save the train split in JSONL format\nNext, you store the embeddings from the train split as a JSONL formatted file. Each embedding is stored as:\n{ 'id': .., 'embedding': [ ... 
] }\n\nThe format of the embeddings for the index can be in either CSV, JSON, or Avro format.\nLearn more about Embedding Formats for Indexing", "with open(\"glove100.json\", \"w\") as f:\n for i in range(len(train)):\n f.write('{\"id\":\"' + str(i) + '\",')\n f.write('\"embedding\":[' + \",\".join(str(x) for x in train[i]) + \"]}\")\n f.write(\"\\n\")", "Store the JSONL formatted embeddings in Cloud Storage\nNext, you upload the training data to your Cloud Storage bucket.", "EMBEDDINGS_INITIAL_URI = f\"{BUCKET_URI}/matching_engine/initial/\"\n! gsutil cp glove100.json {EMBEDDINGS_INITIAL_URI}", "Create Matching Engine Index\nNext, you create the index for your embeddings. Currently, two indexing algorithms are supported:\n\ncreate_tree_ah_index(): Shallow tree + Asymmetric hashing.\ncreate_brute_force_index(): Linear search.\n\nIn this tutorial, you use the create_tree_ah_index()for production scale. The method is called with the following parameters:\n\ndisplay_name: A human readable name for the index.\ncontents_delta_uri: A Cloud Storage location for the embeddings, which are either to be inserted, updated or deleted.\ndimensions: The number of dimensions of the input vector\napproximate_neighbors_count: (for Tree AH) The default number of neighbors to find via approximate search before exact reordering is performed. Exact reordering is a procedure where results returned by an approximate search algorithm are reordered via a more expensive distance computation.\ndistance_measure_type: The distance measure used in nearest neighbor search.\nSQUARED_L2_DISTANCE: Euclidean (L2) Distance\nL1_DISTANCE: Manhattan (L1) Distance\nCOSINE_DISTANCE: Cosine Distance. Defined as 1 - cosine similarity.\nDOT_PRODUCT_DISTANCE: Default value. Defined as a negative of the dot product.\n\n\ndescription: A human readble description of the index.\nlabels: User metadata in the form of a dictionary.\nleaf_node_embedding_count: Number of embeddings on each leaf node. 
The default value is 1000 if not set.\nleaf_nodes_to_search_percent: The default percentage of leaf nodes that any query may be searched. Must be in range 1-100, inclusive. The default value is 10 (means 10%) if not set.\n\nThis may take upto 30 minutes.\nLearn more about Configuring Matching Engine Indexes.", "DIMENSIONS = 100\nDISPLAY_NAME = \"glove_100_1\"\n\ntree_ah_index = aiplatform.MatchingEngineIndex.create_tree_ah_index(\n display_name=DISPLAY_NAME,\n contents_delta_uri=EMBEDDINGS_INITIAL_URI,\n dimensions=DIMENSIONS,\n approximate_neighbors_count=150,\n distance_measure_type=\"DOT_PRODUCT_DISTANCE\",\n description=\"Glove 100 ANN index\",\n labels={\"label_name\": \"label_value\"},\n # TreeAH specific parameters\n leaf_node_embedding_count=500,\n leaf_nodes_to_search_percent=7,\n)\n\nINDEX_RESOURCE_NAME = tree_ah_index.resource_name\nprint(INDEX_RESOURCE_NAME)", "Update the Index\nNext, you update the index with a new embedding -- i.e., insertion.\nCreate update delta file\nFirst, you make a JSONL file with the embeddings to update. You use synthetic data -- in this case, all zeros -- for existing embedding with id of 0. You then upload the JSONL file to a Cloud Storage location.", "with open(\"glove100_incremental.json\", \"w\") as f:\n f.write(\n '{\"id\":\"0\",\"embedding\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}\\n'\n )\n\nEMBEDDINGS_UPDATE_URI = f\"{BUCKET_URI}/matching-engine/incremental/\"\n\n! 
gsutil cp glove100_incremental.json {EMBEDDINGS_UPDATE_URI}", "Update the index\nNext, you use the method update_embeddings() to incrementally update the index, with the following parameters:\n\ncontents_delta_uri: A Cloud Storage location for the embeddings, which are either to be inserted or updated.\n\nOptionally, the parameter is_complete_overwrite will replace the entire index.", "tree_ah_index = tree_ah_index.update_embeddings(\n contents_delta_uri=EMBEDDINGS_UPDATE_URI,\n)\n\nINDEX_RESOURCE_NAME = tree_ah_index.resource_name\nprint(INDEX_RESOURCE_NAME)", "Setup VPC peering network\nTo use a Matching Engine Index, you setup a VPC peering network between your project and the Vertex AI Matching Engine service project. This eliminates additional hops in network traffic and allows using efficient gRPC protocol.\nLearn more about VPC peering.\nIMPORTANT: you can only setup one VPC peering to servicenetworking.googleapis.com per project.\nCreate VPC peering for default network\nFor simplicity, we setup VPC peering to the default network. You can create a different network for your project.\nIf you setup VPC peering with any other network, make sure that the network already exists and that your VM is running on that network.", "# This is for display only; you can name the range anything.\nPEERING_RANGE_NAME = \"vertex-ai-prediction-peering-range\"\nNETWORK = \"default\"\n\n# NOTE: `prefix-length=16` means a CIDR block with mask /16 will be\n# reserved for use by Google services, such as Vertex AI.\n! gcloud compute addresses create $PEERING_RANGE_NAME \\\n --global \\\n --prefix-length=16 \\\n --description=\"peering range for Google service\" \\\n --network=$NETWORK \\\n --purpose=VPC_PEERING", "Create the VPC connection\nNext, create the connection for VPC peering.\nNote: If you get a PERMISSION DENIED, you may not have the neccessary role 'Compute Network Admin' set for your default service account. 
In the Cloud Console, do the following steps.\n\nGoto IAM &amp; Admin\nFind your service account.\nClick edit icon.\nSelect Add Another Role.\nEnter 'Compute Network Admin'.\nSelect Save", "! gcloud services vpc-peerings connect \\\n --service=servicenetworking.googleapis.com \\\n --network=$NETWORK \\\n --ranges=$PEERING_RANGE_NAME \\\n --project=$PROJECT_ID", "Check the status of your peering connections.", "! gcloud compute networks peerings list --network $NETWORK", "Construct the full network name\nYou need to have the full network resource name when you subsequently create an Matching Engine Index Endpoint resource for VPC peering.", "full_network_name = f\"projects/{PROJECT_NUMBER}/global/networks/{NETWORK}\"", "Create an IndexEndpoint with VPC Network\nNext, you create a Matching Engine Index Endpoint, similar to the concept of creating a Private Endpoint for prediction with a peer-to-peer network.\nTo create the Index Endpoint resource, you call the method create() with the following parameters:\n\ndisplay_name: A human readable name for the Index Endpoint.\ndescription: A description for the Index Endpoint.\nnetwork: The VPC network resource name.", "index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(\n display_name=\"index_endpoint_for_demo\",\n description=\"index endpoint description\",\n network=full_network_name,\n)\n\nINDEX_ENDPOINT_NAME = index_endpoint.resource_name\nprint(INDEX_ENDPOINT_NAME)", "Deploy the Matching Engine Index to the Index Endpoint resource\nNext, deploy your index to the Index Endpoint using the method deploy_index() with the following parameters:\n\ndisplay_name: A human readable name for the deployed index.\nindex: Your index.\ndeployed_index_id: A user assigned identifier for the deployed index.\nmachine_type: (optional) The VM instance type.\nmin_replica_count: (optional) Minimum number of VM instances for auto-scaling.\nmax_replica_count: (optional) Maximum number of VM instances for auto-scaling.\n\nLearn more 
about Machine resources for Index Endpoint", "DEPLOYED_INDEX_ID = \"tree_ah_glove_deployed_\" + TIMESTAMP\n\nMIN_NODES = 1\nMAX_NODES = 2\nDEPLOY_COMPUTE = \"n1-standard-16\"\n\nindex_endpoint.deploy_index(\n display_name=\"deployed_index_for_demo\",\n index=tree_ah_index,\n deployed_index_id=DEPLOYED_INDEX_ID,\n # machine_type=DEPLOY_COMPUTE,\n min_replica_count=MIN_NODES,\n max_replica_count=MAX_NODES,\n)\n\nprint(index_endpoint.deployed_indexes)", "Create and execute an online query\nNow that your index is deployed, you can make queries.\nFirst, you construct a vector query using synthetic data, to use as the example to return matches for.\nNext, you make the matching request using the method match(), with the following parameters:\n\ndeployed_index_id: The identifier of the deployed index.\nqueries: A list of queries (instances).\nnum_neighbors: The number of closest matches to return.", "# The number of nearest neighbors to be retrieved from database for each query.\nNUM_NEIGHBOURS = 10\n\n# Test query\nqueries = [\n [\n -0.11333,\n 0.48402,\n 0.090771,\n -0.22439,\n 0.034206,\n -0.55831,\n 0.041849,\n -0.53573,\n 0.18809,\n -0.58722,\n 0.015313,\n -0.014555,\n 0.80842,\n -0.038519,\n 0.75348,\n 0.70502,\n -0.17863,\n 0.3222,\n 0.67575,\n 0.67198,\n 0.26044,\n 0.4187,\n -0.34122,\n 0.2286,\n -0.53529,\n 1.2582,\n -0.091543,\n 0.19716,\n -0.037454,\n -0.3336,\n 0.31399,\n 0.36488,\n 0.71263,\n 0.1307,\n -0.24654,\n -0.52445,\n -0.036091,\n 0.55068,\n 0.10017,\n 0.48095,\n 0.71104,\n -0.053462,\n 0.22325,\n 0.30917,\n -0.39926,\n 0.036634,\n -0.35431,\n -0.42795,\n 0.46444,\n 0.25586,\n 0.68257,\n -0.20821,\n 0.38433,\n 0.055773,\n -0.2539,\n -0.20804,\n 0.52522,\n -0.11399,\n -0.3253,\n -0.44104,\n 0.17528,\n 0.62255,\n 0.50237,\n -0.7607,\n -0.071786,\n 0.0080131,\n -0.13286,\n 0.50097,\n 0.18824,\n -0.54722,\n -0.42664,\n 0.4292,\n 0.14877,\n -0.0072514,\n -0.16484,\n -0.059798,\n 0.9895,\n -0.61738,\n 0.054169,\n 0.48424,\n -0.35084,\n -0.27053,\n 0.37829,\n 
0.11503,\n -0.39613,\n 0.24266,\n 0.39147,\n -0.075256,\n 0.65093,\n -0.20822,\n -0.17456,\n 0.53571,\n -0.16537,\n 0.13582,\n -0.56016,\n 0.016964,\n 0.1277,\n 0.94071,\n -0.22608,\n -0.021106,\n ],\n [\n -0.99544,\n -2.3651,\n -0.24332,\n -1.0321,\n 0.42052,\n -1.1817,\n -0.16451,\n -1.683,\n 0.49673,\n -0.27258,\n -0.025397,\n 0.34188,\n 1.5523,\n 1.3532,\n 0.33297,\n -0.0056677,\n -0.76525,\n 0.49587,\n 1.2211,\n 0.83394,\n -0.20031,\n -0.59657,\n 0.38485,\n -0.23487,\n -1.0725,\n 0.95856,\n 0.16161,\n -1.2496,\n 1.6751,\n 0.73899,\n 0.051347,\n -0.42702,\n 0.16257,\n -0.16772,\n 0.40146,\n 0.29837,\n 0.96204,\n -0.36232,\n -0.47848,\n 0.78278,\n 0.14834,\n 1.3407,\n 0.47834,\n -0.39083,\n -1.037,\n -0.24643,\n -0.75841,\n 0.7669,\n -0.37363,\n 0.52741,\n 0.018563,\n -0.51301,\n 0.97674,\n 0.55232,\n 1.1584,\n 0.73715,\n 1.3055,\n -0.44743,\n -0.15961,\n 0.85006,\n -0.34092,\n -0.67667,\n 0.2317,\n 1.5582,\n 1.2308,\n -0.62213,\n -0.032801,\n 0.1206,\n -0.25899,\n -0.02756,\n -0.52814,\n -0.93523,\n 0.58434,\n -0.24799,\n 0.37692,\n 0.86527,\n 0.069626,\n 1.3096,\n 0.29975,\n -1.3651,\n -0.32048,\n -0.13741,\n 0.33329,\n -1.9113,\n -0.60222,\n -0.23921,\n 0.12664,\n -0.47961,\n -0.89531,\n 0.62054,\n 0.40869,\n -0.08503,\n 0.6413,\n -0.84044,\n -0.74325,\n -0.19426,\n 0.098722,\n 0.32648,\n -0.67621,\n -0.62692,\n ],\n]\n\nmatches = index_endpoint.match(\n deployed_index_id=DEPLOYED_INDEX_ID, queries=queries, num_neighbors=NUM_NEIGHBOURS\n)\n\nfor instance in matches:\n print(\"INSTANCE\")\n for match in instance:\n print(match)", "Create brute force index for calibration\nThe brute force index uses a naive brute force method to find the nearest neighbors. This method uses a linear search and thus not efficient for large scale indexes. 
We recommend using the brute force index for calibrating the approximate nearest neighbor (ANN) index for recall, or for mission critical matches.\nCreate the brute force index\nNow create the brute force index using the method create_brute_force_index().\nTo ensure an apples to apples comparison, the distanceMeasureType and featureNormType, dimensions of the brute force index should match those of the production indices being tuned.", "brute_force_index = aiplatform.MatchingEngineIndex.create_brute_force_index(\n display_name=DISPLAY_NAME,\n contents_delta_uri=EMBEDDINGS_INITIAL_URI,\n dimensions=DIMENSIONS,\n distance_measure_type=\"DOT_PRODUCT_DISTANCE\",\n description=\"Glove 100 index (brute force)\",\n labels={\"label_name\": \"label_value\"},\n)\n\nINDEX_BRUTE_FORCE_RESOURCE_NAME = brute_force_index.resource_name\nprint(INDEX_BRUTE_FORCE_RESOURCE_NAME)", "Update the index\nFor apples to apples comparison, you perform the same incremental update to the brute force index as you did for the Tree AH index.", "brute_force_index = tree_ah_index.update_embeddings(\n contents_delta_uri=EMBEDDINGS_UPDATE_URI\n)", "Deploy the brute force index to the IndexEndpoint resource\nNext, you deploy the brute force index to the same IndexEndpoint.\nNote: You can deploy multiple indexes to the same Index Endpoint resource.", "DEPLOYED_BRUTE_FORCE_INDEX_ID = \"glove_brute_force_deployed_\" + TIMESTAMP\n\nindex_endpoint.deploy_index(\n index=brute_force_index, deployed_index_id=DEPLOYED_BRUTE_FORCE_INDEX_ID\n)", "Calibration\nNow your ready to do calibration. 
The production version of the index uses an approxiamation method, which means it may have less than perfect recall when compared to the slower exact match (brute force) method.\nGet test results for both indexes\nFirst, using the GloVe test embeddings, you make the identical request to both indexes.", "prod_matches = index_endpoint.match(\n deployed_index_id=DEPLOYED_INDEX_ID,\n queries=list(test),\n num_neighbors=NUM_NEIGHBOURS,\n)\n\nexact_matches = index_endpoint.match(\n deployed_index_id=DEPLOYED_BRUTE_FORCE_INDEX_ID,\n queries=list(test),\n num_neighbors=NUM_NEIGHBOURS,\n)", "Compute Recall\nFinally, you determine from the results the percentage of exact matches are recalled from the production index. You can subsequently use this information to tune the deployment of the production index.", "# Calculate recall by determining how many neighbors were correctly retrieved as compared to the brute-force option.\ncorrect_neighbors = 0\nfor tree_ah_neighbors, brute_force_neighbors in zip(prod_matches, exact_matches):\n tree_ah_neighbor_ids = [neighbor.id for neighbor in tree_ah_neighbors]\n brute_force_neighbor_ids = [neighbor.id for neighbor in brute_force_neighbors]\n\n correct_neighbors += len(\n set(tree_ah_neighbor_ids).intersection(brute_force_neighbor_ids)\n )\n\nrecall = correct_neighbors / (len(test) * NUM_NEIGHBOURS)\n\nprint(\"Recall: {}\".format(recall))", "Cleaning up\nTo clean up all Google Cloud resources used in this project, you can delete the Google Cloud\nproject you used for the tutorial.\nYou can also manually delete resources that you created by running the following code.", "# Force undeployment of indexes and delete endpoint\ntry:\n index_endpoint.delete(force=True)\nexcept Exception as e:\n print(e)\n\n# Delete indexes\ntry:\n tree_ah_index.delete()\n brute_force_index.delete()\nexcept Exception as e:\n print(e)\n\ndelete_bucket = False\nif delete_bucket or os.getenv(\"IS_TESTING\"):\n ! gsutil rm -rf {BUCKET_URI}" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
stable/_downloads/2d6a273bc3ded0e873d80d802edc2b7e/70_report.ipynb
bsd-3-clause
[ "%matplotlib inline", "Getting started with mne.Report\n:class:mne.Report is a way to create interactive HTML summaries of your data.\nThese reports can show many different visualizations for one or multiple\nparticipants. A common use case is creating diagnostic summaries to check data\nquality at different stages in the processing pipeline. The report can show\nthings like plots of data before and after each preprocessing step, epoch\nrejection statistics, MRI slices with overlaid BEM shells, all the way up to\nplots of estimated cortical activity.\nCompared to a Jupyter notebook, :class:mne.Report is easier to deploy (the\nHTML pages it generates are self-contained and do not require a running Python\nenvironment) but less flexible (you can't change code and re-run something\ndirectly within the browser). This tutorial covers the basics of building a\n:class:~mne.Report. As usual, we'll start by importing the modules and data\nwe need:", "from pathlib import Path\nimport tempfile\nimport numpy as np\nimport scipy.ndimage\nimport matplotlib.pyplot as plt\nimport mne\n\ndata_path = Path(mne.datasets.sample.data_path(verbose=False))\nsample_dir = data_path / 'MEG' / 'sample'\nsubjects_dir = data_path / 'subjects'", "Before getting started with :class:mne.Report, make sure the files you want\nto render follow the filename conventions defined by MNE:\n.. cssclass:: table-bordered\n.. 
rst-class:: midvalign\n=================================== =========================================\nData object Filename convention (ends with)\n=================================== =========================================\n~mne.io.Raw -raw.fif(.gz), -raw_sss.fif(.gz),\n -raw_tsss.fif(.gz),\n _meg.fif(.gz), _eeg.fif(.gz),\n _ieeg.fif(.gz)\nevents -eve.fif(.gz)\n~mne.Epochs -epo.fif(.gz)\n~mne.Evoked -ave.fif(.gz)\n~mne.Covariance -cov.fif(.gz)\n~mne.Projection -proj.fif(.gz)\n~mne.transforms.Transform -trans.fif(.gz)\n~mne.Forward -fwd.fif(.gz)\n~mne.minimum_norm.InverseOperator -inv.fif(.gz)\n=================================== =========================================\nAlternatively, the dash - in the filename may be replaced with an\nunderscore _.\nThe basic process for creating an HTML report is to instantiate the\n:class:~mne.Report class and then use one or more of its many methods to\nadd content, one element at a time.\nYou may also use the :meth:~mne.Report.parse_folder method to select\nparticular files to include in the report. But more on that later.\n.. sidebar: Viewing the report\nOn successful creation of the report, the :meth:~mne.Report.save method\n will open the HTML in a new tab in your browser. To disable this, use the\n open_browser=False parameter of :meth:~mne.Report.save.\nAdding ~mne.io.Raw data\nRaw data can be added via the :meth:mne.Report.add_raw method. 
It can\noperate with a path to a raw file and ~mne.io.Raw objects, and will\nproduce – among other output – a slider that allows you to scrub through 10\nequally-spaced 1-second segments of the data:\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>In the following example, we crop the raw data to 60 seconds merely to\n speed up processing; this is not usually recommended!</p></div>", "raw_path = sample_dir / 'sample_audvis_filt-0-40_raw.fif'\nraw = mne.io.read_raw(raw_path)\nraw.pick_types(eeg=True, eog=True, stim=True).crop(tmax=60).load_data()\n\nreport = mne.Report(title='Raw example')\n# This method also accepts a path, e.g., raw=raw_path\nreport.add_raw(raw=raw, title='Raw', psd=False) # omit PSD plot\nreport.save('report_raw.html', overwrite=True)", "Adding events\nEvents can be added via :meth:mne.Report.add_events. You also need to\nsupply the sampling frequency used during the recording; this information\nis used to generate a meaningful time axis.", "events_path = sample_dir / 'sample_audvis_filt-0-40_raw-eve.fif'\nevents = mne.find_events(raw=raw)\nsfreq = raw.info['sfreq']\n\nreport = mne.Report(title='Events example')\nreport.add_events(events=events_path, title='Events from Path', sfreq=sfreq)\nreport.add_events(events=events, title='Events from \"events\"', sfreq=sfreq)\nreport.save('report_events.html', overwrite=True)", "Adding ~mne.Epochs\nEpochs can be added via :meth:mne.Report.add_epochs. Note that although\nthis method accepts a path to an epochs file too, in the following example\nwe only add epochs that we create on the fly from raw data. 
To demonstrate\nthe representation of epochs metadata, we'll add some of that too.", "event_id = {\n 'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,\n 'visual/right': 4, 'face': 5, 'buttonpress': 32\n}\n\nmetadata, _, _ = mne.epochs.make_metadata(\n events=events,\n event_id=event_id,\n tmin=-0.2,\n tmax=0.5,\n sfreq=raw.info['sfreq']\n)\nepochs = mne.Epochs(\n raw=raw, events=events, event_id=event_id, metadata=metadata\n)\n\nreport = mne.Report(title='Epochs example')\nreport.add_epochs(epochs=epochs, title='Epochs from \"epochs\"')\nreport.save('report_epochs.html', overwrite=True)", "Adding ~mne.Evoked\nEvoked data can be added via :meth:mne.Report.add_evokeds. By default, the\nEvoked.comment attribute of each evoked will be used as a title. We can\nspecify custom titles via the titles parameter. Again, this method\nalso accepts the path to an evoked file stored on disk; in the following\nexample, however, we load the evokeds manually first, since we only want to\nadd a subset of them to the report. The evokeds are not baseline-corrected,\nso we apply baseline correction, too. Lastly, by providing an (optional)\nnoise covariance, we can add plots evokeds that were \"whitened\" using this\ncovariance matrix.\nBy default, this method will produce snapshots at 21 equally-spaced time\npoints (or fewer, if the data contains fewer time points). 
We can adjust this\nvia the n_time_points parameter.", "evoked_path = sample_dir / 'sample_audvis-ave.fif'\ncov_path = sample_dir / 'sample_audvis-cov.fif'\n\nevokeds = mne.read_evokeds(evoked_path, baseline=(None, 0))\nevokeds_subset = evokeds[:2] # The first two\nfor evoked in evokeds_subset:\n evoked.pick('eeg') # just for speed of plotting\n\nreport = mne.Report(title='Evoked example')\nreport.add_evokeds(\n evokeds=evokeds_subset,\n titles=['evoked 1', # Manually specify titles\n 'evoked 2'],\n noise_cov=cov_path,\n n_time_points=5\n)\nreport.save('report_evoked.html', overwrite=True)", "Adding ~mne.Covariance\n(Noise) covariance objects can be added via\n:meth:mne.Report.add_covariance. The method accepts ~mne.Covariance\nobjects and the path to a file on disk. It also expects us to pass an\n~mne.Info object or the path to a file to read the measurement info from,\nas well as a title.", "cov_path = sample_dir / 'sample_audvis-cov.fif'\n\nreport = mne.Report(title='Covariance example')\nreport.add_covariance(cov=cov_path, info=raw_path, title='Covariance')\nreport.save('report_cov.html', overwrite=True)", "Adding ~mne.Projection vectors\n~mne.Projection vectors can be added via\n:meth:mne.Report.add_projs. The method requires an ~mne.Info object\n(or the path to one) and a title. Projectors found in the ~mne.Info will\nbe visualized. You may also supply a list of ~mne.Projection objects or\na path to projectors stored on disk. 
In this case, the channel information\nis read from the ~mne.Info, but projectors potentially included will be\nignored; instead, only the explicitly passed projectors will be plotted.", "ecg_proj_path = sample_dir / 'sample_audvis_ecg-proj.fif'\neog_proj_path = sample_dir / 'sample_audvis_eog-proj.fif'\n\nreport = mne.Report(title='Projectors example')\nreport.add_projs(info=raw_path, title='Projs from info')\nreport.add_projs(info=raw_path, projs=ecg_proj_path,\n title='ECG projs from path')\nreport.add_projs(info=raw_path, projs=eog_proj_path,\n title='EOG projs from path')\nreport.save('report_projs.html', overwrite=True)", "Adding ~mne.preprocessing.ICA\n~mne.preprocessing.ICA objects can be added via\n:meth:mne.Report.add_ica. Aside from the parameters ica (that accepts\nan ~mne.preprocessing.ICA instance or a path to an ICA object stored on\ndisk) and the title, there is a third required parameter, inst.\ninst is used to specify a ~mne.io.Raw or ~mne.Epochs object for\nproducing ICA property plots and overlay plots demonstrating\nthe effects of ICA cleaning. If, instead, you only want to generate ICA\ncomponent topography plots, explicitly pass inst=None.\n<div class=\"alert alert-info\"><h4>Note</h4><p>:meth:`mne.Report.add_ica` only works with fitted ICAs.</p></div>\n\nYou can optionally specify for which components to produce topography and\nproperties plots by passing picks. By default, all components will be\nshown. It is also possible to pass evoked signals based on ECG and EOG events\nvia ecg_evoked and eog_evoked. 
This allows you directly see the\neffects of ICA component removal on these artifactual signals.\nArtifact detection scores produced by\n:meth:~mne.preprocessing.ICA.find_bads_ecg\nand :meth:~mne.preprocessing.ICA.find_bads_eog can be passed via the\necg_scores and eog_scores parameters, respectively, producing\nvisualizations of the scores for each ICA component.\nLastly, by passing n_jobs, you may largely speed up the generation of\nthe properties plots by enabling parallel execution.\n<div class=\"alert alert-danger\"><h4>Warning</h4><p>In the following example, we request a small number of ICA components\n to estimate, set the threshold for assuming ICA convergence to a very\n liberal value, and only visualize 2 of the components. All of this is\n done to largely reduce the processing time of this tutorial, and is\n usually **not** recommended for an actual data analysis.</p></div>", "ica = mne.preprocessing.ICA(\n n_components=5, # fit 5 ICA components\n fit_params=dict(tol=0.01) # assume very early on that ICA has converged\n)\n\nica.fit(inst=raw)\n\n# create epochs based on EOG events, find EOG artifacts in the data via pattern\n# matching, and exclude the EOG-related ICA components\neog_epochs = mne.preprocessing.create_eog_epochs(raw=raw)\neog_components, eog_scores = ica.find_bads_eog(\n inst=eog_epochs,\n ch_name='EEG 001', # a channel close to the eye\n threshold=1 # lower than the default threshold\n)\nica.exclude = eog_components\n\nreport = mne.Report(title='ICA example')\nreport.add_ica(\n ica=ica,\n title='ICA cleaning',\n picks=[0, 1], # only plot the first two components\n inst=raw,\n eog_evoked=eog_epochs.average(),\n eog_scores=eog_scores,\n n_jobs=1 # could be increased!\n)\nreport.save('report_ica.html', overwrite=True)", "Adding MRI with BEM\nMRI slices with superimposed traces of the boundary element model (BEM)\nsurfaces can be added via :meth:mne.Report.add_bem. 
All you need to pass is\nthe FreeSurfer subject name and subjects directory, and a title. To reduce\nthe resulting file size, you may pass the decim parameter to only include\nevery n-th volume slice, and width to specify the width of the resulting\nfigures in pixels.", "report = mne.Report(title='BEM example')\nreport.add_bem(\n subject='sample', subjects_dir=subjects_dir, title='MRI & BEM',\n decim=20,\n width=256\n)\nreport.save('report_mri_and_bem.html', overwrite=True)", "Adding coregistration\nThe sensor alignment (head -&gt; mri transformation obtained by\n\"coregistration\") can be visualized via :meth:mne.Report.add_trans. The\nmethod expects the transformation either as a ~mne.transforms.Transform\nobject or as a path to a trans.fif file, the FreeSurfer subject name and\nsubjects directory, and a title. The alpha parameter can be used to\ncontrol the transparency of the head, where a value of 1 means fully opaque.", "trans_path = sample_dir / 'sample_audvis_raw-trans.fif'\n\nreport = mne.Report(title='Coregistration example')\nreport.add_trans(\n trans=trans_path, info=raw_path, subject='sample',\n subjects_dir=subjects_dir, alpha=1.0, title='Coregistration'\n)\nreport.save('report_coregistration.html', overwrite=True)", "Adding a ~mne.Forward solution\nForward solutions (\"leadfields\") can be added by passing a ~mne.Forward\nobject or the path to a forward solution stored on disk to\nmeth:mne.Report.add_forward.", "fwd_path = sample_dir / 'sample_audvis-meg-oct-6-fwd.fif'\n\nreport = mne.Report(title='Forward solution example')\nreport.add_forward(forward=fwd_path, title='Forward solution')\nreport.save('report_forward_sol.html', overwrite=True)", "Adding an ~mne.minimum_norm.InverseOperator\nAn inverse operator can be added via :meth:mne.Report.add_inverse_operator.\nThe method expects an ~mne.minimum_norm.InverseOperator object or a path to\none stored on disk, and a title.", "inverse_op_path = sample_dir / 
'sample_audvis-meg-oct-6-meg-inv.fif'\n\nreport = mne.Report(title='Inverse operator example')\nreport.add_inverse_operator(\n inverse_operator=inverse_op_path, title='Inverse operator'\n)\nreport.save('report_inverse_op.html', overwrite=True)", "Adding a ~mne.SourceEstimate\nAn inverse solution (also called source estimate or source time course, STC)\ncan be added via :meth:mne.Report.add_stc. The\nmethod expects an ~mne.SourceEstimate, the corresponding FreeSurfer subject\nname and subjects directory, and a title. By default, it will produce\nsnapshots at 51 equally-spaced time points (or fewer, if the data contains\nfewer time points). We can adjust this via the n_time_points parameter.", "stc_path = sample_dir / 'sample_audvis-meg'\n\nreport = mne.Report(title='Source estimate example')\nreport.add_stc(\n stc=stc_path, subject='sample', subjects_dir=subjects_dir,\n title='Source estimate', n_time_points=2 # few for speed\n)\nreport.save('report_inverse_sol.html', overwrite=True)", "Adding source code (e.g., a Python script)\nIt is possible to add code or scripts (e.g., the scripts you used for\nanalysis) to the report via :meth:mne.Report.add_code. The code blocks will\nbe automatically syntax-highlighted. You may pass a string with the\nrespective code snippet, or the path to a file. If you pass a path, it\nmust be a pathlib.Path object (and not a string), otherwise it will be\ntreated as a code literal.\nOptionally, you can specify which programming language to assume for syntax\nhighlighting by passing the language parameter. 
By default, we'll assume\nthe provided code is Python.", "mne_init_py_path = Path(mne.__file__) # __init__.py in the MNE-Python root\nmne_init_py_content = mne_init_py_path.read_text(encoding='utf-8')\n\nreport = mne.Report(title='Code example')\nreport.add_code(\n code=mne_init_py_path,\n title=\"Code from Path\"\n)\nreport.add_code(\n code=mne_init_py_content,\n title=\"Code from string\"\n)\n\nreport.save('report_code.html', overwrite=True)", "Adding custom figures\nCustom Matplotlib figures can be added via :meth:~mne.Report.add_figure.\nRequired parameters are the figure and a title. Optionally, may add a caption\nto appear below the figure. You can also specify the image format of the\nimage file that will be generated from the figure, so it can be embedded in\nthe HTML report.", "x = np.linspace(start=0, stop=10, num=100)\ny = x**2\n\nfig, ax = plt.subplots()\nax.plot(x, y, ls='--', lw=2, color='blue', label='my function')\nax.set_xlabel('x')\nax.set_ylabel('f(x)')\nax.legend()\n\nreport = mne.Report(title='Figure example')\nreport.add_figure(\n fig=fig, title='A custom figure',\n caption='A blue dashed line reaches up into the sky …',\n image_format='PNG'\n)\nreport.save('report_custom_figure.html', overwrite=True)\nplt.close(fig)", "The :meth:mne.Report.add_figure method can add multiple figures at once. In\nthis case, a slider will appear, allowing users to intuitively browse the\nfigures. To make this work, you need to provide a collection of figures,\na title, and optionally a collection of captions.\nIn the following example, we will read the MNE logo as a Matplotlib figure\nand rotate it with different angles. 
Each rotated figure and its respective\ncaption will be added to a list, which is then used to create the slider.", "mne_logo_path = Path(mne.__file__).parent / 'icons' / 'mne_icon-cropped.png'\nfig_array = plt.imread(mne_logo_path)\nrotation_angles = np.linspace(start=0, stop=360, num=17)\n\nfigs = []\ncaptions = []\nfor angle in rotation_angles:\n # Rotate and remove some rounding errors to avoid Matplotlib warnings\n fig_array_rotated = scipy.ndimage.rotate(input=fig_array, angle=angle)\n fig_array_rotated = fig_array_rotated.clip(min=0, max=1)\n\n # Create the figure\n fig, ax = plt.subplots()\n ax.imshow(fig_array_rotated)\n ax.set_axis_off()\n\n # Store figure and caption\n figs.append(fig)\n captions.append(f'Rotation angle: {round(angle, 1)}°')\n\n# can also be a MNEQtBrowser instance\nfigs.append(raw.plot())\ncaptions.append('... plus a raw data plot')\n\nreport = mne.Report(title='Multiple figures example')\nreport.add_figure(fig=figs, title='Fun with figures! 🥳', caption=captions)\nreport.save('report_custom_figures.html', overwrite=True)\nfor fig in figs[:-1]:\n plt.close(fig)\nfigs[-1].close()\ndel figs", "Adding image files\nExisting images (e.g., photos, screenshots, sketches etc.) can be added\nto the report via :meth:mne.Report.add_image. Supported image formats\ninclude JPEG, PNG, GIF, and SVG (and possibly others). Like with Matplotlib\nfigures, you can specify a caption to appear below the image.", "report = mne.Report(title='Image example')\nreport.add_image(\n image=mne_logo_path, title='MNE',\n caption='Powered by 🧠 🧠 🧠 around the world!'\n)\nreport.save('report_custom_image.html', overwrite=True)", "Working with tags\nEach add_* method accepts a keyword parameter tags, which can be\nused to pass one or more tags to associate with the respective content\nelements. By default, each add_* method adds a tag describing the data\ntype, e.g., evoked or source-estimate. 
When viewing the HTML report,\nthe Filter by tags dropdown menu can be used to interactively show or\nhide content with specific tags. This allows you e.g. to only view\nevoked or participant-001 data, should you have added those tags.\nVisible tags will appear with blue, and hidden tags with gray background\ncolor.\nTo toggle the visibility of all tags, use the respective checkbox in the\nFilter by tags dropdown menu, or press :kbd:T.", "report = mne.Report(title='Tags example')\nreport.add_image(\n image=mne_logo_path,\n title='MNE Logo',\n tags=('image', 'mne', 'logo', 'open-source')\n)\nreport.save('report_tags.html', overwrite=True)", "Editing a saved report\nSaving to HTML is a write-only operation, meaning that we cannot read an\n.html file back as a :class:~mne.Report object. In order to be able\nto edit a report once it's no longer in-memory in an active Python session,\nsave it as an HDF5 file instead of HTML:", "report = mne.Report(title='Saved report example', verbose=True)\nreport.add_image(image=mne_logo_path, title='MNE 1')\nreport.save('report_partial.hdf5', overwrite=True)", "The saved report can be read back and modified or amended. This allows the\npossibility to e.g. 
run multiple scripts in a processing pipeline, where each\nscript adds new content to an existing report.", "report_from_disk = mne.open_report('report_partial.hdf5')\nreport_from_disk.add_image(image=mne_logo_path, title='MNE 2')\nreport_from_disk.save('report_partial.hdf5', overwrite=True)", "To make this even easier, :class:mne.Report can be used as a\ncontext manager (note the with statement)`):", "with mne.open_report('report_partial.hdf5') as report:\n report.add_image(image=mne_logo_path, title='MNE 3')\n report.save('report_final.html', overwrite=True)", "With the context manager, the updated report is also automatically saved\nback to :file:report.h5 upon leaving the block.\nAdding an entire folder of files\nWe also provide a way to add an entire folder of files to the report at\nonce, without having to invoke the individual add_* methods outlined\nabove for each file. This approach, while convenient, provides less\nflexibility with respect to content ordering, tags, titles, etc.\nFor our first example, we'll generate a barebones report for all the\n:file:.fif files containing raw data in the sample dataset, by passing the\npattern *raw.fif to :meth:~mne.Report.parse_folder. We'll omit the\nsubject and subjects_dir parameters from the :class:~mne.Report\nconstructor, but we'll also pass render_bem=False to the\n:meth:~mne.Report.parse_folder method — otherwise we would get a warning\nabout not being able to render MRI and trans files without knowing the\nsubject. 
To save some processing time in this tutorial, we're also going to\ndisable rendering of the butterfly plots for the ~mne.io.Raw data by\npassing raw_butterfly=False.\nWhich files are included depends on both the pattern parameter passed to\n:meth:~mne.Report.parse_folder and also the subject and\nsubjects_dir parameters provided to the :class:~mne.Report constructor.", "report = mne.Report(title='parse_folder example')\nreport.parse_folder(\n data_path=data_path, pattern='*raw.fif', render_bem=False,\n raw_butterfly=False\n)\nreport.save('report_parse_folder_basic.html', overwrite=True)", "By default, the power spectral density and SSP projectors of the\n:class:~mne.io.Raw files are not shown to speed up report generation. You\ncan add them by passing raw_psd=True and projs=True to the\n:class:~mne.Report constructor. Like in the previous example, we're going\nto omit the butterfly plots by passing raw_butterfly=False. Lastly, let's\nalso refine our pattern to select only the filtered raw recording (omitting\nthe unfiltered data and the empty-room noise recordings).", "pattern = 'sample_audvis_filt-0-40_raw.fif'\nreport = mne.Report(title='parse_folder example 2', raw_psd=True, projs=True)\nreport.parse_folder(\n data_path=data_path, pattern=pattern, render_bem=False, raw_butterfly=False\n)\nreport.save('report_parse_folder_raw_psd_projs.html', overwrite=True)", "This time we'll pass a specific subject and subjects_dir (even though\nthere's only one subject in the sample dataset) and remove our\nrender_bem=False parameter so we can see the MRI slices, with BEM\ncontours overlaid on top if available. 
Since this is computationally\nexpensive, we'll also pass the mri_decim parameter for the benefit of our\ndocumentation servers, and skip processing the :file:.fif files.", "report = mne.Report(\n title='parse_folder example 3', subject='sample', subjects_dir=subjects_dir\n)\nreport.parse_folder(data_path=data_path, pattern='', mri_decim=25)\nreport.save('report_parse_folder_mri_bem.html', overwrite=True)", "Now let's look at how :class:~mne.Report handles :class:~mne.Evoked\ndata (we will skip the MRIs to save computation time).\nThe MNE sample dataset we're using in this example has not been\nbaseline-corrected; so let's apply baseline correction this now for the\nreport!\nTo request baseline correction, pass a baseline argument to\n~mne.Report, which should be a tuple with the starting and ending time of\nthe baseline period. For more details, see the documentation on\n~mne.Evoked.apply_baseline. Here, we will apply baseline correction for a\nbaseline period from the beginning of the time interval to time point zero.\nLastly, we want to render the \"whitened\" evoked data, too. Whitening\nrequires us to specify the path to a covariance matrix file via the\ncov_fname parameter of ~mne.Report.\nNow, let's put all of this together! 
Here we use a temporary directory\nfor speed so we can render a single Evoked instance, using just EEG\nchannels.", "baseline = (None, 0)\ncov_fname = sample_dir / 'sample_audvis-cov.fif'\npattern = 'sample_audvis-no-filter-ave.fif'\nevoked = mne.read_evokeds(sample_dir / pattern)[0]\nreport = mne.Report(\n title='parse_folder example 4', baseline=baseline, cov_fname=cov_fname\n)\nwith tempfile.TemporaryDirectory() as path:\n evoked.save(Path(path) / pattern)\n report.parse_folder(\n path, pattern=pattern, render_bem=False, n_time_points_evokeds=5\n )\nreport.save('report_parse_folder_evoked.html', overwrite=True)", "If you want to actually view the noise covariance in the report, make sure\nit is captured by the pattern passed to :meth:~mne.Report.parse_folder, and\nalso include a source for an :class:~mne.Info object (any of the\n:class:~mne.io.Raw, :class:~mne.Epochs or :class:~mne.Evoked\n:file:.fif files that contain subject data also contain the measurement\ninformation and should work):", "pattern = 'sample_audvis-cov.fif'\ninfo_fname = sample_dir / 'sample_audvis-ave.fif'\nreport = mne.Report(title='parse_folder example 5', info_fname=info_fname)\nreport.parse_folder(\n data_path, pattern=pattern, render_bem=False, n_time_points_evokeds=5\n)\nreport.save('report_parse_folder_cov.html', overwrite=True)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bkimo/discrete-math-with-python
tutorial_1_jupyter_notebook.ipynb
mit
[ "Introduction to Jupyter notebooks\nThe first thing we'll do, discussed later, is import all the modules we'll need. You should in general do this at the very beginning of each notebook, and in fact each .py file you write.", "# Import numerical packages\nimport numpy as np\nimport scipy.integrate\n\n# Import pyplot for plotting\nimport matplotlib.pyplot as plt\n\n# Seaborn, useful for graphics\nimport seaborn as sns\n\n# Magic function to make matplotlib inline; other style specs must come AFTER\n%matplotlib inline\n\n# This enables SVG graphics inline. There is a bug, so uncomment if it works.\n#%config InlineBackend.figure_formats = {'svg',}\n\n# This enables high resolution PNGs. SVG is preferred, but has problems\n# rendering vertical and horizontal lines\n%config InlineBackend.figure_formats = {'png', 'retina'}\n\n# JB's favorite Seaborn settings for notebooks\nrc = {'lines.linewidth': 2, \n 'axes.labelsize': 18, \n 'axes.titlesize': 18, \n 'axes.facecolor': 'DFDFE5'}\nsns.set_context('notebook', rc=rc)\nsns.set_style('darkgrid', rc=rc)", "In this tutorial, you will learn the basics on how to use Jupyter notebooks. Most of your homework will be submitted as Jupyter notebooks, so this is something you will need to master. It will be useful for you to go over Tutorial 2 to learn how to use $\\LaTeX$ to write mathematical notations and statements in your Jupyter notebooks. You should, of course, read the official Jupyter documentation as well.\nContents\n\nWhat is Jupyter\nLaunching a Jupyter notebook\nCells\nCode cells\nDisplay of graphics\nProper formatting of cells\nBest practices for code cells\n\n\nMarkdown cells\nStyling your notebook\nCollaborating with Google Drive\n\nWhat is Jupyter?\nJupyter is a way to combine text (with math!) and code (which runs and can display graphic output!) in an easy-to-read document that renders in a web browser. The notebook itself is stored as a text file in JSON format. 
This text file is what you will email the course instructor when submitting your homework.\nIt is language agnostic as its name suggests. The name \"Jupyter\" is a combination of Julia (a new language for scientific computing), Python (which you know and love, or at least will when the course is over), and R (the dominant tool for statistical computation). However, you currently can run over 40 different languages in a Jupyter notebook, not just Julia, Python, and R.\nLaunching a Jupyter notebook\nA Jupyter was spawned from the IPython project. To launch a Jupyter notebook, you can do the following.\n* Mac: Use the Anaconda launcher and select Jupyter notebook.\n* Windows: Under \"Search programs and files\" from the Start menu, type jupyter notebook and select \"Jupyter notebook.\"\nA Jupyter notebook will then launch in your default web browser.\nYou can also launch Jupyter from the command line. To do this, simply enter\njupyter notebook\n\non the command line and hit enter. This also allows for greater flexibility, as you can launch Jupyter with command line flags. For example, I launch Jupyter using\njupyter notebook --browser=safari\n\nThis fires up Jupyter with Safari as the browser. If you launch Jupyter from the command line, your shell will be occupied with Jupyter and will occasionally print information to the screen. After you are finished with your Jupyter session (and have saved everything), you can kill Jupyter by hitting \"ctrl + C\" in the terminal/PowerShell window.\nWhen you launch Jupyter, you will be presented with a menu of files in your current working directory to choose to edit. You can also navigate around the files on your computer to find a file you wish to edit by clicking the \"Upload\" button in the upper right corner. You can also click \"New\" in the upper right corner to get a new Jupyter notebook. 
After selecting the file you wish to edit, it will appear in a new window in your browser, beautifully formatted and ready to edit.\nCells\nA Jupyter notebook consists of cells. The two main types of cells you will use are code cells and markdown cells, and we will go into their properties in depth momentarily. First, an overview.\nA code cell contains actual code that you want to run. You can specify a cell as a code cell using the pulldown menu in the toolbar in your Jupyter notebook. Otherwise, you can can hit esc and then y (denoted \"esc, y\") while a cell is selected to specify that it is a code cell. Note that you will have to hit enter after doing this to start editing it.\nIf you want to execute the code in a code cell, hit \"shift + enter.\" Note that code cells are executed in the order you execute them. That is to say, the ordering of the cells for which you hit \"shift + enter\" is the order in which the code is executed. If you did not explicitly execute a cell early in the document, its results are now known to the Python interpreter.\nMarkdown cells contain text. The text is written in markdown, a lightweight markup language. You can read about its syntax here. Note that you can also insert HTML into markdown cells, and this will be rendered properly. As you are typing the contents of these cells, the results appear as text. Hitting \"shift + enter\" renders the text in the formatting you specify.\nYou can specify a cell as being a markdown cell in the Jupyter toolbar, or by hitting \"esc, m\" in the cell. Again, you have to hit enter after using the quick keys to bring the cell into edit mode.\nIn general, when you want to add a new cell, you can use the \"Insert\" pulldown menu from the Jupyter toolbar. 
The shortcut to insert a cell below is \"esc, b\" and to insert a cell above is \"esc, a.\" Alternatively, you can execute a cell and automatically add a new one below it by hitting \"alt + enter.\" There is another shot cut, \"ctrl+enter\", which execute a cell but not add a new line below. \nCode cells\nBelow is an example of a code cell printing hello, world. Notice that the output of the print statement appears in the same cell, though separate from the code block.", "# Say hello to the world.\nprint('hello, world.')", "If you evaluate a Python expression that returns a value, that value is displayed as output of the code cell. This only happens, however, for the last line of the code cell.", "# Would show 9 if this were the last line, but it is not, so shows nothing\n4 + 5\n\n# I hope we see 11.\n5 + 6", "Note, however, if the last line does not return a value, such as if we assigned a variable, there is no visible output from the code cell.", "# Variable assignment, so no visible output.\na = 5 + 6\n\n# However, now if we ask for a, its value will be displayed\na", "Output is asynchronous\nAll output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.", "import time, sys\nfor i in range(8):\n print(i)\n time.sleep(0.5)", "Large outputs\nTo better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:", "for i in range(50):\n print(i)", "Beyond a certain point, output will scroll automatically:", "for i in range(500):\n print(2**i - 1)", "Display of graphics\nWhen displaying graphics, you should have them inline, meaning that they are displayed directly in the Jupyter notebook and not in a separate window. You can specify that, as I did at the top of this document, using the %matplotlib inline magic function. 
Below is an example of graphics displayed inline.\nGenerally, I prefer presenting graphics as scalable vector graphics (SVG). Vector graphics are infinitely zoom-able; i.e., the graphics are represented as points, lines, curves, etc., in space, not as a set of pixel values as is the case with raster graphics (such as PNG). By default, graphics are displayed as PNGs, but you can specify SVG as I have at the top of this document in the first code cell. \n%config InlineBackend.figure_formats = {'svg',}\n\nUnfortunately, there seems to be a bug, at least when I render in Safari, where vertical and horizontal lines are not properly rendered when using SVG. For some reason, when I select next cell and convert it to a code cell and back to markdown, the lines are then (sometimes) properly rendered. This is annoying, but I tend to think it is worth it to have nice SVG graphics. On the other hand, PNG graphics will usually suffice if you want to use them in your homework. To specify the ONG graphics to be high resolution, include\n%config InlineBackend.figure_formats = {'png', 'retina'}\n\nat the top of your file, as we have here.", "# Generate data to plot\nx = np.linspace(0, 2 * np.pi, 200)\ny = np.exp(np.sin(np.sin(x)))\n\n# Make plot\nplt.plot(x, y)\nplt.xlim((0, 2 * np.pi))\nplt.xlabel(r'$x$')\nplt.ylabel(r'$\\mathrm{e}^{\\sin{x}}$')", "The plot is included inline with the styling we specified using Seaborn at the beginning of the document. \nProper formatting of cells\nGenerally, it is a good idea to keep cells simple. You can define one function, or maybe two or three closely related functions, in a single cell, and that's about it. When you define a function, you should make sure it is properly commented with descriptive doc strings. 
Below is an example of how I might generate a plot of the Lorenz attractor (which I choose just because it is fun) with code cells and markdown cells with discussion of what I am doing.\nWe will use scipy.integrate.odeint to numerically integrate the Lorenz attractor. We therefore first define a function that returns the right hand side of the system of ODEs that define the Lorentz attractor.", "def lorenz_attractor(r, t, p):\n \"\"\"\n Compute the right hand side of system of ODEs for Lorenz attractor.\n \n Parameters\n ----------\n r : array_like, shape (3,)\n (x, y, z) position of trajectory.\n t : dummy_argument\n Dummy argument, necessary to pass function into \n scipy.integrate.odeint\n p : array_like, shape (3,)\n Parameters (s, k, b) for the attractor.\n \n Returns\n -------\n output : ndarray, shape (3,)\n Time derivatives of Lorenz attractor.\n \n Notes\n -----\n .. Returns the right hand side of the system of ODEs describing\n the Lorenz attractor.\n x' = s * (y - x)\n y' = x * (k - z) - y\n z' = x * y - b * z\n \"\"\"\n # Unpack variables and parameters\n x, y, z = r\n s, p, b = p\n \n return np.array([s * (y - x), \n x * (p - z) - y, \n x * y - b * z])", "With this function in hand, we just have to pick our initial conditions and time points, run the numerical integration, and then plot the result.", "# Parameters to use\np = np.array([10.0, 28.0, 8.0 / 3.0])\n\n# Initial condition\nr0 = np.array([0.1, 0.0, 0.0])\n\n# Time points to sample\nt = np.linspace(0.0, 80.0, 10000)\n\n# Use scipy.integrate.odeint to integrate Lorentz attractor\nr = scipy.integrate.odeint(lorenz_attractor, r0, t, args=(p,))\n\n# Unpack results into x, y, z.\nx, y, z = r.transpose()\n\n# Plot the result\nplt.plot(x, z, '-', linewidth=0.5)\nplt.xlabel(r'$x(t)$', fontsize=18)\nplt.ylabel(r'$z(t)$', fontsize=18)\nplt.title(r'$x$-$z$ proj. 
of Lorenz attractor traj.')", "Best practices for code cells\nHere is a summary of some general rules for composing and formatting your code cells.\n1. Do not exceed the width of the code cell. If the cells are rendered with my CSS (as discussed below), that width is 80 characters.\n2. Keep your code cells short. If you find yourself having one massive code cell, break it up.\n3. Always properly comment your code. Provide complete doc strings for any functions you define.\n4. Do all of your imports in the first code cell at the top of the notebook. With the exception of \"from ... import ...\" imports, import one module per line.\n5. For submitting assignments, always display your graphics inline. You can render the graphics as PNGs if your browser starts experiencing performance issues, but SVG is preferred.\nMarkdown cells\nMarkdown cells contain text. The text is written in markdown, a lightweight markup language. The list of syntactical constructions at this link are pretty much all you need to know for standard markdown. Note that you can also insert HTML into markdown cells, and this will be rendered properly. As you are typing the contents of these cells, the results appear as text. Hitting \"shift + enter\" renders the text in the formatting you specify.\nYou can specify a cell as being a markdown cell in the Jupyter tool bar, or by hitting \"esc, m\" in the cell. Again, you have to hit enter after using the quick keys to bring the cell into edit mode.\nIn addition to HTML, some $\\LaTeX$ expressions may be inserted into markdown cells. $\\LaTeX$ (pronounced \"lay-tech\") is a document markup language that uses the $\\TeX$ typesetting software. It is particularly well-suited for beautiful typesetting of mathematical expressions. In Jupyter notebooks, the $\\LaTeX$ mathematical input is rendered using software called MathJax. This is run off of a remote server, so if you are not connected to the internet, your equations will not be rendered. 
You will use $\\LaTeX$ extensively in preparation of your assignments. There are plenty of resources on the internet for getting started with $\\LaTeX$, but you will only need a tiny subset of its functionality in your assignments, and Tutorial 0c, plus cheat sheets you may find by Google (such as this one) are useful.\nStyling your notebook\nThe default styles of Jupyter notebooks usually work just fine. However, you may want to develope and use your own style, such as making bigger font sizzes or using different numbering style the code cells than default. For example, you can download a sample CSS file and the following to use the new style:\n\nOn the command line, execute <br />\n jupyter --config-dir \nLet's call the directory containing the Jupyter configuration $JUPYTER_CONFIG_DIR (this is actually the name of an environment variable Jupyter will look for). Create a directory $JUPYTER_CONFIG_DIR/custom/.\nCopy the file you downloaded custom.css to the directory you just created.\nThat's it! \n\nJupyter notebooks will now be styled with the sample CSS file. If you are a bit more proficient with using the command line and you want to be able to also use the standard Jupyter notebook formatting, you can create a new config directory, such as $HOME/.jupyter_custom, copy the CSS file there, and then change the JUPYTER_CONFIG_DIR enviroment variable to that directory. You then select which styling you want to use by changing the JUPYTER_CONFIG_DIR enviroment variable." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ddfabbro/ipython_tutorial
my_notebooks/faces_data_analysis.ipynb
mit
[ "Visualizing PCA on Face Images\nThe initial purpose of this tutorial was to introduce my colleague, whos a college sophomore in Engineering, an intuitive feeling for Dimensionality Reduction techniques and how to visualize it.\nI could choose the well-known iris dataset to do this, but I decided to work with data belonging to a much higher feature space - images. 75000 dimensions to be exact.\nThe following content is a visual approach to understanding PCA on extremely high dimensional datasets and walks through the code step-by-step, showing how easily these techniques can be implemented nowadays using Python.\nDisclaimer: This will not show an in depth view of PCA theory. I created this solely with the purpose to visualize the effects of the Principal Components on face images a.k.a. eigenfaces, which I couldn't find anything explained in such way on the internet.\nImporting the libraries\nThe libraries used in this tutorial are:", "%matplotlib inline\nimport numpy as np #as always\nimport matplotlib.pyplot as plt #to visualize things\nimport urllib #to download our dataset\nfrom PIL import Image #to manipulate images\nfrom StringIO import StringIO # these libraries are used to unzip \nfrom zipfile import ZipFile # and store images in memory\nfrom sklearn.decomposition import PCA # PCA itself. There's quite a theory behind this, but we'll skip it in this tutorial\nfrom pylab import rcParams #just so I can resize my resulting images\nrcParams['figure.figsize'] = 15, 5", "Downloading the dataset\nFirst thing to do is download the dataset.\nThis is a publicly available dataset from FEI in Brazil. It consists of two images (smiling and not smiling) taken from 200 individuals, totalling 400 images. 
For more information, please visit: http://fei.edu.br/~cet/facedatabase.html", "url1 = 'http://fei.edu.br/~cet/frontalimages_spatiallynormalized_part1.zip'\nurl2 = 'http://fei.edu.br/~cet/frontalimages_spatiallynormalized_part2.zip'\nzip_files = [urllib.urlretrieve(url1)[0],urllib.urlretrieve(url2)[0]]\narchive = [ZipFile(zip_files[0],'r'),ZipFile(zip_files[1],'r')]\n\nface_db = []\nfor name in archive[0].namelist()+archive[1].namelist():\n try:\n face = Image.open(StringIO(archive[0].read(name)))\n except:\n face = Image.open(StringIO(archive[1].read(name)))\n face_db.append(np.array(face))\n\nface_db = np.array(face_db)", "Transforming the data for analysis\nBefore we can use image data for any type of statistical analysis, we need to tranform it into a more \"statistical friendly\" data format. The problem here is that each image is represented by a matrix of pixels in which each pixel is a different variable representing its intensity from 0 (black) to 255 (white).\nA \"statistical friendly\" data format has its variables represented by columns and its samples represented by rows, just like spreadsheets we all familiar with.\nIf each row should represent a sample (image) of my data, then the image matrix has to be \"flattened\" into a vector. 
This is easily performed by an awesome numpy function: ravel().\nAfter doing so, we repeat this process for each image and store it into our \"statistical friendly\" dataset, which now has its variables represented by columns and its samples represented by rows.\nThe code for this task is as follows:", "face_db = np.array([np.ravel(face_db[i]) for i in range(face_db.shape[0])])", "Where: h and w are the height and width of the image, respectively.\nNote: Although the original database contains 400 images, for this tutorial, we're only using 200 of them, since we are only analysing the non-smiling face.\nTo check if our face_db is in a \"statistical friendly\" dataset, we use:", "print face_db.shape", "Indeed, this tells us that our dataset has 200 samples and 75000 variables!\nWe can also check the values of our dataset. Be careful to not print all of it, since the dataset contains 200 $\\times$ 75000 values, which equals to 15,000,000 values! In other words, it could possible crash your favorite spreadsheet.", "print face_db", "Although data_db represents a \"statistical friendly\" dataset, it isn't a \"human friendly\" dataset, since our eyes prefer to see images as a matrix of pixels.\nFortunately we can go back and forth to see the results.\nHere's a sample of this dataset:", "np.random.seed(4)\nf, axarr = plt.subplots(1,5)\nfor i in range(5):\n axarr[i].imshow(face_db[np.random.randint(face_db.shape[0])].reshape(300,250),cmap='gray')\n axarr[i].axis('off')", "First Analysis of data_db\nThe first question to my mind is: do we really need 75000 variables to represent a unique face?\nLet's plot the first 2 variables of face_db and see how they behave.", "plt.scatter(face_db[:,0],face_db[:,1])\nplt.xlim([0, 255])\nplt.ylim([0, 255])\nplt.xlabel('1st pixel')\nplt.ylabel('2nd pixel')\nplt.grid()", "The figure above tells us that, as the first variable (1st pixel) raises, the second variable (2nd pixel) raises as well. 
This is a possible indication that both variables are correlated.\nIn other words, this figure is telling us that we don't need 2 variables to represent this variation, which makes them redundant.\nGoing back to the image itself, it makes a lot of sense, since neighboring pixels in pictures are very likely to vary together.\nSo how many variable do we really need to represent a face?\nThis is where Principal Component Analysis comes in!\nPrincipal Component Analysis (PCA)\nNow we go through how should PCA be implemented in Python using scikit-learn.\nStep 1:\nNote: Python simplifies it so much that this step is not even needed. But it's good to keep the good practices though.\nBack to the point, the first step is to center your data. This means that the average of our data should be moved to the origin. Sounds confusing, but this just means that your dataset should be deducted by the average.", "mean_face = np.mean(face_db,0)\ncentered_face_db = face_db - mean_face", "But what does the average of my dataset looks like?\nAnswer: The average face", "plt.imshow(mean_face.reshape(300,250),cmap='gray')\nplt.axis('off')", "Step 2:\nNow we calculate the Principal Components of our centered face dataset:", "pca = PCA().fit(centered_face_db)", "And thats it! We now can explore our data in a whole new (almost magical) feature space.\nNote: The number of components should not exceed the number of samples. Just like we can't solve a linear system of 3 variables given only 2 equations. Thefore, we used the maximum number of components, 400.\nExploring the Components\nSo what PCA actually did to my data?\nIt made a linear transformation of my original variables (pixels) in a way that the new transformed variables are not correlated to each other.\nRemember that each variable represented a pixel intensity ranging from 0 to 255 and that a lot of them were correlated? 
Now we have a new set of variables in which none are correlated.\nAdditionaly, these new variables are orthogonal to each other, which means they will vary independently.\nBut if I don't represent my images by pixel intensity, what does these new variables represent then?\nWell, these new variables are described by the eigenvectors of my correlation matrix, which points towards maximum variance of my data. When dealing with faces, we call them eigenfaces, as shown:", "eigenvectors = pca.components_\nf, axarr = plt.subplots(1,5)\nfor i in range(5):\n axarr[i].imshow(eigenvectors[i].reshape(300,250),cmap='gray')\n axarr[i].set_title(r'$x_' + str(i+1)+'$')\n axarr[i].axis('off')", "It looks strange at first, but what these eigenfaces are telling us is that we don't need individual pixels to represent each face of my dataset. This comes really handy when we want to reconize someone using only the relevant features to avoid overfitting.\nLet's navigante between some of the most important components of my new data. Since the new base is orthogonal, we can navigate each component independently.\nNote: Since PCA has the data centered we should start from the average face, which is located at the origin of the feature space. 
In other words, we navigate across components using:\n<img src=\"https://latex.codecogs.com/svg.latex?\\overline{face}&plus;&space;w_n&space;\\cdot&space;v_n\" title=\"\\overline{face}+ w_n \\cdot v_n\" />\nwhere n represents my n th component, wn the magnitude of this component and vn the eigenvector\nFirst Component", "f, axarr = plt.subplots(1,7)\nfor i,w in enumerate([1000*k for k in range(-6,7,2)]):\n axarr[i].imshow((mean_face+w*eigenvectors[0]).reshape(300,250),cmap='gray')\n axarr[i].axis('off')", "Second Component", "f, axarr = plt.subplots(1,7)\nfor i,w in enumerate([1000*k for k in range(-6,7,2)]):\n axarr[i].imshow((mean_face+w*eigenvectors[1]).reshape(300,250),cmap='gray')\n axarr[i].axis('off')", "Third Component", "f, axarr = plt.subplots(1,7)\nfor i,w in enumerate([1000*k for k in range(-6,7,2)]):\n axarr[i].imshow((mean_face+w*eigenvectors[2]).reshape(300,250),cmap='gray')\n axarr[i].axis('off')", "Combining Components\nSo far we looked each component independently, but we can compose them as well.\nIn fact, if we use all components using exactly the same values w for each face, we reconstruct the original face entirely.\nFirstly, we find these values w. We can easily do so by transforming the 75000 pixel intesities into the new 400 variables.", "pca_face_db = pca.transform(centered_face_db)", "Next, we reconstruct a face using a linear combination of the eigenvectors\n<img src=\"https://latex.codecogs.com/svg.latex?\\overline{face}&plus;\\sum&space;w_n&space;\\cdot&space;v_n\" title=\"\\overline{face}+\\sum w_n \\cdot v_n\" />\nReconstructing Subject 1 Face Using 400 Components", "orthonormal_face = mean_face + np.sum(np.asarray([eigenvectors[i]*pca_face_db[2,i] for i in range(400)]),0)\nplt.imshow(orthonormal_face.reshape(300,250),cmap='gray')\nplt.axis('off')", "Obviously, we don't need all 400 Components to represent a face with statistical significance. 
Adopting 110 components we can explain 95% of variance in the face, which still makes Subject 1 recognizeable.", "print np.sum(pca.explained_variance_ratio_[0:110])*100", "Reconstructing Subject 1 Using 82 Components", "orthonormal_face = mean_face + np.sum(np.asarray([eigenvectors[i]*pca_face_db[2,i] for i in range(110)]),0)\nplt.imshow(orthonormal_face.reshape(300,250),cmap='gray')\nplt.axis('off')", "Still looks very similar, but that's just for us. Machine Learning Algorithms can reconize someone using even less components!\nGenerating a Random Face\nThis is the cool part. We can generate an entirely new face by using arbitrary values of w:", "np.random.seed(4)\northonormal_face = mean_face + np.sum(np.asarray([(eigenvectors[i]*pca_face_db[2,i]*np.random.normal()) for i in range(110)]),0)\nplt.imshow(orthonormal_face.reshape(300,250),cmap='gray')\nplt.axis('off')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
NathanYee/ThinkBayes2
code/chap09soln.ipynb
gpl-2.0
[ "Think Bayes: Chapter 9\nThis notebook presents code and exercises from Think Bayes, second edition.\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT", "from __future__ import print_function, division\n\n% matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport math\nimport numpy as np\n\nfrom thinkbayes2 import Pmf, Cdf, Suite, Joint\nimport thinkplot", "Improving Reading Ability\nFrom DASL(http://lib.stat.cmu.edu/DASL/Stories/ImprovingReadingAbility.html)\n\nAn educator conducted an experiment to test whether new directed reading activities in the classroom will help elementary school pupils improve some aspects of their reading ability. She arranged for a third grade class of 21 students to follow these activities for an 8-week period. A control classroom of 23 third graders followed the same curriculum without the activities. At the end of the 8 weeks, all students took a Degree of Reading Power (DRP) test, which measures the aspects of reading ability that the treatment is designed to improve.\nSummary statistics on the two groups of children show that the average score of the treatment class was almost ten points higher than the average of the control class. A two-sample t-test is appropriate for testing whether this difference is statistically significant. 
The t-statistic is 2.31, which is significant at the .05 level.\n\nI'll use Pandas to load the data into a DataFrame.", "import pandas as pd\n\ndf = pd.read_csv('drp_scores.csv', skiprows=21, delimiter='\\t')\ndf.head()", "And use groupby to compute the means for the two groups.", "grouped = df.groupby('Treatment')\nfor name, group in grouped:\n print(name, group.Response.mean())", "The Normal class provides a Likelihood function that computes the likelihood of a sample from a normal distribution.", "from scipy.stats import norm\nfrom thinkbayes2 import EvalNormalPdf\n\nclass Normal(Suite, Joint):\n \n def Likelihood(self, data, hypo):\n \"\"\"\n \n data: sequence of test scores\n hypo: mu, sigma\n \"\"\"\n mu, sigma = hypo\n likes = EvalNormalPdf(data, mu, sigma)\n return np.prod(likes)", "The prior distributions for mu and sigma are uniform.", "mus = np.linspace(20, 80, 101)\nsigmas = np.linspace(5, 30, 101)", "I use itertools.product to enumerate all pairs of mu and sigma.", "from itertools import product\n\ncontrol = Normal(product(mus, sigmas))\ndata = df[df.Treatment=='Control'].Response\ncontrol.Update(data)", "After the update, we can plot the probability of each mu-sigma pair as a contour plot.", "thinkplot.Contour(control, pcolor=True)\nthinkplot.Config(xlabel='mu', ylabel='sigma')", "And then we can extract the marginal distribution of mu", "pmf_mu0 = control.Marginal(0)\nthinkplot.Pdf(pmf_mu0)\nthinkplot.Config(xlabel='mu', ylabel='Pmf')", "And the marginal distribution of sigma", "pmf_sigma0 = control.Marginal(1)\nthinkplot.Pdf(pmf_sigma0)\nthinkplot.Config(xlabel='sigma', ylabel='Pmf')", "Exercise: Run this analysis again for the control group. What is the distribution of the difference between the groups? What is the probability that the average \"reading power\" for the treatment group is higher? 
What is the probability that the variance of the treatment group is higher?", "# Solution\n\ntreated = Normal(product(mus, sigmas))\ndata = df[df.Treatment=='Treated'].Response\ntreated.Update(data)\n\n# Solution\n\n# Here's the posterior joint distribution for the treated group\n\nthinkplot.Contour(treated, pcolor=True)\nthinkplot.Config(xlabel='mu', ylabel='Pmf')\n\n# Solution\n\n# The marginal distribution of mu\n\npmf_mu1 = treated.Marginal(0)\nthinkplot.Pdf(pmf_mu1)\nthinkplot.Config(xlabel='mu', ylabel='Pmf')\n\n# Solution\n\n# The marginal distribution of sigma\n\npmf_sigma1 = treated.Marginal(1)\nthinkplot.Pdf(pmf_sigma1)\nthinkplot.Config(xlabel='sigma', ylabel='Pmf')\n\n# Solution\n\n# Now we can compute the distribution of the difference between groups\n\npmf_diff = pmf_mu1 - pmf_mu0\npmf_diff.Mean(), pmf_diff.MAP()\n\n# Solution\n\n# And CDF_diff(0), which is the probability that the difference is <= 0\n\npmf_diff = pmf_mu1 - pmf_mu0\ncdf_diff = pmf_diff.MakeCdf()\nthinkplot.Cdf(cdf_diff)\ncdf_diff[0]\n\n# Solution\n\n# Or we could directly compute the probability that mu is\n# greater than mu2\n\npmf_mu1.ProbGreater(pmf_mu0)\n\n# Solution\n\n# Finally, here's the probability that the standard deviation\n# in the treatment group is higher.\n\npmf_sigma1.ProbGreater(pmf_sigma0)\n\n# It looks like there is a high probability that the mean of\n# the treatment group is higher, and the most likely size of\n# the effect is 9-10 points.\n\n# It looks like the variance of the treated group is substantially\n# smaller, which suggests that the treatment might be helping\n# low scorers more than high scorers.", "Paintball\nSuppose you are playing paintball in an indoor arena 30 feet\nwide and 50 feet long. You are standing near one of the 30 foot\nwalls, and you suspect that one of your opponents has taken cover\nnearby. 
Along the wall, you see several paint spatters, all the same\ncolor, that you think your opponent fired recently.\nThe spatters are at 15, 16, 18, and 21 feet, measured from the\nlower-left corner of the room. Based on these data, where do you\nthink your opponent is hiding?\nHere's the Suite that does the update. It uses MakeLocationPmf,\ndefined below.", "class Paintball(Suite, Joint):\n \"\"\"Represents hypotheses about the location of an opponent.\"\"\"\n\n def __init__(self, alphas, betas, locations):\n \"\"\"Makes a joint suite of parameters alpha and beta.\n\n Enumerates all pairs of alpha and beta.\n Stores locations for use in Likelihood.\n\n alphas: possible values for alpha\n betas: possible values for beta\n locations: possible locations along the wall\n \"\"\"\n self.locations = locations\n pairs = [(alpha, beta) \n for alpha in alphas \n for beta in betas]\n Suite.__init__(self, pairs)\n\n def Likelihood(self, data, hypo):\n \"\"\"Computes the likelihood of the data under the hypothesis.\n\n hypo: pair of alpha, beta\n data: location of a hit\n\n Returns: float likelihood\n \"\"\"\n alpha, beta = hypo\n x = data\n pmf = MakeLocationPmf(alpha, beta, self.locations)\n like = pmf.Prob(x)\n return like\n\ndef MakeLocationPmf(alpha, beta, locations):\n \"\"\"Computes the Pmf of the locations, given alpha and beta. 
\n\n Given that the shooter is at coordinates (alpha, beta),\n the probability of hitting any spot is inversely proportionate\n to the strafe speed.\n\n alpha: x position\n beta: y position\n locations: x locations where the pmf is evaluated\n\n Returns: Pmf object\n \"\"\"\n pmf = Pmf()\n for x in locations:\n prob = 1.0 / StrafingSpeed(alpha, beta, x)\n pmf.Set(x, prob)\n pmf.Normalize()\n return pmf\n\ndef StrafingSpeed(alpha, beta, x):\n \"\"\"Computes strafing speed, given location of shooter and impact.\n\n alpha: x location of shooter\n beta: y location of shooter\n x: location of impact\n\n Returns: derivative of x with respect to theta\n \"\"\"\n theta = math.atan2(x - alpha, beta)\n speed = beta / math.cos(theta)**2\n return speed", "The prior probabilities for alpha and beta are uniform.", "alphas = range(0, 31)\nbetas = range(1, 51)\nlocations = range(0, 31)\n\nsuite = Paintball(alphas, betas, locations)\nsuite.UpdateSet([15, 16, 18, 21])", "To visualize the joint posterior, I take slices for a few values of beta and plot the conditional distributions of alpha. If the shooter is close to the wall, we can be somewhat confident of his position. 
The farther away he is, the less certain we are.", "locations = range(0, 31)\nalpha = 10\nbetas = [10, 20, 40]\nthinkplot.PrePlot(num=len(betas))\n\nfor beta in betas:\n pmf = MakeLocationPmf(alpha, beta, locations)\n pmf.label = 'beta = %d' % beta\n thinkplot.Pdf(pmf)\n\nthinkplot.Config(xlabel='Distance',\n ylabel='Prob')", "Here are the marginal posterior distributions for alpha and beta.", "marginal_alpha = suite.Marginal(0, label='alpha')\nmarginal_beta = suite.Marginal(1, label='beta')\n\nprint('alpha CI', marginal_alpha.CredibleInterval(50))\nprint('beta CI', marginal_beta.CredibleInterval(50))\n\nthinkplot.PrePlot(num=2)\n \nthinkplot.Cdf(Cdf(marginal_alpha))\nthinkplot.Cdf(Cdf(marginal_beta))\n \nthinkplot.Config(xlabel='Distance',\n ylabel='Prob')", "To visualize the joint posterior, I take slices for a few values of beta and plot the conditional distributions of alpha. If the shooter is close to the wall, we can be somewhat confident of his position. The farther away he is, the less certain we are.", "betas = [10, 20, 40]\nthinkplot.PrePlot(num=len(betas))\n\nfor beta in betas:\n cond = suite.Conditional(0, 1, beta)\n cond.label = 'beta = %d' % beta\n thinkplot.Pdf(cond)\n\nthinkplot.Config(xlabel='Distance',\n ylabel='Prob')", "Another way to visualize the posterio distribution: a pseudocolor plot of probability as a function of alpha and beta.", "thinkplot.Contour(suite.GetDict(), contour=False, pcolor=True)\n\nthinkplot.Config(xlabel='alpha',\n ylabel='beta',\n axis=[0, 30, 0, 20])", "Here's another visualization that shows posterior credible regions.", "d = dict((pair, 0) for pair in suite.Values())\n\npercentages = [75, 50, 25]\nfor p in percentages:\n interval = suite.MaxLikeInterval(p)\n for pair in interval:\n d[pair] += 1\n\nthinkplot.Contour(d, contour=False, pcolor=True)\nthinkplot.Text(17, 4, '25', color='white')\nthinkplot.Text(17, 15, '50', color='white')\nthinkplot.Text(17, 30, '75')\n\nthinkplot.Config(xlabel='alpha',\n ylabel='beta',\n 
legend=False)", "Exercise: From John D. Cook\n\"Suppose you have a tester who finds 20 bugs in your program. You want to estimate how many bugs are really in the program. You know there are at least 20 bugs, and if you have supreme confidence in your tester, you may suppose there are around 20 bugs. But maybe your tester isn't very good. Maybe there are hundreds of bugs. How can you have any idea how many bugs there are? There’s no way to know with one tester. But if you have two testers, you can get a good idea, even if you don’t know how skilled the testers are.\nSuppose two testers independently search for bugs. Let k1 be the number of errors the first tester finds and k2 the number of errors the second tester finds. Let c be the number of errors both testers find. The Lincoln Index estimates the total number of errors as k1 k2 / c [I changed his notation to be consistent with mine].\"\nSo if the first tester finds 20 bugs, the second finds 15, and they find 3 in common, we estimate that there are about 100 bugs. 
What is the Bayesian estimate of the number of errors based on this data?", "# Solution\n\nfrom scipy.special import binom as choose\n\ndef binom(k, n, p):\n \"\"\"Computes the rest of the binomial PMF.\n\n k: number of hits\n n: number of attempts\n p: probability of a hit\n \"\"\"\n return p**k * (1-p)**(n-k)\n\n\nclass Lincoln(Suite, Joint):\n \"\"\"Represents hypotheses about the number of errors.\"\"\"\n\n def Likelihood(self, data, hypo):\n \"\"\"Computes the likelihood of the data under the hypothesis.\n\n hypo: n, p1, p2\n data: k1, k2, c\n \"\"\"\n n, p1, p2 = hypo\n k1, k2, c = data\n\n part1 = choose(n, k1) * binom(k1, n, p1)\n part2 = choose(k1, c) * choose(n-k1, k2-c) * binom(k2, n, p2)\n return part1 * part2\n\n# Solution\n\ndata = 20, 15, 3\nprobs = np.linspace(0, 1, 31)\nhypos = []\nfor n in range(32, 350):\n for p1 in probs:\n for p2 in probs:\n hypos.append((n, p1, p2))\n\nsuite = Lincoln(hypos)\nsuite.Update(data)\n\n# Solution\n\nn_marginal = suite.Marginal(0)\nthinkplot.Pmf(n_marginal, label='n')\nthinkplot.Config(xlabel='number of bugs',\n ylabel='PMF')\n\n# Solution\n\nprint('post mean n', n_marginal.Mean())\nprint('MAP n', n_marginal.MAP())", "Exercise: The GPS problem. According to Wikipedia\n\n\nGPS included a (currently disabled) feature called Selective Availability (SA) that adds intentional, time varying errors of up to 100 meters (328 ft) to the publicly available navigation signals. This was intended to deny an enemy the use of civilian GPS receivers for precision weapon guidance.\n[...]\nBefore it was turned off on May 2, 2000, typical SA errors were about 50 m (164 ft) horizontally and about 100 m (328 ft) vertically.[10] Because SA affects every GPS receiver in a given area almost equally, a fixed station with an accurately known position can measure the SA error values and transmit them to the local GPS receivers so they may correct their position fixes. This is called Differential GPS or DGPS. 
DGPS also corrects for several other important sources of GPS errors, particularly ionospheric delay, so it continues to be widely used even though SA has been turned off. The ineffectiveness of SA in the face of widely available DGPS was a common argument for turning off SA, and this was finally done by order of President Clinton in 2000.\n\nSuppose it is 1 May 2000, and you are standing in a field that is 200m square. You are holding a GPS unit that indicates that your location is 51m north and 15m west of a known reference point in the middle of the field.\nHowever, you know that each of these coordinates has been perturbed by a \"feature\" that adds random errors with mean 0 and standard deviation 30m.\n1) After taking one measurement, what should you believe about your position?\nNote: Since the intentional errors are independent, you could solve this problem independently for X and Y. But we'll treat it as a two-dimensional problem, partly for practice and partly to see how we could extend the solution to handle dependent errors.\nYou can start with the code in gps.py.\n2) Suppose that after one second the GPS updates your position and reports coordinates (48, 90). 
What should you believe now?\n3) Suppose you take 8 more measurements and get:\n(11.903060613102866, 19.79168669735705)\n(77.10743601503178, 39.87062906535289)\n(80.16596823095534, -12.797927542984425)\n(67.38157493119053, 83.52841028148538)\n(89.43965206875271, 20.52141889230797)\n(58.794021026248245, 30.23054016065644)\n(2.5844401241265302, 51.012041625783766)\n(45.58108994142448, 3.5718287379754585)\n\nAt this point, how certain are you about your location?", "# Solution\n\nfrom thinkbayes2 import EvalNormalPdf\n\nclass Gps(Suite, Joint):\n \"\"\"Represents hypotheses about your location in the field.\"\"\"\n\n def Likelihood(self, data, hypo):\n \"\"\"Computes the likelihood of the data under the hypothesis.\n\n hypo: \n data: \n \"\"\"\n std = 30\n meanx, meany = hypo\n x, y = data\n like = EvalNormalPdf(x, meanx, std)\n like *= EvalNormalPdf(y, meany, std)\n return like\n\n# Solution\n\nfrom itertools import product\n\ncoords = np.linspace(-100, 100, 101)\njoint = Gps(product(coords, coords))\njoint.Update((51, -15))\n\n# Solution\n\njoint.Update((48, 90))\n\n# Solution\n\npairs = [(11.903060613102866, 19.79168669735705),\n (77.10743601503178, 39.87062906535289),\n (80.16596823095534, -12.797927542984425),\n (67.38157493119053, 83.52841028148538),\n (89.43965206875271, 20.52141889230797),\n (58.794021026248245, 30.23054016065644),\n (2.5844401241265302, 51.012041625783766),\n (45.58108994142448, 3.5718287379754585)]\n\njoint.UpdateSet(pairs)\n\n# Solution\n\nthinkplot.PrePlot(2)\npdfx = joint.Marginal(0)\npdfy = joint.Marginal(1)\nthinkplot.Pdf(pdfx, label='posterior x')\nthinkplot.Pdf(pdfy, label='posterior y')\n\n# Solution\n\nprint(pdfx.Mean(), pdfx.Std())\nprint(pdfy.Mean(), pdfy.Std())", "Exercise: The Flea Beetle problem from DASL\nDatafile Name: Flea Beetles\nDatafile Subjects: Biology\nStory Names: Flea Beetles\nReference: Lubischew, A.A. (1962) On the use of discriminant functions in taxonomy. Biometrics, 18, 455-477. 
Also found in: Hand, D.J., et al. (1994) A Handbook of Small Data Sets, London: Chapman & Hall, 254-255.\nAuthorization: Contact Authors\nDescription: Data were collected on the genus of flea beetle Chaetocnema, which contains three species: concinna (Con), heikertingeri (Hei), and heptapotamica (Hep). Measurements were made on the width and angle of the aedeagus of each beetle. The goal of the original study was to form a classification rule to distinguish the three species.\nNumber of cases: 74\nVariable Names:\nWidth: The maximal width of aedeagus in the forpart (in microns)\nAngle: The front angle of the aedeagus (1 unit = 7.5 degrees)\nSpecies: Species of flea beetle from the genus Chaetocnema\nSuggestions: \n\n\nPlot CDFs for the width and angle data, broken down by species, to get a visual sense of whether the normal distribution is a good model.\n\n\nUse the data to estimate the mean and standard deviation for each variable, broken down by species.\n\n\nGiven a joint posterior distribution for mu and sigma, what is the likelihood of a given datum?\n\n\nWrite a function that takes a measured width and angle and returns a posterior PMF of species.\n\n\nUse the function to classify each of the specimens in the table and see how many you get right.", "import pandas as pd\n\ndf = pd.read_csv('flea_beetles.csv', delimiter='\\t')\ndf.head()\n\ndef plot_cdfs(df, col):\n for name, group in df.groupby('Species'):\n cdf = Cdf(group[col], label=name)\n thinkplot.Cdf(cdf)\n \n thinkplot.Config(xlabel=col, legend=True, loc='lower right')\n\nplot_cdfs(df, 'Width')\n\nplot_cdfs(df, 'Angle')\n\nfrom thinkbayes2 import EvalNormalPdf\n\nclass Beetle(Suite, Joint):\n \n def Likelihood(self, data, hypo):\n \"\"\"\n data: sequence of measurements\n hypo: mu, sigma\n \"\"\"\n mu, sigma = hypo\n likes = EvalNormalPdf(data, mu, sigma)\n return np.prod(likes)\n \n def PredictiveProb(self, data):\n \"\"\"Compute the posterior total probability of a datum.\n \n data: sequence of 
measurements\n \"\"\"\n total = 0\n for (mu, sigma), prob in self.Items():\n likes = norm.pdf(data, mu, sigma)\n total += prob * np.prod(likes)\n return total\n\nfrom itertools import product\n\ndef MakeWidthSuite(data):\n mus = np.linspace(115, 160, 51)\n sigmas = np.linspace(1, 10, 51)\n suite = Beetle(product(mus, sigmas))\n suite.Update(data)\n return suite\n\ngroups = df.groupby('Species')\n\nfor name, group in groups:\n suite = MakeWidthSuite(group.Width)\n thinkplot.Contour(suite)\n print(name, suite.PredictiveProb(137))\n\ndef MakeAngleSuite(data):\n mus = np.linspace(8, 16, 101)\n sigmas = np.linspace(0.1, 2, 101)\n suite = Beetle(product(mus, sigmas))\n suite.Update(data)\n return suite\n\nfor name, group in groups:\n suite = MakeAngleSuite(group.Angle)\n thinkplot.Contour(suite)\n print(name, suite.PredictiveProb(13))\n\nclass Species:\n \n def __init__(self, name, suite_width, suite_angle):\n self.name = name\n self.suite_width = suite_width\n self.suite_angle = suite_angle\n \n def __str__(self):\n return self.name\n \n def Likelihood(self, data):\n width, angle = data\n like1 = self.suite_width.PredictiveProb(width)\n like2 = self.suite_angle.PredictiveProb(angle)\n return like1 * like2\n\nspecies = {}\n\nfor name, group in groups:\n suite_width = MakeWidthSuite(group.Width)\n suite_angle = MakeAngleSuite(group.Angle)\n species[name] = Species(name, suite_width, suite_angle)\n\nspecies['Con'].Likelihood((145, 14))\n\nclass Classifier(Suite):\n \n def Likelihood(self, data, hypo):\n return hypo.Likelihood(data)\n\nsuite = Classifier(species.values())\nsuite.Print()\n\nsuite.Update((145, 14))\nsuite.Print()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
CitrineInformatics/lolo
python/examples/ling-immi-2017.ipynb
apache-2.0
[ "Recreating Ling IMMI (2017)\nIn this notebook, we will recreate some key results from Ling et al. IMMI (2017). We will show that the errors produced from the Random Forest implemented in lolo are well-calibrated and that the uncertainties can be used with Sequential Learning to quickly find optimal materials within a search space.\nNote: This notebook will require you to install matminer and establish an account with Citrination to get an an API key (see Quickstart), and set it as an environment variable named CITRINE_KEY.", "%matplotlib inline\nfrom matplotlib import pyplot as plt\nfrom matminer.data_retrieval.retrieve_Citrine import CitrineDataRetrieval\nfrom matminer.featurizers.base import MultipleFeaturizer\nfrom matminer.featurizers import composition as cf\nfrom lolopy.learners import RandomForestRegressor\nfrom sklearn.model_selection import KFold\nfrom pymatgen import Composition\nfrom scipy.stats import norm\nimport pandas as pd\nimport numpy as np\nimport os", "Set the random seed", "np.random.seed(8)", "Get the Datasets\nThe Ling Paper used 4 different datasets to test the uncertainty estimates", "cdr = CitrineDataRetrieval()\n\ndata = cdr.get_dataframe(criteria={'data_set_id': 150888}, print_properties_options=False)", "Convert the composition and class variable from strings", "def get_compostion(c):\n \"\"\"Attempt to parse composition, return None if failed\"\"\"\n \n try:\n return Composition(c)\n except:\n return None\n\ndata['composition'] = data['chemicalFormula'].apply(get_compostion)\ndata['ZT'] = pd.to_numeric(data['ZT'], errors='coerce')\n\ndata.reset_index(drop=True, inplace=True)", "Compute Features\nEvery dataset except the steel fatigue dataset uses the composition-based features of Ward et al..", "f = MultipleFeaturizer([cf.Stoichiometry(), cf.ElementProperty.from_preset(\"magpie\"),\n cf.ValenceOrbital(props=['avg']), cf.IonProperty(fast=True)])\n\nX = np.array(f.featurize_many(data['composition']))", "Get the Residuals and RF 
Uncertainty\nAs described in the Ling paper, ideally-calibrated uncertainty estimaes should have a particular relationship with the errors of a machine learning model. Specifically, the distribution of $r(x)/\\sigma(x)$ where $r(x)$ is the residual of the prediction and $\\sigma(x)$ is the uncertainty of the prediction for x should have a Gaussian distribution with zero mean and unit standard deviation.", "model = RandomForestRegressor()", "Get the errors from 8-fold cross-validation", "y = data['ZT'].values\n\ny_resid = []\ny_uncer = []\nfor train_id, test_id in KFold(8, shuffle=True).split(X):\n model.fit(X[train_id], y[train_id])\n yf_pred, yf_std = model.predict(X[test_id], return_std=True)\n y_resid.extend(yf_pred - y[test_id])\n y_uncer.extend(yf_std)", "Plot the normalized residuals ($r(x)/\\sigma(x)$) against the normal distribution", "fig, axs = plt.subplots(1, 2, sharey=True)\n\nx = np.linspace(-8, 8, 50)\n\n# Plot the RF uncertainty\nresid = np.divide(y_resid, y_uncer)\naxs[0].hist(resid, x, density=True)\naxs[0].set_title('With Lolo Uncertainty Estimates')\n\n# Plot assuming constant errors\nresid = np.divide(y_resid, np.sqrt(np.power(y_resid, 2).mean()))\naxs[1].hist(resid, x, density=True)\naxs[1].set_title('Assuming Constant Error')\n\nfor ax in axs:\n ax.plot(x, norm.pdf(x), 'k--', lw=0.75)\n ax.set_xlabel('Normalized Residual')\n\naxs[0].set_ylabel('Probability Density')\n\nfig.set_size_inches(6.5, 2)\nfig.tight_layout()", "Here, we compare the error distribution using the Lolo uncertainty estimates (left) and the assumption that all entries have the same error (right). 
The normalized residuals for the uncertainty estimates have a distribution closer to the unit normal distribution, which means - as expected - that it better captures which predictions will have a higher error.\nSequential Learning\nOne important use of model uncertainties is to employ them to guide which experiments to pick to find optimal materials with minimal experiments/computations. As described in the Ling paper (and other nice articles), it is not always best to pick the experiment that the model predicts to have the best properties if you can perform more than one experiment sequentially. Rather, it can be better to pick entries with large uncertainities that, when tested and added to the training set, can improve the models predictions for the next experiments. \nHere, we demonstrate one approach for picking experiments: Maximum Liklihood of Improvement (MLI). In contrast to picking the material with the best predicted properties (an approach we refer to Maximum Expected Improvment (MEU)), the MLI approach pickes the material with with the highest liklihood of being better than the best material in the training set - a measure that uses both the predicted value and the uncertainty.\nStep 1: Pick an initial training set\nWe'll start with a small set of entries from the training set", "in_train = np.zeros(len(data), dtype=np.bool)\nin_train[np.random.choice(len(data), 10, replace=False)] = True\nprint('Picked {} training entries'.format(in_train.sum()))\n\nassert not np.isclose(max(y), max(y[in_train]))", "Step 2: Demonstrate picking the entries based on MLI and MEI\nJust to give a visual of how the selection process works\nMake the predictions", "model.fit(X[in_train], y[in_train])\ny_pred, y_std = model.predict(X[~in_train], return_std=True)", "For MEI, we pick the highest predicted value. For MLI, we pick the material that has the highest probability of being better than any material in the training set. 
As we assume the predictions to be normally distributed, the probability of materials can be computed from the Z-score $Z = (y - y^)/\\sigma$ where $y^$ is the maximum of the $y$ of the training set. Formally, the probability can be computed from the Z-score using the cumulative distribution function of the normal distribution. For our purposes, we can use the Z-score becuase the probability is a monotonic function of the Z-score (stated simply: the material with the highest probability will have the highest Z-score).", "mei_selection = np.argmax(y_pred)\nmli_selection = np.argmax(np.divide(y_pred - np.max(y[in_train]), y_std))\n\nprint('Predicted ZT of material #{} selected based on MEI: {:.2f} +/- {:.2f}'.format(mei_selection, y_pred[mei_selection], y_std[mei_selection]))\nprint('Predicted ZT of material #{} selected based on MLI: {:.2f} +/- {:.2f}'.format(mli_selection, y_pred[mli_selection], y_std[mli_selection]))", "For this particular iteration, the MEI and MLI strategies pick the same material. Depending on the random seed of this notebook and that used by lolo, you may see that the material picked by MLI has a lower predicted $ZT$ but a higher variance. 
According to the logic behind MLI, picking that entry will (1) yield a higher liklihood of finding a well-performing material and (2) lead to an improved model.\nStep 3: Run an iterative search\nStarting with the same 32 materials in the training set, we will iteratively pick materials, add them to the training set, and retrain the model using 3 different strategies for picking entries: MEI, MLI, and randomly.", "n_steps = 32\n\nall_inds = set(range(len(y)))", "Random Selection\nJust pick an entry at random, no need to train a model", "random_train = [set(np.where(in_train)[0].tolist())]\n\nfor i in range(n_steps):\n # Get the current train set and search space\n train_inds = set(random_train[-1]) # Last iteration\n search_inds = sorted(all_inds.difference(train_inds))\n \n # Pick an entry at random\n train_inds.add(np.random.choice(search_inds))\n \n # Add it to the list of training sets\n random_train.append(train_inds)", "Maximum Expected Improvement\nPick the entry with the largest predicted value", "mei_train = [set(np.where(in_train)[0].tolist())]\n\nfor i in range(n_steps):\n # Get the current train set and search space\n train_inds = sorted(set(mei_train[-1])) # Last iteration\n search_inds = sorted(all_inds.difference(train_inds))\n \n # Pick entry with the largest maximum value\n model.fit(X[train_inds], y[train_inds])\n y_pred = model.predict(X[search_inds])\n train_inds.append(search_inds[np.argmax(y_pred)])\n \n # Add it to the list of training sets\n mei_train.append(set(train_inds))", "Maximum Likelihood of Improvement\nPick the entry with the largest probability of improvement", "mli_train = [set(np.where(in_train)[0].tolist())]\n\nfor i in range(n_steps):\n # Get the current train set and search space\n train_inds = sorted(set(mei_train[-1])) # Last iteration\n search_inds = sorted(all_inds.difference(train_inds))\n \n # Pick entry with the largest maximum value\n model.fit(X[train_inds], y[train_inds])\n y_pred, y_std = 
model.predict(X[search_inds], return_std=True)\n train_inds.append(search_inds[np.argmax(np.divide(y_pred - np.max(y[train_inds]), y_std))])\n \n # Add it to the list of training sets\n mli_train.append(set(train_inds))", "Plot the results", "fig, ax = plt.subplots()\n\nfor train_inds, label in zip([random_train, mei_train, mli_train], ['Random', 'MEI', 'MLI']):\n ax.plot(np.arange(len(train_inds)), [max(y[list(t)]) for t in train_inds], label=label)\n\nax.set_xlabel('Number of New Experiments')\nax.set_ylabel('Best $ZT$ Found')\n \nfig.set_size_inches(3.5, 2)\nax.legend()\nfig.tight_layout()", "For this particular case, we find that the MLI strategy finds the best material more quickly than the Random or MEI approaches. In Ling 2017, they evaluate the performance of these strategies over many iterations and find that, on average, MLI finds the optimal materials as fast or better than any other approach." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
0.20/_downloads/ba93e79a900327aac9ad4c8f17e818c8/plot_brainstorm_phantom_elekta.ipynb
bsd-3-clause
[ "%matplotlib inline", "Brainstorm Elekta phantom dataset tutorial\nHere we compute the evoked from raw for the Brainstorm Elekta phantom\ntutorial dataset. For comparison, see [1]_ and:\nhttps://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta\n\nReferences\n.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716", "# Authors: Eric Larson <larson.eric.d@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nprint(__doc__)", "The data were collected with an Elekta Neuromag VectorView system at 1000 Hz\nand low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data\nare read to construct instances of :class:mne.io.Raw.", "data_path = bst_phantom_elekta.data_path(verbose=True)\nsubject = 'sample'\n\nraw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\nraw = read_raw_fif(raw_fname)", "Data channel array consisted of 204 MEG planor gradiometers,\n102 axial magnetometers, and 3 stimulus channels. Let's get the events\nfor the phantom, where each dipole (1-32) gets its own event:", "events = find_events(raw, 'STI201')\nraw.plot(events=events)\nraw.info['bads'] = ['MEG1933', 'MEG2421']", "The data have strong line frequency (60 Hz and harmonics) and cHPI coil\nnoise (five peaks around 300 Hz). Here we plot only out to 60 seconds\nto save memory:", "raw.plot_psd(tmax=30., average=False)", "Our phantom produces sinusoidal bursts at 20 Hz:", "raw.plot(events=events)", "Now we epoch our data, average it, and look at the first dipole response.\nThe first peak appears around 3 ms. 
Because we low-passed at 40 Hz,\nwe can also decimate our data to save memory.", "tmin, tmax = -0.1, 0.1\nbmax = -0.05 # Avoid capture filter ringing into baseline\nevent_id = list(range(1, 33))\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, bmax),\n preload=False)\nepochs['1'].average().plot(time_unit='s')", "Let's use a sphere head geometry model &lt;eeg_sphere_model&gt;\nand let's see the coordinate alignment and the sphere location. The phantom\nis properly modeled by a single-shell sphere with origin (0., 0., 0.).", "sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08)\n\nmne.viz.plot_alignment(epochs.info, subject=subject, show_axes=True,\n bem=sphere, dig=True, surfaces='inner_skull')", "Let's do some dipole fits. We first compute the noise covariance,\nthen do the fits for each event_id taking the time instant that maximizes\nthe global field power.", "# here we can get away with using method='oas' for speed (faster than \"shrunk\")\n# but in general \"shrunk\" is usually better\ncov = mne.compute_covariance(epochs, tmax=bmax)\nmne.viz.plot_evoked_white(epochs['1'].average(), cov)\n\ndata = []\nt_peak = 0.036 # true for Elekta phantom\nfor ii in event_id:\n # Avoid the first and last trials -- can contain dipole-switching artifacts\n evoked = epochs[str(ii)][1:-1].average().crop(t_peak, t_peak)\n data.append(evoked.data[:, 0])\nevoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)\ndel epochs\ndip, residual = fit_dipole(evoked, cov, sphere, n_jobs=1)", "Do a quick visualization of how much variance we explained, putting the\ndata and residuals on the same scale (here the \"time points\" are the\n32 dipole peak values that we fit):", "fig, axes = plt.subplots(2, 1)\nevoked.plot(axes=axes)\nfor ax in axes:\n ax.texts = []\n for line in ax.lines:\n line.set_color('#98df81')\nresidual.plot(axes=axes)", "Now we can compare to the actual locations, taking the difference in mm:", "actual_pos, actual_ori = 
mne.dipole.get_phantom_dipoles()\nactual_amp = 100. # nAm\n\nfig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7))\n\ndiffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))\nprint('mean(position error) = %0.1f mm' % (np.mean(diffs),))\nax1.bar(event_id, diffs)\nax1.set_xlabel('Dipole index')\nax1.set_ylabel('Loc. error (mm)')\n\nangles = np.rad2deg(np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1))))\nprint(u'mean(angle error) = %0.1f°' % (np.mean(angles),))\nax2.bar(event_id, angles)\nax2.set_xlabel('Dipole index')\nax2.set_ylabel(u'Angle error (°)')\n\namps = actual_amp - dip.amplitude / 1e-9\nprint('mean(abs amplitude error) = %0.1f nAm' % (np.mean(np.abs(amps)),))\nax3.bar(event_id, amps)\nax3.set_xlabel('Dipole index')\nax3.set_ylabel('Amplitude error (nAm)')\n\nfig.tight_layout()\nplt.show()", "Let's plot the positions and the orientations of the actual and the estimated\ndipoles", "actual_amp = np.ones(len(dip)) # misc amp to create Dipole instance\nactual_gof = np.ones(len(dip)) # misc GOF to create Dipole instance\ndip_true = \\\n mne.Dipole(dip.times, actual_pos, actual_amp, actual_ori, actual_gof)\n\nfig = mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces='inner_skull',\n coord_frame='head', meg='helmet', show_axes=True)\n\n# Plot the position and the orientation of the actual dipole\nfig = mne.viz.plot_dipole_locations(dipoles=dip_true, mode='arrow',\n subject=subject, color=(0., 0., 0.),\n fig=fig)\n\n# Plot the position and the orientation of the estimated dipole\nfig = mne.viz.plot_dipole_locations(dipoles=dip, mode='arrow', subject=subject,\n color=(0.2, 1., 0.5), fig=fig)\n\nmne.viz.set_3d_view(figure=fig, azimuth=70, elevation=80, distance=0.5)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
satishgoda/learning
python/libs/yaml/ruamel_1_intro.ipynb
mit
[ "About\nruamel.yaml is a YAML 1.2 loader/dumper package for Python. It is a derivative of Kirill Simonov’s PyYAML 3.11\nruamel.yaml supports YAML 1.2 and has round-trip loaders and dumpers that preserves, among others:\n\ncomments\nblock style and key ordering are kept, so you can diff the round-tripped source\nflow style sequences ( ‘a: b, c, d’) (based on request and test by Anthony Sottile)\nanchors names that are hand-crafted (i.e. not of the formidNNN)\nmerges in dictionaries are preserved\n\n\nYou can read more at http://yaml.readthedocs.io/en/latest/overview.html\nImporting", "import ruamel.yaml\n\nruamel.yaml\n\nruamel\n\ndir(ruamel)", "Examples\n\nhttp://yaml.readthedocs.io/en/latest/example.html", "inp = \"\"\"\\\n# example\nname:\n # details\n family: Goda # Very uncommon\n given: Satish # One of the siblings (Comman name)\n\"\"\"\n\nprint(inp)\n\nhelp(ruamel.yaml.load)\n\ncode = ruamel.yaml.load(inp, Loader=ruamel.yaml.RoundTripLoader)\n\ncode\n\ncode['name']['given']\n\ncode['name']['family']", "Anchors and References\nAPI Study\nBased on the example (abridged version)", "import ruamel.yaml\n\ninp = \"\"\"\\\n- &CENTER {x: 1, y: 2}\n- &LEFT {x: 0, y: 2}\n- &BIG {r: 10}\n- &SMALL {r: 1}\n\"\"\"\n\ncode = ruamel.yaml.load(inp, Loader=ruamel.yaml.RoundTripLoader)\n\ncode\n\ntype(code)\n\ndir(code)\n\ncode.anchor\n\ncode.anchor.value\n\ncode.keys()\n\nfor item in code:\n print(item)\n\nfor item in code:\n print(item.anchor.value, item.keys())", "Full example", "import ruamel.yaml\n\ninp = \"\"\"\\\n- &CENTER {x: 1, y: 2}\n- &LEFT {x: 0, y: 2}\n- &BIG {r: 10}\n- &SMALL {r: 1}\n\n# All the following maps are equal:\n\n# Explicit keys\n- x: 1\n y: 2\n r: 10\n label: center/big\n\n# Merge one map\n- <<: *CENTER\n r: 10\n label: center/big\n\n# Merge multiple maps\n- <<: [*CENTER, *BIG]\n label: center/big\n \n# Override\n- <<: [*BIG, *LEFT, *SMALL]\n x: 1\n label: center/big\n\"\"\"\n\ndata = ruamel.yaml.load(inp, 
Loader=ruamel.yaml.RoundTripLoader)\n\ndata\n\n# Modifying the merge order of the aliases\n\ninp = \"\"\"\\\n- &CENTER {x: 1, y: 2}\n- &LEFT {x: 0, y: 2}\n- &BIG {r: 10}\n- &SMALL {r: 1}\n\n# All the following maps are equal:\n\n# Explicit keys\n- x: 1\n y: 2\n r: 10\n label: center/big\n\n# Merge one map\n- <<: *CENTER\n r: 10\n label: center/big\n\n# Merge multiple maps\n- <<: [*CENTER, *BIG]\n label: center/big\n \n# Override\n- <<: [*SMALL, *BIG, *LEFT]\n x: 1\n label: center/small\n\"\"\"\n\ndata = ruamel.yaml.load(inp, Loader=ruamel.yaml.RoundTripLoader)\n\ndata\n\nassert data[7]['y'] == 2", "Inserting Keys and Comments", "yaml_str = \"\"\"\\\nfirst_name: Art\noccupation: Architect # This is an occupation comment\nabout: Art Vandelay is a fictional character that George invents...\n\"\"\"\n\ndata = ruamel.yaml.round_trip_load(yaml_str)\n\ndata\n\ndata.insert(1, 'last name', 'Vandelay', comment='new key')\n\nprint(ruamel.yaml.round_trip_dump(data))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
atavory/ibex
examples/boston_plotting_cv_preds.ipynb
bsd-3-clause
[ "Plotting Cross-Validated Predictions On The Boston Dataset\nThis notebook illustrates finding feature importance in the Boston dataset. It is a version of the Scikit-Learn example Plotting Cross-Validated Predictions.\nThe main point it shows is using pandas structures throughout the code, and integrate nicely with seaborn.", "import pandas as pd\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn import model_selection\nimport seaborn as sns\nsns.set_style('whitegrid')\nsns.despine()\n\nfrom ibex import trans\nfrom ibex.sklearn import linear_model as pd_linear_model\nfrom ibex.sklearn import decomposition as pd_decomposition\nfrom ibex.sklearn import preprocessing as pd_preprocessing\nfrom ibex.sklearn import ensemble as pd_ensemble\nfrom ibex import xgboost as pd_xgboost\nfrom ibex.sklearn import model_selection as pd_model_selection\n\n%pylab inline", "Loading The Data\nFirst we load the dataset into a pandas.DataFrame.", "dataset = datasets.load_boston()\nboston = pd.DataFrame(dataset.data, columns=dataset.feature_names)\nfeatures = dataset.feature_names\nboston['price'] = dataset.target\nboston.head()", "Building The Cross Validated Predictions\nWe will use a linear predictor, and a random forest predictor.", "linear_y_hat = pd_model_selection.cross_val_predict(\n pd_linear_model.LinearRegression(),\n boston[features],\n boston.price)\nlinear_y_hat.head()\n\nlinear_cv= pd.concat([linear_y_hat, boston.price], axis=1)\nlinear_cv['type'] = 'linear'\nlinear_cv.columns = ['y_hat', 'y', 'regressor']\nlinear_cv.head()\n\nrf_y_hat = pd_model_selection.cross_val_predict(\n pd_ensemble.RandomForestRegressor(),\n boston[features],\n boston.price)\nrf_cv= pd.concat([rf_y_hat, boston.price], axis=1)\nrf_cv['type'] = 'rf'\nrf_cv.columns = ['y_hat', 'y', 'regressor']\n\nxgb_rf_y_hat = pd_model_selection.cross_val_predict(\n pd_xgboost.XGBRegressor(),\n boston[features],\n boston.price)\nxgb_rf_cv= pd.concat([xgb_rf_y_hat, boston.price], 
axis=1)\nxgb_rf_cv['type'] = 'xgb_rf'\nxgb_rf_cv.columns = ['y_hat', 'y', 'regressor']\n\ncvs = pd.concat([linear_cv, rf_cv, xgb_rf_cv])\n\ncvs.regressor.unique()", "Plotting The Cross-Validated Predictions\nFinally, we can plot the results:", "min_, max_ = cvs[['y_hat', 'y']].min().min(), cvs[['y_hat', 'y']].max().max()\nsns.lmplot(\n x='y', \n y='y_hat', \n hue='regressor', \n data=cvs,\n palette={'linear': 'grey', 'rf': 'brown', 'xgb_rf': 'green'});\nplot(np.linspace(min_, max_, 100), np.linspace(min_, max_, 100), '--', color='darkgrey');\ntick_params(colors='0.6')\nxlim((min_, max_))\nylim((min_, max_))\nfigtext(\n 0, \n -0.1, \n 'Cross-validated predictions for linear and random-forest regressor on the price in the Boston dataset;\\n'\n 'the linear regressor has inferior performance here, in particular for lower prices');" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
cloudmesh/book
notebooks/fingerprint/fingerprint_matching.ipynb
apache-2.0
[ "Python Fingerprint Example\nPython is a flexible and popular language for running data analysis pipelines. In this tutorial we will implement a solution for a fingerprint matching.\nOverview\nFingerprint recognition refers to the automated method for verifying a match between two fingerprints and that is used to identify individuals and verify their identity. Fingerprints (Figure 1) are the most widely used form of biometric used to identify individuals.\n\nThe automated fingerprint matching generally required the detection of different fingerprint features (aggregate characteristics of ridges, and minutia points) and then the use of fingerprint matching algorithm, which can do both one-to- one and one-to- many matching operations. Based on the number of matches a proximity score (distance or similarity) can be calculated.\nWe use the following NIST dataset for the study:\nSpecial Database 14 - NIST Mated Fingerprint Card Pairs 2. (http://www.nist.gov/itl/iad/ig/special_dbases.cfm)\nObjectives\nMatch the fingerprint images from a probe set to a gallery set and report the match scores.\nPrerequisites\nFor this work we will use the following algorithms:\n\nMINDTCT: The NIST minutiae detector, which automatically locates and records ridge ending and bifurcations in a fingerprint image. (http://www.nist.gov/itl/iad/ig/nbis.cfm)\nBOZORTH3: A NIST fingerprint matching algorithm, which is a minutiae based fingerprint-matching algorithm. It can do both one-to- one and one-to- many matching operations. (http://www.nist.gov/itl/iad/ig/nbis.cfm)\n\nIn order to follow along, you must have the NBIS tools which provide mindtct and bozorth3 installed. 
If you are on Ubuntu 16.04 Xenial, the following steps will accomplish this:\n$ sudo apt-get update -qq\n$ sudo apt-get install -y build-essential cmake unzip\n$ wget \"http://nigos.nist.gov:8080/nist/nbis/nbis_v5_0_0.zip\"\n$ unzip -d nbis nbis_v5_0_0.zip\n$ cd nbis/Rel_5.0.0\n$ ./setup.sh /usr/local --without-X11\n$ sudo make\nImplementation\n\nFetch the fingerprint images from the web\nCall out to external programs to prepare and compute the match scoreds\nStore the results in a database\nGenerate a plot to identify likely matches.", "from __future__ import print_function\n\nimport urllib\nimport zipfile\nimport hashlib", "We'll be interacting with the operating system and manipulating files and their pathnames.", "import os.path\nimport os\nimport sys\nimport shutil\nimport tempfile", "Some general usefull utilities", "import itertools\nimport functools\nimport types\nfrom pprint import pprint", "Using the attrs library provides some nice shortcuts to defining objects", "import attr\n\nimport sys\n", "We'll be randomly dividing the entire dataset, based on user input, into the probe and gallery stets", "import random", "We'll need to call out to the NBIS software. 
We'll also be using multiple processes to take advantage of all the cores on our machine", "import subprocess\nimport multiprocessing", "As for plotting, we'll use matplotlib, though there are many alternatives.", "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np", "Finally, we'll write the results to a database.", "import sqlite3", "Utility functions\nNext, we'll define some utility functions:", "def take(n, iterable):\n \"Returns a generator of the first **n** elements of an iterable\"\n return itertools.islice(iterable, n )\n\n\ndef zipWith(function, *iterables):\n \"Zip a set of **iterables** together and apply **function** to each tuple\"\n for group in itertools.izip(*iterables):\n yield function(*group)\n\n\ndef uncurry(function):\n \"Transforms an N-arry **function** so that it accepts a single parameter of an N-tuple\"\n @functools.wraps(function)\n def wrapper(args):\n return function(*args)\n return wrapper\n\n\ndef fetch_url(url, sha256, prefix='.', checksum_blocksize=2**20, dryRun=False):\n \"\"\"Download a url.\n\n :param url: the url to the file on the web\n :param sha256: the SHA-256 checksum. 
Used to determine if the file was previously downloaded.\n :param prefix: directory to save the file\n :param checksum_blocksize: blocksize to used when computing the checksum\n :param dryRun: boolean indicating that calling this function should do nothing\n :returns: the local path to the downloaded file\n :rtype:\n\n \"\"\"\n\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n local = os.path.join(prefix, os.path.basename(url))\n\n if dryRun: return local\n\n if os.path.exists(local):\n print ('Verifying checksum')\n chk = hashlib.sha256()\n with open(local, 'rb') as fd:\n while True:\n bits = fd.read(checksum_blocksize)\n if not bits: break\n chk.update(bits)\n if sha256 == chk.hexdigest():\n return local\n\n print ('Downloading', url)\n\n def report(sofar, blocksize, totalsize):\n msg = '{}%\\r'.format(100 * sofar * blocksize / totalsize, 100)\n sys.stderr.write(msg)\n\n urllib.urlretrieve(url, local, report)\n\n return local\n", "Dataset\nWe'll now define some global parameters\nFirst, the fingerprint dataset", "DATASET_URL = 'https://s3.amazonaws.com/nist-srd/SD4/NISTSpecialDatabase4GrayScaleImagesofFIGS.zip'\nDATASET_SHA256 = '4db6a8f3f9dc14c504180cbf67cdf35167a109280f121c901be37a80ac13c449'", "We’ll define how to download the dataset. 
This function is general enough that it could be used to retrieve most files, but we’ll default it to use the values from above.", "def prepare_dataset(url=None, sha256=None, prefix='.', skip=False):\n url = url or DATASET_URL\n sha256 = sha256 or DATASET_SHA256\n local = fetch_url(url, sha256=sha256, prefix=prefix, dryRun=skip)\n\n if not skip:\n print ('Extracting', local, 'to', prefix)\n with zipfile.ZipFile(local, 'r') as zip:\n zip.extractall(prefix)\n\n name, _ = os.path.splitext(local)\n return name\n\n\ndef locate_paths(path_md5list, prefix):\n with open(path_md5list) as fd:\n for line in itertools.imap(str.strip, fd):\n parts = line.split()\n if not len(parts) == 2: continue\n md5sum, path = parts\n chksum = Checksum(value=md5sum, kind='md5')\n filepath = os.path.join(prefix, path)\n yield Path(checksum=chksum, filepath=filepath)\n\n\ndef locate_images(paths):\n\n def predicate(path):\n _, ext = os.path.splitext(path.filepath)\n return ext in ['.png']\n\n for path in itertools.ifilter(predicate, paths):\n yield image(id=path.checksum.value, path=path)", "Data Model\nWe'll define some classes so we have a nice API for working with the dataflow. We set slots=True so that the resulting objects will be more space-efficient.\nUtilities\nChecksum\nThe checksum consists of the actual hash value (value) as well as a string representing the hashing algorithm.\nThe validator enforces that the algorith can only be one of the listed acceptable methods", "@attr.s(slots=True)\nclass Checksum(object):\n value = attr.ib()\n kind = attr.ib(validator=lambda o, a, v: v in 'md5 sha1 sha224 sha256 sha384 sha512'.split())", "Path\nPaths refer to an image's filepath and associated Checksum. We get the checksum \"for \"free\" since the MD5 hash is provided for each image in the dataset.", "@attr.s(slots=True)\nclass Path(object):\n checksum = attr.ib()\n filepath = attr.ib()\n", "Image\nThe start of the data pipeline is the image. 
An image has an id (the md5 hash) and the path to the image.", "@attr.s(slots=True)\nclass image(object):\n id = attr.ib()\n path = attr.ib()", "Mindtct\nThe next step in the pipeline is to apply the mindtct program from NBIS. A mindtct object therefore represents the results of applying mindtct on an image. The xyt output is needed fo r the next step, and the image attribute represents the image id.", "@attr.s(slots=True)\nclass mindtct(object):\n image = attr.ib()\n xyt = attr.ib()\n \n def pretty(self):\n d = dict(id=self.image.id, path=self.image.path)\n return pprint(d)", "We need a way to construct a mindtct object from an image object. A straightforward way of doing this would be to have a from_image @staticmethod or @classmethod, but that doesn't work well with multiprocessing as top-level functions work best as they need to be serialized.", "def mindtct_from_image(image):\n imgpath = os.path.abspath(image.path.filepath)\n tempdir = tempfile.mkdtemp()\n oroot = os.path.join(tempdir, 'result')\n\n cmd = ['mindtct', imgpath, oroot]\n\n try:\n subprocess.check_call(cmd)\n\n with open(oroot + '.xyt') as fd:\n xyt = fd.read()\n\n result = mindtct(image=image.id, xyt=xyt)\n return result\n\n finally:\n shutil.rmtree(tempdir)\n", "Bozorth3\nThe final step in the pipeline is running the bozorth3 from NBIS. The bozorth3 class represents the match being done: tracking the ids of the probe and gallery images as well as the match score.\nSince we'll be writing these instance out to a database, we provide some static methods for SQL statements. 
While there are many Object-Relational-Model (ORM) libraries available for Python, this approach keeps the current implementation simple.", "@attr.s(slots=True)\nclass bozorth3(object):\n probe = attr.ib()\n gallery = attr.ib()\n score = attr.ib()\n \n @staticmethod\n def sql_stmt_create_table():\n return 'CREATE TABLE IF NOT EXISTS bozorth3' \\\n + '(probe TEXT, gallery TEXT, score NUMERIC)'\n \n @staticmethod\n def sql_prepared_stmt_insert():\n return 'INSERT INTO bozorth3 VALUES (?, ?, ?)'\n \n def sql_prepared_stmt_insert_values(self):\n return self.probe, self.gallery, self.score", "In order to work well with multiprocessing, we define a class representuing the input paramaters to bozorth3 and a helper function to run bozorth3. This way the pipeline definition can be kept simple to a map to create the input and then a map to run the program.\nAs NBIS bozorth3 can be called to compare one-to-one or one-to-many, we'll also dynamically choose between these approaches depending on if the gallery attribute is a list or a single object.", "@attr.s(slots=True)\nclass bozorth3_input(object):\n probe = attr.ib()\n gallery = attr.ib()\n \n def run(self):\n if isinstance(self.gallery, mindtct):\n return bozorth3_from_one_to_one(self.probe, self.gallery)\n elif isinstance(self.gallery, types.ListType):\n return bozorth3_from_one_to_many(self.probe, self.gallery)\n else:\n raise ValueError('Unhandled type for gallery: {}'.format(type(gallery)))", "The next is the top-level function to running bozorth3. It accepts an instance of bozorth3_input. The is implemented as a simple top-level wrapper so that it can be easily passed to the multiprocessing library.", "def run_bozorth3(input):\n return input.run()", "Running Bozorth3\nThere are two cases to handle:\n1. One-to-one probe to gallery sets\n1. One-to-many probe to gallery sets\nBoth approaches are implemented below.\nThe implementations follow the same pattern:\n1. Create a temporary directory within with to work\n1. 
Write the probe and gallery images to files in the temporary directory\n1. Call the bozorth3 executable\n1. The match score is written to stdout which is captured and then parsed.\n1. Return a bozorth3 instance for each match\n1. Make sure to clean up the temporary directory\nOne-to-one", "def bozorth3_from_one_to_one(probe, gallery):\n tempdir = tempfile.mkdtemp()\n probeFile = os.path.join(tempdir, 'probe.xyt')\n galleryFile = os.path.join(tempdir, 'gallery.xyt')\n \n with open(probeFile, 'wb') as fd: fd.write(probe.xyt)\n with open(galleryFile, 'wb') as fd: fd.write(gallery.xyt)\n \n cmd = ['bozorth3', probeFile, galleryFile]\n \n try:\n result = subprocess.check_output(cmd)\n score = int(result.strip())\n return bozorth3(probe=probe.image, gallery=gallery.image, score=score)\n finally:\n shutil.rmtree(tempdir)", "One-to-many", "def bozorth3_from_one_to_many(probe, galleryset):\n tempdir = tempfile.mkdtemp()\n probeFile = os.path.join(tempdir, 'probe.xyt')\n galleryFiles = [os.path.join(tempdir, 'gallery%d.xyt' % i)\n for i,_ in enumerate(galleryset)]\n \n with open(probeFile, 'wb') as fd: fd.write(probe.xyt)\n for galleryFile, gallery in itertools.izip(galleryFiles, galleryset):\n with open(galleryFile, 'wb') as fd: fd.write(gallery.xyt)\n \n cmd = ['bozorth3', '-p', probeFile] + galleryFiles\n \n try:\n result = subprocess.check_output(cmd).strip()\n scores = map(int, result.split('\\n'))\n return [bozorth3(probe=probe.image, gallery=gallery.image, score=score)\n for score, gallery in zip(scores, galleryset)]\n finally:\n shutil.rmtree(tempdir)", "Plotting\nFor plotting we'll operate only on the database. 
We'll select a small number of probe images and plot the score between them and the rest of the gallery images.\nThe mk_short_labels helper function will be defined below.", "def plot(dbfile, nprobes=10):\n conn = sqlite3.connect(dbfile)\n results = pd.read_sql(\n \"SELECT DISTINCT probe FROM bozorth3 ORDER BY score LIMIT '%s'\" % nprobes,\n con=conn\n )\n shortlabels = mk_short_labels(results.probe)\n plt.figure()\n \n for i, probe in results.probe.iteritems():\n stmt = 'SELECT gallery, score FROM bozorth3 WHERE probe = ? ORDER BY gallery DESC'\n matches = pd.read_sql(stmt, params=(probe,), con=conn)\n xs = np.arange(len(matches), dtype=np.int)\n plt.plot(xs, matches.score, label='probe %s' % shortlabels[i])\n \n plt.ylabel('Score')\n plt.xlabel('Gallery')\n plt.legend(bbox_to_anchor=(0, 0, 1, -0.2))\n plt.show()", "The image ids are long hash strings. In ordere to minimize the amount of space on the figure the labels occupy, we provide a helper function to create a short label that still uniquely identifies each probe image in the selected sample", "def mk_short_labels(series, start=7):\n for size in xrange(start, len(series[0])):\n if len(series) == len(set(map(lambda s: s[:size], series))):\n break\n return map(lambda s: s[:size], series)", "Putting it all Together\nFirst, set up a temporary directory in which to work:", "pool = multiprocessing.Pool()\nprefix = '/tmp/fingerprint_example/'\nif not os.path.exists(prefix):\n os.makedirs(prefix)", "Next we download and extract the fingerprint images from NIST:", "%%time\ndataprefix = prepare_dataset(prefix=prefix)", "Next we'll configure the location of of the MD5 checksum file that comes with the download", "md5listpath = os.path.join(prefix, 'NISTSpecialDatabase4GrayScaleImagesofFIGS/sd04/sd04_md5.lst')", "Load the images from the downloaded files to start the analysis pipeline", "%%time\nprint('Loading images')\npaths = locate_paths(md5listpath, dataprefix)\nimages = locate_images(paths)\nmindtcts = 
pool.map(mindtct_from_image, images)\nprint('Done')", "We can examine one of the loaded image. Note that image is refers to the MD5 checksum that came with the image and the xyt attribute represents the raw image data.", "print(mindtcts[0].image)\nprint(mindtcts[0].xyt[:50])", "For example purposes we'll only a use a small percentage of the database, randomly selected, for pur probe and gallery datasets.", "perc_probe = 0.001\nperc_gallery = 0.1\n\n%%time\nprint('Generating samples')\nprobes = random.sample(mindtcts, int(perc_probe * len(mindtcts)))\ngallery = random.sample(mindtcts, int(perc_gallery * len(mindtcts)))\nprint('|Probes| =', len(probes))\nprint('|Gallery|=', len(gallery))", "We can now compute the matching scores between the probe and gallery sets.\nThis will use all cores available on this workstation.", "%%time\nprint('Matching')\ninput = [bozorth3_input(probe=probe, gallery=gallery)\n for probe in probes]\nbozorth3s = pool.map(run_bozorth3, input)", "bozorth3s is now a list of lists of bozorth3 instances.", "print('|Probes| =', len(bozorth3s))\nprint('|Gallery| =', len(bozorth3s[0]))\nprint('Result:', bozorth3s[0][0])", "Now add the results to the database", "dbfile = os.path.join(prefix, 'scores.db')\nconn = sqlite3.connect(dbfile)\ncursor = conn.cursor()\ncursor.execute(bozorth3.sql_stmt_create_table())\n\n%%time\nfor group in bozorth3s:\n vals = map(bozorth3.sql_prepared_stmt_insert_values, group)\n cursor.executemany(bozorth3.sql_prepared_stmt_insert(), vals)\n conn.commit()\n print('Inserted results for probe', group[0].probe)", "We now plot the results.", "plot(dbfile, nprobes=len(probes))\n\ncursor.close()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
peastman/deepchem
examples/tutorials/The_Basic_Tools_of_the_Deep_Life_Sciences.ipynb
mit
[ "The Basic Tools of the Deep Life Sciences\nWelcome to DeepChem's introductory tutorial for the deep life sciences. This series of notebooks is a step-by-step guide for you to get to know the new tools and techniques needed to do deep learning for the life sciences. We'll start from the basics, assuming that you're new to machine learning and the life sciences, and build up a repertoire of tools and techniques that you can use to do meaningful work in the life sciences.\nScope: This tutorial will encompass both the machine learning and data handling needed to build systems for the deep life sciences.\nColab\nThis tutorial and the rest in the sequences are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.\n\nWhy do the DeepChem Tutorial?\n1) Career Advancement: Applying AI in the life sciences is a booming\nindustry at present. There are a host of newly funded startups and initiatives\nat large pharmaceutical and biotech companies centered around AI. Learning and\nmastering DeepChem will bring you to the forefront of this field and will\nprepare you to enter a career in this field.\n2) Humanitarian Considerations: Disease is the oldest cause of human\nsuffering. From the dawn of human civilization, humans have suffered from pathogens,\ncancers, and neurological conditions. One of the greatest achievements of\nthe last few centuries has been the development of effective treatments for\nmany diseases. By mastering the skills in this tutorial, you will be able to\nstand on the shoulders of the giants of the past to help develop new\nmedicine.\n3) Lowering the Cost of Medicine: The art of developing new medicine is\ncurrently an elite skill that can only be practiced by a small core of expert\npractitioners. By enabling the growth of open source tools for drug discovery,\nyou can help democratize these skills and open up drug discovery to more\ncompetition. 
Increased competition can help drive down the cost of medicine.\nGetting Extra Credit\nIf you're excited about DeepChem and want to get more involved, there are some things that you can do right now:\n\nStar DeepChem on GitHub! - https://github.com/deepchem/deepchem\nJoin the DeepChem forums and introduce yourself! - https://forum.deepchem.io\nSay hi on the DeepChem gitter - https://gitter.im/deepchem/Lobby\nMake a YouTube video teaching the contents of this notebook.\n\nPrerequisites\nThis tutorial sequence will assume some basic familiarity with the Python data science ecosystem. We will assume that you have familiarity with libraries such as Numpy, Pandas, and TensorFlow. We'll provide some brief refreshers on basics through the tutorial so don't worry if you're not an expert.\nSetup\nThe first step is to get DeepChem up and running. We recommend using Google Colab to work through this tutorial series. You'll also need to run the following commands to get DeepChem installed on your colab notebook.", "!pip install --pre deepchem", "You can of course run this tutorial locally if you prefer. In this case, don't run the above cell since it will download and install Anaconda on your local machine. In either case, we can now import the deepchem package to play with.", "import deepchem as dc\ndc.__version__", "Training a Model with DeepChem: A First Example\nDeep learning can be used to solve many sorts of problems, but the basic workflow is usually the same. Here are the typical steps you follow.\n\nSelect the data set you will train your model on (or create a new data set if there isn't an existing suitable one).\nCreate the model.\nTrain the model on the data.\nEvaluate the model on an independent test set to see how well it works.\nUse the model to make predictions about new data.\n\nWith DeepChem, each of these steps can be as little as one or two lines of Python code. 
In this tutorial we will walk through a basic example showing the complete workflow to solve a real world scientific problem.\nThe problem we will solve is predicting the solubility of small molecules given their chemical formulas. This is a very important property in drug development: if a proposed drug isn't soluble enough, you probably won't be able to get enough into the patient's bloodstream to have a therapeutic effect. The first thing we need is a data set of measured solubilities for real molecules. One of the core components of DeepChem is MoleculeNet, a diverse collection of chemical and molecular data sets. For this tutorial, we can use the Delaney solubility data set. The property of solubility in this data set is reported in log(solubility) where solubility is measured in moles/liter.", "tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='GraphConv')\ntrain_dataset, valid_dataset, test_dataset = datasets", "I won't say too much about this code right now. We will see many similar examples in later tutorials. There are two details I do want to draw your attention to. First, notice the featurizer argument passed to the load_delaney() function. Molecules can be represented in many ways. We therefore tell it which representation we want to use, or in more technical language, how to \"featurize\" the data. Second, notice that we actually get three different data sets: a training set, a validation set, and a test set. Each of these serves a different function in the standard deep learning workflow.\nNow that we have our data, the next step is to create a model. We will use a particular kind of model called a \"graph convolutional network\", or \"graphconv\" for short.", "model = dc.models.GraphConvModel(n_tasks=1, mode='regression', dropout=0.2)", "Here again I will not say much about the code. 
Later tutorials will give lots more information about GraphConvModel, as well as other types of models provided by DeepChem.\nWe now need to train the model on the data set. We simply give it the data set and tell it how many epochs of training to perform (that is, how many complete passes through the data to make).", "model.fit(train_dataset, nb_epoch=100)", "If everything has gone well, we should now have a fully trained model! But do we? To find out, we must evaluate the model on the test set. We do that by selecting an evaluation metric and calling evaluate() on the model. For this example, let's use the Pearson correlation, also known as r<sup>2</sup>, as our metric. We can evaluate it on both the training set and test set.", "metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)\nprint(\"Training set score:\", model.evaluate(train_dataset, [metric], transformers))\nprint(\"Test set score:\", model.evaluate(test_dataset, [metric], transformers))", "Notice that it has a higher score on the training set than the test set. Models usually perform better on the particular data they were trained on than they do on similar but independent data. This is called \"overfitting\", and it is the reason it is essential to evaluate your model on an independent test set.\nOur model still has quite respectable performance on the test set. For comparison, a model that produced totally random outputs would have a correlation of 0, while one that made perfect predictions would have a correlation of 1. Our model does quite well, so now we can use it to make predictions about other molecules we care about.\nSince this is just a tutorial and we don't have any other molecules we specifically want to predict, let's just use the first ten molecules from the test set. For each one we print out the chemical structure (represented as a SMILES string) and the predicted log(solubility). 
To put these predictions in \ncontext, we print out the log(solubility) values from the test set as well.", "solubilities = model.predict_on_batch(test_dataset.X[:10])\nfor molecule, solubility, test_solubility in zip(test_dataset.ids, solubilities, test_dataset.y):\n print(solubility, test_solubility, molecule)", "Congratulations! Time to join the Community!\nCongratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:\nStar DeepChem on GitHub\nThis helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.\nJoin the DeepChem Gitter\nThe DeepChem Gitter hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rahulremanan/python_tutorial
Beginners/notebook/Objects_Classes_Constructors_and_Functions.ipynb
mit
[ "Object oriented programming in python:\nAuthor: Dr. Rahul Remanan\nCEO and Chief Imagination Officer\nMoad Computer\n\nObjects are an encapsulation of variables and functions into a single entity. \nObjects get their variables and functions from classes. \nClasses are essentially a template to create your objects.\n\nPart 01 -- Functions in python:\nJust add two numbers:", "a = int(input())\nb = int(input())\n\nprint (a+b)", "Explicitly calling garbage collection to free-up working memory:", "import gc\n\ndel a\ndel b\ngc.collect()", "A very basic python function:\n\nA function to add two numbers.\nUses positional argument.", "def add_function(a,b):\n print ('Calculating the sum of: ' + str(a) + \" , \" + str(b))\n return (a+b) ", "The input() helps user pass an input value.", "input_1 = int(input())\ninput_2 = int(input())\n\nresult = add_function(input_1, input_2)\n\nprint (result)", "Explicitly freeing-up working memory and calling garbage collector:\n\nObject oriented programing is memory efficient.", "import gc\n\ndel input_1\ndel input_2\ndel result\n\ngc.collect()", "Another basic python function:\n\nThis is a function to add two numbers.\nUses keyword arguments instead of positional arguments.\nThe function takes two decimal values instead of two integer values in the function above.\nExplicit error messages.", "def add_function(input_1=None, input_2=None):\n if input_1 and input_2 !=None:\n print ('Calculating the sum of: ' + str(input_1) + \" , \" + str(input_2))\n return (a+b)\n else:\n print ('Nothing to do here ...')\n print ('Ensure add_function passes two keyword variables: input_1 and input_2 ...')\n\na = float(input())\nb = float(input())\n\nresult = add_function(input_2=b, input_1=a)\n\nprint (result)", "Explicit error message looks like this:", "result = add_function(input_1=a)", "Part 02 -- Classes in python:\nPython is an “object-oriented programming language.” This means almost all the code is implemented using a special construct called 
classes. Programmers use classes to keep related things together. This is done using the keyword “class,” which is a grouping of object-oriented constructs.\nDefine a very basic class in python:\n\nContains a variable called \"some_variable\".\nContains a function called \"some_function\".", "class some_class:\n some_variable = \"Some string inside python class.\"\n def some_function(self):\n return (\"This is a message inside the class.\")", "Assign an object to the class:\n\nTo assign the above class(template) to an object(variable), do the following:", "some_object = some_class()", "Accessing object variables:\n\nTo access the variable inside of the newly created object object_name.variable_name.", "print(some_class.some_variable)\n\nprint(some_object.some_variable)\n\nsome_object.some_variable = 'This is the modified variable.'\n\nprint (some_object.some_variable)", "Accessing object functions:\n\nTo access the function inside the newly created object the syntax is: object_name.function_name().", "print(some_object.some_function())", "Importance of object oriented programing:\n\nObject oriented programing helps create multiple different objects.\nThey are of the same class with the same variables and functions defined. \nEach of those objects contain independent copies of the variables defined in the class. 
\nIf another object with the \"some_class\" class is defined, then the string in the variable can be changed as follows:", "first_object = some_class()\nsecond_object = some_class()\n\nsecond_object.some_variable = \"Modified variable string for some_class.\"\n\nprint(first_object.some_variable)\nprint(second_object.some_variable)", "Accessing object functions:\n\nTo access a function inside of an object in python, the notation similar to accessing a variable.", "print(first_object.some_function())", "Part 03 -- Constructor or initializer in python:\n\nThe init function is called a constructor, or initializer.\nConstrucor or initializer is automatically called when you create a new instance of a class.\ninit doesn't initialize a class, it initializes an instance of a class or an object. \n\nA slightly more complex example of object oriented programing:\n\nUses a constructor for variables \"name\" and \"type\".\nUses a class variable, \"kind\".\nHas an object function, \"color\".", "class Dog:\n kind = 'I am a dog. 
According to scientists, I belong to a species called Canis lupus, under the sub-species familiaris.'\n \n def __init__(self, name = None, type = None):\n self.name = name\n self.type = type\n\n def dog_color(self, color = None):\n if color and self.name and self.type != None:\n color = color.lower()\n if color == ('pink'):\n print (\"Your dog \" + str(self.name) + \"'s \" + color + \" color is so cute ...\")\n elif color == ('blue'):\n print (\"Your dog \" + str(self.name) + \"'s \" + color + \" color makes it look like a smurf ...\")\n else:\n print (\"Your dog: \" + str(self.name) + \"'s \" + \"color is: \" + color + \".\")\n else:\n print (\"Nothing to do here ...\")\n \n def my_dog(self):\n if self.name and self.type != None:\n print (\"Your dog \" + str(self.name) + \" is: \" + str(self.type))\n else:\n print (\"Nothing to do here ...\")\n\nmy_dog = Dog()\n\nmy_dog.name = 'Puppy'\n\nmy_dog.type = 'Maltese'\n\nprint(my_dog)\n\nprint(my_dog.name)\n\nprint(my_dog.kind)\n\nprint(my_dog.type)\n\nmy_dog = Dog(name = 'Puppy', type = 'Maltese')\n\nmy_dog.my_dog()", "An object can store any random variable:\n\nAn object can store any variables, not necessarily those sepcified in the class.", "my_dog.hello_world = \"Hello World\"\n\nmy_dog.hello_world", "Accessing an object function that also takes a keyword argument:", "my_dog.dog_color(color = 'Pink')\n\nmy_dog.dog_color(color='Blue')", "Change the values of a variable in an object:", "my_dog.name = \"Cute Puppy\"\n\nmy_dog.my_dog()\n\nmy_dog.dog_color(color='Black')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
linglaiyao1314/maths-with-python
08-statistics.ipynb
mit
[ "There are many specialized packages for dealing with data analysis and statistical programming. One very important code that you will see in MATH1024, Introduction to Probability and Statistics, is R. A Python package for performing similar analysis of large data sets is pandas. However, simple statistical tasks on simple data sets can be tackled using numpy and scipy.\nGetting data in\nA data file containing the monthly rainfall for Southampton, taken from the Met Office data can be downloaded from this link. We will save that file locally, and then look at the data.\nThe first few lines of the file are:", "!head southampton_precip.txt", "We can use numpy to load this data into a variable, where we can manipulate it. This is not ideal: it will lose the information in the header, and that the first column corresponds to years. However, it is simple to use.", "import numpy\n\ndata = numpy.loadtxt('southampton_precip.txt')\n\ndata", "We see that the first column - the year - has been converted to a floating point number, which is not helpful. However, we can now split the data using standard numpy operations:", "years = data[:, 0]\nrainfall = data[:, 1:]", "We can now plot, for example, the rainfall in January for all years:", "%matplotlib inline\nfrom matplotlib import pyplot\n\npyplot.plot(years, rainfall[:,0])\npyplot.xlabel('Year')\npyplot.ylabel('Rainfall in January');", "Basic statistical functions\nnumpy contains a number of basic statistical functions, such as min, max and mean. 
These will act on entire arrays to give the \"all time\" minimum, maximum, and average rainfall:", "print(\"Minimum rainfall: {}\".format(rainfall.min()))\nprint(\"Maximum rainfall: {}\".format(rainfall.max()))\nprint(\"Mean rainfall: {}\".format(rainfall.mean()))", "Of more interest would be either\n\nthe mean (min/max) rainfall in a given month for all years, or\nthe mean (min/max) rainfall in a given year for all months.\n\nSo the mean rainfall in the first year, 1855, would be", "print (\"Mean rainfall in 1855: {}\".format(rainfall[0, :].mean()))", "Whilst the mean rainfall in January, averaging over all years, would be", "print (\"Mean rainfall in January: {}\".format(rainfall[:, 0].mean()))", "If we wanted to plot the mean rainfall per year, across all years, this would be tedious - there are 145 years of data in the file. Even computing the mean rainfall in each month, across all years, would be bad with 12 months. We could write a loop. However, numpy allows us to apply a function along an axis of the array, which does this is one operation:", "mean_rainfall_in_month = rainfall.mean(axis=0)\nmean_rainfall_per_year = rainfall.mean(axis=1)", "The axis argument gives the direction we want to keep - that we do not apply the operation to. For this data set, each row contains a year and each column a month. To find the mean in a given month we want to keep the row information (axis 0) and take the mean over the column. 
To find the mean in a given year we want to keep the column information (axis 1) and take the mean over the row.\nWe can now plot how the mean varies with each year.", "pyplot.plot(years, mean_rainfall_per_year)\npyplot.xlabel('Year')\npyplot.ylabel('Mean rainfall');", "We can also compute the standard deviation:", "std_rainfall_per_year = rainfall.std(axis=1)", "We can then add confidence intervals to the plot:", "pyplot.errorbar(years, mean_rainfall_per_year, yerr = std_rainfall_per_year)\npyplot.xlabel('Year')\npyplot.ylabel('Mean rainfall');", "This isn't particularly pretty or clear: a nicer example would use better packages, but a quick fix uses an alternative matplotlib approach:", "pyplot.plot(years, mean_rainfall_per_year)\npyplot.fill_between(years, mean_rainfall_per_year - std_rainfall_per_year, \n mean_rainfall_per_year + std_rainfall_per_year,\n alpha=0.25, color=None)\npyplot.xlabel('Year')\npyplot.ylabel('Mean rainfall');", "Categorical data\nLooking at the means by month, it would be better to give them names rather than numbers. We will also summarize the available information using a boxplot:", "months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\npyplot.boxplot(rainfall, labels=months)\npyplot.xlabel('Month')\npyplot.ylabel('Mean rainfall');", "Much better ways of working with categorical data are available through more specialized packages.\nRegression\nWe can go beyond the basic statistical functions in numpy and look at other standard tasks. For example, we can look for simple trends in our data with a linear regression. There is a function to compute the linear regression in scipy we can use. 
We will use this to see if there is a trend in the mean yearly rainfall:", "from scipy import stats\n\nslope, intercept, r_value, p_value, std_err = stats.linregress(years, mean_rainfall_per_year)\n\npyplot.plot(years, mean_rainfall_per_year, 'b-', label='Data')\npyplot.plot(years, intercept + slope*years, 'k-', label='Linear Regression')\npyplot.xlabel('Year')\npyplot.ylabel('Mean rainfall')\npyplot.legend();\n\nprint(\"The change in rainfall (the slope) is {}.\".format(slope))\nprint(\"However, the error estimate is {}.\".format(std_err))\nprint(\"The correlation coefficient between rainfall and year\"\n \" is {}.\".format(r_value))\nprint(\"The probability that the slope is zero is {}.\".format(p_value))", "It looks like there's a good chance that the slight decrease in mean rainfall with time is a real effect.\nRandom numbers\nRandom processes and random variables may be at the heart of probability and statistics, but computers cannot generate anything \"truly\" random. Instead they can generate pseudo-random numbers using random number generators (RNGs). Constructing a random number generator is a hard problem and wherever possible you should use a well-tested RNG rather than attempting to write your own.\nPython has many ways of generating random numbers. Perhaps the most useful are given by the numpy.random module, which can generate a numpy array filled with random numbers from various distributions. For example:", "from numpy import random\n\nuniform = random.rand(10000)\nnormal = random.randn(10000)\n\nfig = pyplot.figure()\nax1 = fig.add_subplot(1,2,1)\nax2 = fig.add_subplot(1,2,2)\nax1.hist(uniform, 20)\nax1.set_title('Uniform data')\nax2.hist(normal, 20)\nax2.set_title('Normal data')\nfig.tight_layout()\nfig.show();", "More distributions\nWhilst the standard distributions are given by the convenience functions above, the full documentation of numpy.random shows many other distributions available. 
For example, we can draw $10,000$ samples from the Beta distribution using the parameters $\\alpha = 1/2 = \\beta$ as", "beta_samples = random.beta(0.5, 0.5, 10000)\n\npyplot.hist(beta_samples, 20)\npyplot.title('Beta data')\npyplot.show();", "We can do this $5,000$ times and compute the mean of each set of samples:", "n_trials = 5000\nbeta_means = numpy.zeros((n_trials,))\n\nfor trial in range(n_trials):\n beta_samples = random.beta(0.5, 0.5, 10000)\n beta_means[trial] = numpy.mean(beta_samples)\n \npyplot.hist(beta_means, 20)\npyplot.title('Mean of Beta trials')\npyplot.show();", "Here we see the Central Limit Theorem in action: the distribution of the means appears to be normal, despite the distribution of any individual trial coming from the Beta distribution, which looks very different." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
barjacks/foundations-homework
06/Dark Sky Forecast_Homework_6_Skinner.ipynb
mit
[ "You'll be using the Dark Sky Forecast API from Forecast.io, available at https://developer.forecast.io. It's a pretty simple API, but be sure to read the documentation!\nA forecast request returns the current forecast (for the next week): \nhttps://api.forecast.io/forecast/APIKEY/LATITUDE,LONGITUDE\nA time-machine request returns the observed weather at a given time (for many places, up to 60 years in the past): https://api.forecast.io/forecast/APIKEY/LATITUDE,LONGITUDE,TIME\n1) Make a request from the Forecast.io API for where you were born (or lived, or want to visit!).", "import requests\n\nresponse = requests.get(\"https://api.forecast.io/forecast/e554f37a8164ce189acd210d00a452e0/47.4079,9.4647\")\nweather_data = response.json()\nweather_data.keys()\n\nprint(weather_data['timezone'])", "The time zone of Trogen is correct! This is where I live.", "print(\"Longitude:\", weather_data['longitude'], \"Latitude\", weather_data['latitude'])", "The longitude is mentioned first, and then the latitude. Usually, it is the other way round.\n2) What's the current wind speed? 
How much warmer does it feel than it actually is?", "type(weather_data['currently'])\n\nweather_data['currently'].keys()\n\nprint(\"The wind in Trogen, Switzerland, is currently blowing at\", weather_data['currently']['windSpeed'], \"mph.\")\n\nweather = weather_data['currently']\nTemperature = int(weather['apparentTemperature']) - int(weather['temperature'])\nCelsius = round((int(weather['temperature']) - 32) * 5 / 9)\nif Temperature == 0:\n print(\"The temperature feels exactly as you would expect it to feel, namely\", weather['temperature'], \"degrees Fahrenheit, or\", Celsius, \"degrees Celsius.\")\nelif Temperature > 0:\n print(\"It feels\", Temperature, \"degrees Fahrenheit warmer than the actual temperature, which is\", weather['temperature'], \"degrees Fahrenheit, or\", Celsius, \"degrees Celsius.\")\nelse:\n print(\"It feels\", Temperature, \"degrees Fahrenheit colder than the actual temperature, which is\", weather['temperature'], \"degrees Fahrenheit, or\", Celsius, \"degrees Celsius.\")", "3) The first daily forecast is the forecast for today. 
For the place you decided on up above, how much of the moon is currently visible?", "type(weather_data['daily'])\n\nweather_data['daily'].keys()\n\ntype(weather_data['daily']['data'])\n\nfor phase in weather_data['daily']['data']:\n moon_forecast_tomorrow = phase['moonPhase']\n break\n#print(phase['moonPhase'])\nif moon_forecast_tomorrow == 0:\n print(\"Tomorrow is New Moon.\")\nelif moon_forecast_tomorrow > .75:\n print(\"Tomorrow the Moon is in a Waning Crescent phase.\")\nelif moon_forecast_tomorrow == .75:\n print(\"Tomorrow is last quarter Moon.\")\nelif moon_forecast_tomorrow > .5:\n print(\"Tommorrow the Moon is in a Waning Gibbous phase.\")\nelif moon_forecast_tomorrow == .5:\n print(\"Tommorrow is Full Moon.\")\nelif moon_forecast_tomorrow > .25:\n print(\"Tommorrow the Moon is a Waxing Gibbous phase.\")\nelif moon_forecast_tomorrow == .25:\n print(\"Tommorrow is first Quarter Moon.\")\nelif moon_forecast_tomorrow > 0:\n print(\"Tommorrow the Moon is in a Waxing Crescent phase. This is the first phase after New Moon.\")\n", "4) What's the difference between the high and low temperatures for today?", "print(weather_data['currently'])\n\nresponse = requests.get(\"https://api.forecast.io/forecast/e554f37a8164ce189acd210d00a452e0/47.4079,9.4647,1465420000\")\nhist_weather_data = response.json()\n\nhist_weather_data.keys()\n\n\n\n\nhist_weather_data['daily'].keys()\n\nprint(hist_weather_data['daily']['data'])\n\nfor n in hist_weather_data['daily']['data']:\n Min = n['temperatureMin']\n Max = n['temperatureMax']\n\nMin_Max = Max - Min\nMin_Max_Celsius = ((Max - 32) * 5 / 9) - ((Min - 32) * 5 / 9)\nprint(\"The diffrence between the high and low temperatures on Wedesday, June 8, in Trogen, Switzerland, was\", round(Min_Max), \"Fahrenheit\", \"or\", round(Min_Max_Celsius), \"Celsius.\")", "5) Loop through the daily forecast, printing out the next week's worth of predictions. 
I'd like to know the high temperature for each day, and whether it's hot, warm, or cold, based on what temperatures you think are hot, warm or cold.", "response = requests.get(\"https://api.forecast.io/forecast/e554f37a8164ce189acd210d00a452e0/40.7128,-74.0059\")\nweather_data = response.json()\nweather_data.keys()\n\nweather_data['daily'].keys()\n\nprint(weather_data['daily']['data'][0])\n\nForecast = weather_data['daily']['data']\nfor max_temp in Forecast:\n Convert_Celsius = (max_temp['temperatureMax'] - 32) * 5 / 9\n if Convert_Celsius > 30:\n Temperature = \"hot\"\n elif Convert_Celsius > 20:\n Temperature = \"warm\"\n else:\n Temperature = \"cold\"\n print(\"The maximum temperature in New York for next week are\", max_temp['temperatureMax'], \"Fahrenheit or\", round(Convert_Celsius), \"Celsius. That is\", Temperature)\n\n import datetime\nprint(datetime.datetime.fromtimestamp(int(\"1284101485\")).strftime('%Y-%m-%d'))\n\nimport time\nimport datetime\nForecast = weather_data['daily']['data']\nfor max_temp in Forecast:\n Convert_Celsius = (max_temp['temperatureMax'] - 32) * 5 / 9\n time1 = max_temp['time']\n if Convert_Celsius > 30:\n Temperature = \"hot\"\n elif Convert_Celsius > 20:\n Temperature = \"comfortably warm.\"\n else:\n Temperature = \"cold\"\n #print(\"On\", time.strftime('%A, %b %d', time.localtime(time1)), \"the maximum temperature will be\", max_temp['temperatureMax'], \"Fahrenheit or\", round(Convert_Celsius), \"Celsius. That is\", Temperature)\n print(\"On\", time.strftime('%A', time.localtime(time1)), \"the maximum temperature will be\", max_temp['temperatureMax'], \"Fahrenheit or\", round(Convert_Celsius), \"Celsius. That is\", Temperature)\n\nimport datetime\nprint(datetime.datetime.fromtimestamp(int(time)).strftime('%Y-%m-%d'))", "6) What's the weather looking like for the rest of today in Miami, Florida? 
I'd like to know the temperature for every hour, and if it's going to have cloud cover of more than 0.5 say \"{temperature} and cloudy\" instead of just the temperature.", "response = requests.get(\"https://api.forecast.io/forecast/e554f37a8164ce189acd210d00a452e0/25.7617,-80.1918\")\nFlorida = response.json()\nFlorida.keys()\n\nFlorida['hourly'].keys()\n\n#print(Florida['hourly']['data'])\n\nimport time\nimport datetime\nHourly = Florida['hourly']['data']\nHourly = Hourly\nfor weather in Hourly:\n time = weather['time']\n stop_time = datetime.datetime.fromtimestamp(int(time)).strftime('%H')\n if stop_time == '01':\n break\n if weather['cloudCover'] > 0.5:\n cloudy = \"and cloudy\"\n else:\n cloudy = \"not cloudy\"\n print(datetime.datetime.fromtimestamp(int(time)).strftime('%H:%M'), \"{\", weather['temperature'], \"°F}\",cloudy)", "7) What was the temperature in Central Park on Christmas Day, 1980? How about 1990? 2000?", "response = requests.get(\"https://api.forecast.io/forecast/e554f37a8164ce189acd210d00a452e0/40.781750,-73.966641,346593600\")\nweather_data = response.json()\nweather_data.keys()\n\n#print(weather_data['daily']['data'][0])\n\nfor Christmas in weather_data['daily']['data']:\n Convert_Celsius = (Christmas['temperatureMax'] - 32) * 5 / 9\n print(\"The maximum temperature on Christmas Day 1980 in Centralpark was\", \n Christmas['temperatureMax'], \"Fahrenheit, or\", round(Convert_Celsius), \"degrees Celsius.\")\n\nresponse = requests.get(\"https://api.forecast.io/forecast/e554f37a8164ce189acd210d00a452e0/40.781750,-73.966641,662126400\")\nweather_data_1990 = response.json()\nweather_data_1990.keys()\n\nresponse = requests.get(\"https://api.forecast.io/forecast/e554f37a8164ce189acd210d00a452e0/40.781750,-73.966641,977745600\")\nweather_data_2000 = response.json()\nweather_data_2000.keys()\n\nfor Christmas in weather_data_1990['daily']['data']:\n Convert_Celsius = (Christmas['temperatureMax'] - 32) * 5 / 9\n print(\"The maximum temperature on 
Christmas Day 1990 in Centralpark was\", \n Christmas['temperatureMax'], \"Fahrenheit, or\", round(Convert_Celsius), \"degrees Celsius.\")\n\nfor Christmas in weather_data_2000['daily']['data']:\n Convert_Celsius = (Christmas['temperatureMax'] - 32) * 5 / 9\n print(\"The maximum temperature on Christmas Day 1980 in Centralpark was\", \n Christmas['temperatureMax'], \"Fahrenheit, or\", round(Convert_Celsius), \"degrees Celsius.\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
zomansud/coursera
ml-classification/week-2/module-4-linear-classifier-regularization-assignment-blank.ipynb
mit
[ "Logistic Regression with L2 regularization\nThe goal of this second notebook is to implement your own logistic regression classifier with L2 regularization. You will do the following:\n\nExtract features from Amazon product reviews.\nConvert an SFrame into a NumPy array.\nWrite a function to compute the derivative of log likelihood function with an L2 penalty with respect to a single coefficient.\nImplement gradient ascent with an L2 penalty.\nEmpirically explore how the L2 penalty can ameliorate overfitting.\n\nFire up GraphLab Create\nMake sure you have the latest version of GraphLab Create. Upgrade by\npip install graphlab-create --upgrade\nSee this page for detailed instructions on upgrading.", "from __future__ import division\nimport graphlab", "Load and process review dataset\nFor this assignment, we will use the same subset of the Amazon product review dataset that we used in Module 3 assignment. The subset was chosen to contain similar numbers of positive and negative reviews, as the original dataset consisted of mostly positive reviews.", "products = graphlab.SFrame('amazon_baby_subset.gl/')\n\nproducts.head()", "Just like we did previously, we will work with a hand-curated list of important words extracted from the review data. 
We will also perform 2 simple data transformations:\n\nRemove punctuation using Python's built-in string functionality.\nCompute word counts (only for the important_words)\n\nRefer to Module 3 assignment for more details.", "# The same feature processing (same as the previous assignments)\n# ---------------------------------------------------------------\nimport json\nwith open('important_words.json', 'r') as f: # Reads the list of most frequent words\n important_words = json.load(f)\nimportant_words = [str(s) for s in important_words]\n\n\ndef remove_punctuation(text):\n import string\n return text.translate(None, string.punctuation) \n\n# Remove punctuation.\nproducts['review_clean'] = products['review'].apply(remove_punctuation)\n\n# Split out the words into individual columns\nfor word in important_words:\n products[word] = products['review_clean'].apply(lambda s : s.split().count(word))", "Now, let us take a look at what the dataset looks like (Note: This may take a few minutes).", "products", "Train-Validation split\nWe split the data into a train-validation split with 80% of the data in the training set and 20% of the data in the validation set. We use seed=2 so that everyone gets the same result.\nNote: In previous assignments, we have called this a train-test split. However, the portion of data that we don't train on will be used to help select model parameters. Thus, this portion of data should be called a validation set. Recall that examining performance of various potential models (i.e. 
models with different parameters) should be on a validation set, while evaluation of selected model should always be on a test set.", "train_data, validation_data = products.random_split(.8, seed=2)\n\nprint 'Training set : %d data points' % len(train_data)\nprint 'Validation set : %d data points' % len(validation_data)", "Convert SFrame to NumPy array\nJust like in the second assignment of the previous module, we provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned: one representing features and another representing class labels. \nNote: The feature matrix includes an additional column 'intercept' filled with 1's to take account of the intercept term.", "import numpy as np\n\ndef get_numpy_data(data_sframe, features, label):\n data_sframe['intercept'] = 1\n features = ['intercept'] + features\n features_sframe = data_sframe[features]\n feature_matrix = features_sframe.to_numpy()\n label_sarray = data_sframe[label]\n label_array = label_sarray.to_numpy()\n return(feature_matrix, label_array)", "We convert both the training and validation sets into NumPy arrays.\nWarning: This may take a few minutes.", "feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment')\nfeature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment') ", "Are you running this notebook on an Amazon EC2 t2.micro instance? (If you are using your own machine, please skip this section)\nIt has been reported that t2.micro instances do not provide sufficient power to complete the conversion in acceptable amount of time. For interest of time, please refrain from running get_numpy_data function. Instead, download the binary file containing the four NumPy arrays you'll need for the assignment. 
To load the arrays, run the following commands:\narrays = np.load('module-4-assignment-numpy-arrays.npz')\nfeature_matrix_train, sentiment_train = arrays['feature_matrix_train'], arrays['sentiment_train']\nfeature_matrix_valid, sentiment_valid = arrays['feature_matrix_valid'], arrays['sentiment_valid']\nBuilding on logistic regression with no L2 penalty assignment\nLet us now build on Module 3 assignment. Recall from lecture that the link function for logistic regression can be defined as:\n$$\nP(y_i = +1 | \\mathbf{x}_i,\\mathbf{w}) = \\frac{1}{1 + \\exp(-\\mathbf{w}^T h(\\mathbf{x}_i))},\n$$\nwhere the feature vector $h(\\mathbf{x}_i)$ is given by the word counts of important_words in the review $\\mathbf{x}_i$. \nWe will use the same code as in this past assignment to make probability predictions since this part is not affected by the L2 penalty. (Only the way in which the coefficients are learned is affected by the addition of a regularization term.)", "'''\nproduces probablistic estimate for P(y_i = +1 | x_i, w).\nestimate ranges between 0 and 1.\n'''\nimport math\nsigmoid = lambda x: 1 / (1 + math.exp(-x))\n\ndef predict_probability(feature_matrix, coefficients):\n # Take dot product of feature_matrix and coefficients \n dot_product = np.dot(feature_matrix, coefficients)\n \n # Compute P(y_i = +1 | x_i, w) using the link function\n predictions = []\n for dpi in dot_product:\n predictions.append(sigmoid(dpi))\n\n # return predictions\n return predictions", "Adding L2 penalty\nLet us now work on extending logistic regression with L2 regularization. As discussed in the lectures, the L2 regularization is particularly useful in preventing overfitting. 
In this assignment, we will explore L2 regularization in detail.\nRecall from lecture and the previous assignment that for logistic regression without an L2 penalty, the derivative of the log likelihood function is:\n$$\n\\frac{\\partial\\ell}{\\partial w_j} = \\sum_{i=1}^N h_j(\\mathbf{x}_i)\\left(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w})\\right)\n$$\n Adding L2 penalty to the derivative \nIt takes only a small modification to add a L2 penalty. All terms indicated in red refer to terms that were added due to an L2 penalty.\n\nRecall from the lecture that the link function is still the sigmoid:\n$$\nP(y_i = +1 | \\mathbf{x}_i,\\mathbf{w}) = \\frac{1}{1 + \\exp(-\\mathbf{w}^T h(\\mathbf{x}_i))},\n$$\nWe add the L2 penalty term to the per-coefficient derivative of log likelihood:\n$$\n\\frac{\\partial\\ell}{\\partial w_j} = \\sum_{i=1}^N h_j(\\mathbf{x}_i)\\left(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w})\\right) \\color{red}{-2\\lambda w_j }\n$$\n\nThe per-coefficient derivative for logistic regression with an L2 penalty is as follows:\n$$\n\\frac{\\partial\\ell}{\\partial w_j} = \\sum_{i=1}^N h_j(\\mathbf{x}i)\\left(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w})\\right) \\color{red}{-2\\lambda w_j }\n$$\nand for the intercept term, we have\n$$\n\\frac{\\partial\\ell}{\\partial w_0} = \\sum{i=1}^N h_0(\\mathbf{x}_i)\\left(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w})\\right)\n$$\nNote: As we did in the Regression course, we do not apply the L2 penalty on the intercept. A large intercept does not necessarily indicate overfitting because the intercept is not associated with any particular feature.\nWrite a function that computes the derivative of log likelihood with respect to a single coefficient $w_j$. 
Unlike its counterpart in the last assignment, the function accepts five arguments:\n * errors vector containing $(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w}))$ for all $i$\n * feature vector containing $h_j(\\mathbf{x}_i)$ for all $i$\n * coefficient containing the current value of coefficient $w_j$.\n * l2_penalty representing the L2 penalty constant $\\lambda$\n * feature_is_constant telling whether the $j$-th feature is constant or not.", "def feature_derivative_with_L2(errors, feature, coefficient, l2_penalty, feature_is_constant): \n \n # Compute the dot product of errors and feature\n derivative = np.dot(errors, feature)\n\n # add L2 penalty term for any feature that isn't the intercept.\n if not feature_is_constant: \n derivative += -2 * l2_penalty * coefficient\n \n return derivative", "Quiz question: In the code above, was the intercept term regularized?\nTo verify the correctness of the gradient ascent algorithm, we provide a function for computing log likelihood (which we recall from the last assignment was a topic detailed in an advanced optional video, and used here for its numerical stability).\n$$\\ell\\ell(\\mathbf{w}) = \\sum_{i=1}^N \\Big( (\\mathbf{1}[y_i = +1] - 1)\\mathbf{w}^T h(\\mathbf{x}_i) - \\ln\\left(1 + \\exp(-\\mathbf{w}^T h(\\mathbf{x}_i))\\right) \\Big) \\color{red}{-\\lambda\\|\\mathbf{w}\\|_2^2} $$", "def compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty):\n indicator = (sentiment==+1)\n scores = np.dot(feature_matrix, coefficients)\n \n lp = np.sum((indicator-1)*scores - np.log(1. + np.exp(-scores))) - l2_penalty*np.sum(coefficients[1:]**2)\n \n return lp", "Quiz question: Does the term with L2 regularization increase or decrease $\\ell\\ell(\\mathbf{w})$?\nThe logistic regression function looks almost like the one in the last assignment, with a minor modification to account for the L2 penalty. 
Fill in the code below to complete this modification.", "def logistic_regression_with_L2(feature_matrix, sentiment, initial_coefficients, step_size, l2_penalty, max_iter):\n coefficients = np.array(initial_coefficients) # make sure it's a numpy array\n for itr in xrange(max_iter):\n # Predict P(y_i = +1|x_i,w) using your predict_probability() function\n ## YOUR CODE HERE\n predictions = predict_probability(feature_matrix, coefficients)\n \n # Compute indicator value for (y_i = +1)\n indicator = (sentiment==+1)\n \n # Compute the errors as indicator - predictions\n errors = indicator - predictions\n \n for j in xrange(len(coefficients)): # loop over each coefficient\n is_intercept = (j == 0)\n # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j].\n # Compute the derivative for coefficients[j]. Save it in a variable called derivative\n derivative = feature_derivative_with_L2(\n errors, \n feature_matrix[:,j], \n coefficients[j], \n l2_penalty, \n j == 0\n )\n \n # add the step size times the derivative to the current coefficient\n coefficients[j] += step_size * derivative\n \n # Checking whether log likelihood is increasing\n if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \\\n or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:\n lp = compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty)\n print 'iteration %*d: log likelihood of observed labels = %.8f' % \\\n (int(np.ceil(np.log10(max_iter))), itr, lp)\n return coefficients", "Explore effects of L2 regularization\nNow that we have written up all the pieces needed for regularized logistic regression, let's explore the benefits of using L2 regularization in analyzing sentiment for product reviews. 
As iterations pass, the log likelihood should increase.\nBelow, we train models with increasing amounts of regularization, starting with no L2 penalty, which is equivalent to our previous logistic regression implementation.", "# run with L2 = 0\ncoefficients_0_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-6, l2_penalty=0, max_iter=501)\n\n# run with L2 = 4\ncoefficients_4_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-6, l2_penalty=4, max_iter=501)\n\n# run with L2 = 10\ncoefficients_10_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-6, l2_penalty=10, max_iter=501)\n\n# run with L2 = 1e2\ncoefficients_1e2_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-6, l2_penalty=1e2, max_iter=501)\n\n# run with L2 = 1e3\ncoefficients_1e3_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-6, l2_penalty=1e3, max_iter=501)\n\n# run with L2 = 1e5\ncoefficients_1e5_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,\n initial_coefficients=np.zeros(194),\n step_size=5e-6, l2_penalty=1e5, max_iter=501)", "Compare coefficients\nWe now compare the coefficients for each of the models that were trained above. 
We will create a table of features and learned coefficients associated with each of the different L2 penalty values.\nBelow is a simple helper function that will help us create this table.", "table = graphlab.SFrame({'word': ['(intercept)'] + important_words})\ndef add_coefficients_to_table(coefficients, column_name):\n table[column_name] = coefficients\n return table", "Now, let's run the function add_coefficients_to_table for each of the L2 penalty strengths.", "add_coefficients_to_table(coefficients_0_penalty, 'coefficients [L2=0]')\nadd_coefficients_to_table(coefficients_4_penalty, 'coefficients [L2=4]')\nadd_coefficients_to_table(coefficients_10_penalty, 'coefficients [L2=10]')\nadd_coefficients_to_table(coefficients_1e2_penalty, 'coefficients [L2=1e2]')\nadd_coefficients_to_table(coefficients_1e3_penalty, 'coefficients [L2=1e3]')\nadd_coefficients_to_table(coefficients_1e5_penalty, 'coefficients [L2=1e5]')", "Using the coefficients trained with L2 penalty 0, find the 5 most positive words (with largest positive coefficients). Save them to positive_words. Similarly, find the 5 most negative words (with largest negative coefficients) and save them to negative_words.\nQuiz Question. Which of the following is not listed in either positive_words or negative_words?", "coefficients_l2_0_no_intercept = list(coefficients_0_penalty[1:]) # exclude intercept\nword_coefficient_tuples = [(word, coefficient) for word, coefficient in zip(important_words, coefficients_l2_0_no_intercept)]\nword_coefficient_tuples = sorted(word_coefficient_tuples, key=lambda x:x[1], reverse=True)\n\npositive_words = []\nfor t in word_coefficient_tuples[:5]:\n positive_words.append(t[0])\n \npositive_words\n\nnegative_words = []\nfor t in word_coefficient_tuples[-5:]:\n negative_words.append(t[0])\n \nnegative_words", "Let us observe the effect of increasing L2 penalty on the 10 words just selected. 
We provide you with a utility function to plot the coefficient path.", "import matplotlib.pyplot as plt\n%matplotlib inline\nplt.rcParams['figure.figsize'] = 10, 6\n\ndef make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list):\n cmap_positive = plt.get_cmap('Reds')\n cmap_negative = plt.get_cmap('Blues')\n \n xx = l2_penalty_list\n plt.plot(xx, [0.]*len(xx), '--', lw=1, color='k')\n \n table_positive_words = table.filter_by(column_name='word', values=positive_words)\n table_negative_words = table.filter_by(column_name='word', values=negative_words)\n del table_positive_words['word']\n del table_negative_words['word']\n \n for i in xrange(len(positive_words)):\n color = cmap_positive(0.8*((i+1)/(len(positive_words)*1.2)+0.15))\n plt.plot(xx, table_positive_words[i:i+1].to_numpy().flatten(),\n '-', label=positive_words[i], linewidth=4.0, color=color)\n \n for i in xrange(len(negative_words)):\n color = cmap_negative(0.8*((i+1)/(len(negative_words)*1.2)+0.15))\n plt.plot(xx, table_negative_words[i:i+1].to_numpy().flatten(),\n '-', label=negative_words[i], linewidth=4.0, color=color)\n \n plt.legend(loc='best', ncol=3, prop={'size':16}, columnspacing=0.5)\n plt.axis([1, 1e5, -1, 2])\n plt.title('Coefficient path')\n plt.xlabel('L2 penalty ($\\lambda$)')\n plt.ylabel('Coefficient value')\n plt.xscale('log')\n plt.rcParams.update({'font.size': 18})\n plt.tight_layout()", "Run the following cell to generate the plot. Use the plot to answer the following quiz question.", "make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list=[0, 4, 10, 1e2, 1e3, 1e5])", "Quiz Question: (True/False) All coefficients consistently get smaller in size as the L2 penalty is increased.\nQuiz Question: (True/False) The relative order of coefficients is preserved as the L2 penalty is increased. 
(For example, if the coefficient for 'cat' was more positive than that for 'dog', this remains true as the L2 penalty increases.)\nMeasuring accuracy\nNow, let us compute the accuracy of the classifier model. Recall that the accuracy is given by\n$$\n\\mbox{accuracy} = \\frac{\\mbox{# correctly classified data points}}{\\mbox{# total data points}}\n$$\nRecall from lecture that that the class prediction is calculated using\n$$\n\\hat{y}_i = \n\\left{\n\\begin{array}{ll}\n +1 & h(\\mathbf{x}_i)^T\\mathbf{w} > 0 \\\n -1 & h(\\mathbf{x}_i)^T\\mathbf{w} \\leq 0 \\\n\\end{array} \n\\right.\n$$\nNote: It is important to know that the model prediction code doesn't change even with the addition of an L2 penalty. The only thing that changes is the estimated coefficients used in this prediction.\nBased on the above, we will use the same code that was used in Module 3 assignment.", "def get_classification_accuracy(feature_matrix, sentiment, coefficients):\n scores = np.dot(feature_matrix, coefficients)\n apply_threshold = np.vectorize(lambda x: 1. if x > 0 else -1.)\n predictions = apply_threshold(scores)\n \n num_correct = (predictions == sentiment).sum()\n accuracy = num_correct / len(feature_matrix) \n return accuracy", "Below, we compare the accuracy on the training data and validation data for all the models that were trained in this assignment. 
We first calculate the accuracy values and then build a simple report summarizing the performance for the various models.", "train_accuracy = {}\ntrain_accuracy[0] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_0_penalty)\ntrain_accuracy[4] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_4_penalty)\ntrain_accuracy[10] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_10_penalty)\ntrain_accuracy[1e2] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e2_penalty)\ntrain_accuracy[1e3] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e3_penalty)\ntrain_accuracy[1e5] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e5_penalty)\n\nvalidation_accuracy = {}\nvalidation_accuracy[0] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_0_penalty)\nvalidation_accuracy[4] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_4_penalty)\nvalidation_accuracy[10] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_10_penalty)\nvalidation_accuracy[1e2] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e2_penalty)\nvalidation_accuracy[1e3] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e3_penalty)\nvalidation_accuracy[1e5] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e5_penalty)\n\n# Build a simple report\nfor key in sorted(validation_accuracy.keys()):\n print \"L2 penalty = %g\" % key\n print \"train accuracy = %s, validation_accuracy = %s\" % (train_accuracy[key], validation_accuracy[key])\n print \"--------------------------------------------------------------------------------\"", "Quiz question: Which model (L2 = 0, 4, 10, 100, 1e3, 1e5) has the highest accuracy on the training data?\nQuiz 
question: Which model (L2 = 0, 4, 10, 100, 1e3, 1e5) has the highest accuracy on the validation data?\nQuiz question: Does the highest accuracy on the training data imply that the model is the best one?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ematvey/tensorflow-seq2seq-tutorials
3-seq2seq-native-new.ipynb
mit
[ "Playing with new 2017 tf.contrib.seq2seq\nTF now has new tf.contrib.seq2seq. Let's make small example of using it.", "%matplotlib inline\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn import LSTMCell, GRUCell\nfrom model_new import Seq2SeqModel, train_on_copy_task\nimport pandas as pd\nimport helpers\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ntf.__version__", "By this point implementations are quite long, so I put them in model_new.py, while notebook will illustrate the application.", "tf.reset_default_graph()\ntf.set_random_seed(1)\n\nwith tf.Session() as session:\n\n # with bidirectional encoder, decoder state size should be\n # 2x encoder state size\n model = Seq2SeqModel(encoder_cell=LSTMCell(10),\n decoder_cell=LSTMCell(20), \n vocab_size=10,\n embedding_size=10,\n attention=True,\n bidirectional=True,\n debug=False)\n\n session.run(tf.global_variables_initializer())\n\n train_on_copy_task(session, model,\n length_from=3, length_to=8,\n vocab_lower=2, vocab_upper=10,\n batch_size=100,\n max_batches=3000,\n batches_in_epoch=1000,\n verbose=True)", "Fun exercise, compare performance of different seq2seq variants.\nComparison will be done using train loss tracks, since the task is algorithmic and data is generated directly from true distribution and out-of-sample testing doesn't really make sense.", "loss_tracks = dict()\n\ndef do_train(session, model):\n return train_on_copy_task(session, model,\n length_from=3, length_to=8,\n vocab_lower=2, vocab_upper=10,\n batch_size=100,\n max_batches=5000,\n batches_in_epoch=1000,\n verbose=False)\n\ndef make_model(**kwa):\n args = dict(cell_class=LSTMCell,\n num_units_encoder=10,\n vocab_size=10,\n embedding_size=10,\n attention=False,\n bidirectional=False,\n debug=False)\n args.update(kwa)\n \n cell_class = args.pop('cell_class')\n \n num_units_encoder = args.pop('num_units_encoder')\n num_units_decoder = num_units_encoder\n \n if args['bidirectional']:\n num_units_decoder 
*= 2\n \n args['encoder_cell'] = cell_class(num_units_encoder)\n args['decoder_cell'] = cell_class(num_units_decoder)\n \n return Seq2SeqModel(**args)", "Test bidirectional/forward encoder, attention/no attention, in all combinations", "tf.reset_default_graph()\ntf.set_random_seed(1)\nwith tf.Session() as session:\n model = make_model(bidirectional=False, attention=False)\n session.run(tf.global_variables_initializer())\n loss_tracks['forward encoder, no attention'] = do_train(session, model)\n\n\ntf.reset_default_graph()\ntf.set_random_seed(1)\nwith tf.Session() as session:\n model = make_model(bidirectional=True, attention=False)\n session.run(tf.global_variables_initializer())\n loss_tracks['bidirectional encoder, no attention'] = do_train(session, model)\n\n\ntf.reset_default_graph()\ntf.set_random_seed(1)\nwith tf.Session() as session:\n model = make_model(bidirectional=False, attention=True)\n session.run(tf.global_variables_initializer())\n loss_tracks['forward encoder, with attention'] = do_train(session, model)\n\n \ntf.reset_default_graph()\ntf.set_random_seed(1)\nwith tf.Session() as session:\n model = make_model(bidirectional=True, attention=True)\n session.run(tf.global_variables_initializer())\n loss_tracks['bidirectional encoder, with attention'] = do_train(session, model)\n\npd.DataFrame(loss_tracks).plot(figsize=(13, 8))", "Naturally, attention helps a lot when the task is simply copying from inputs to outputs.\nTest GRU vs LSTM", "import time\n\ntf.reset_default_graph()\ntf.set_random_seed(1)\nwith tf.Session() as session:\n model = make_model(bidirectional=True, attention=True, cell_class=LSTMCell)\n session.run(tf.global_variables_initializer())\n t0 = time.time()\n lstm_track = do_train(session, model)\n lstm_took = time.time() - t0\n\ntf.reset_default_graph()\ntf.set_random_seed(1)\nwith tf.Session() as session:\n model = make_model(bidirectional=True, attention=True, cell_class=GRUCell)\n session.run(tf.global_variables_initializer())\n t0 = 
time.time()\n gru_track = do_train(session, model)\n gru_took = time.time() - t0\n \ngru = pd.Series(gru_track, name='gru')\nlstm = pd.Series(lstm_track, name='lstm')\ntracks_batch = pd.DataFrame(dict(lstm=lstm, gru=gru))\ntracks_batch.index.name = 'batch'\n\ngru.index = gru.index / gru_took\nlstm.index = lstm.index / lstm_took\ntracks_time = pd.DataFrame(dict(lstm=lstm, gru=gru)).ffill()\ntracks_time.index.name = 'time (seconds)'\n\ntracks_batch.plot(figsize=(8, 5), title='GRU vs LSTM loss, batch-time')\n\ntracks_time.plot(figsize=(8, 5), title='GRU vs LSTM loss, compute-time')", "GRU has fewer parameters, so training supposed to be faster? This test doesn't show it." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
brclark-usgs/flopy
examples/Notebooks/flopy3_mnw2package_example.ipynb
bsd-3-clause
[ "FloPy\nMNW2 package example", "import sys\nimport os\nimport numpy as np\ntry:\n import pandas as pd\nexcept:\n pass\nimport flopy\n\nprint(sys.version)\nprint('numpy version: {}'.format(np.__version__))\ntry:\n print('pandas version: {}'.format(pd.__version__))\nexcept:\n pass\nprint('flopy version: {}'.format(flopy.__version__))", "Make an MNW2 package from scratch", "m = flopy.modflow.Modflow('mnw2example', model_ws='temp')\ndis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m)", "MNW2 information by node\n(this could be prepared externally from well reconds and read in from a csv or excel file)\n* this table has two multi-node wells, the first (well1) consisting of two nodes that are manually specified\n(where the variable rw is specified by node)\n* node that some variables that are constant for the whole well are also included (losstype, zpump, etc.)", "node_data = pd.DataFrame([[1, 1, 9.5, 7.1, 'well1', 'skin', -1, 0, 0, 0, 1., 2., 5., 6.2],\n [1, 1, 7.1, 5.1, 'well1', 'skin', -1, 0, 0, 0, 0.5, 2., 5., 6.2],\n [3, 3, 9.1, 3.7, 'well2', 'skin', -1, 0, 0, 0, 1., 2., 5., 4.1]], \n columns=['i', 'j', 'ztop', 'zbotm', 'wellid', 'losstype', 'pumploc', 'qlimit', 'ppflag', 'pumpcap', \n 'rw', 'rskin', 'kskin', 'zpump'])\nnode_data", "convert the DataFrame to a rec array for compatibility with flopy", "node_data = node_data.to_records()\nnode_data", "Stress period information\n(could also be developed externally)", "stress_period_data = pd.DataFrame([[0, 'well1', 0],\n [1, 'well1', 100.0],\n [0, 'well2', 0],\n [1, 'well2', 1000.]], columns=['per', 'wellid', 'qdes'])\nstress_period_data\n\npers = stress_period_data.groupby('per')\nstress_period_data = {i: pers.get_group(i).to_records() for i in [0, 1]}\nstress_period_data", "Make ModflowMnw2 package object\n\nnote that extraneous columns in node_data and stress_period_data are ignored\nif itmp is positive, it must equal the number of active wells being specified in 
stress_period_data, otherwise the package class will raise an error.", "mnw2 = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,\n node_data=node_data, \n stress_period_data=stress_period_data, \n itmp=[2, 2, -1], # reuse second per pumping for last stress period\n )\n\n# \"nodtot\" is computed automatically\nmnw2.nodtot\n\npd.DataFrame(mnw2.node_data)\n\npd.DataFrame(mnw2.stress_period_data[0])\n\npd.DataFrame(mnw2.stress_period_data[1])\n\ntmp = flopy.modflow.ModflowMnw2(model=m,\n itmp=[1, 1, -1], # reuse second per pumping for last stress period\n )", "empty node_data and stress_period_data tables can also be generated by the package class, and then filled", "node_data = tmp.get_empty_node_data(3)\nnode_data", "Mnw objects\nat the base of the flopy mnw2 module is the Mnw object class, which describes a single multi-node well.\nA list or dict of Mnw objects can be used to build a package (using the example above):\nflopy.modflow.ModflowMnw2(model=m, mnwmax=2,\n mnw=&lt;dict or list of Mnw objects&gt;,\n itmp=[1, 1, -1], # reuse second per pumping for last stress period\n )\nor if node_data and stress_period_data are supplied, the Mnw objects are created on initialization of the ModflowMnw2 class instance, and assigned to the .mnw attribute, as items in a dictionary keyed by wellid.", "mnw2.mnw\n\nmnw2.mnw['well1'].__dict__", "Note that Mnw object attributes for variables that vary by node are lists (e.g. rw above)\nEach Mnw object has its own node_data and stress_period_data", "pd.DataFrame(mnw2.mnw['well1'].node_data)", "Instead of a dict keyed by stress period, Mnw.stress_period_data is a recarray with pumping data listed by stress period for that well\n\nnote that data for period 2, where itmp < 1, is shown (was copied from s.p. 
1 during construction of the Mnw object)", "pd.DataFrame(mnw2.mnw['well2'].stress_period_data)", "Build the same package using only the Mnw objects", "mnw2fromobj = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,\n mnw=mnw2.mnw,\n itmp=[2, 2, -1], # reuse second per pumping for last stress period\n )\n\npd.DataFrame(mnw2fromobj.node_data)\n\npd.DataFrame(mnw2fromobj.stress_period_data[0])\n\npd.DataFrame(mnw2fromobj.stress_period_data[1])", "By default, the node_data and stress_period_data tables attached to the ModflowMnw2 package class are definitive\n\non writing of the package output (mnw2.write_file()), the Mnw objects are regenerated from the tables. This setting is controlled by the default argument use_tables=True. To write the package file using the Mnw objects (ignoring the tables), use mnw2.write_file(use_tables=False).", "per1 = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(itmp=2)\nper1", "Write an MNW2 package file and inspect the results", "mnw2.write_file(os.path.join('temp/test.mnw2'))\n\njunk = [print(l.strip('\\n')) for l in open('temp/test.mnw2').readlines()]", "Load some example MNW2 packages", "path = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples')\ncpth = os.path.join('..', '..', 'autotest', 'temp')\nm = flopy.modflow.Modflow('MNW2-Fig28', model_ws=cpth)\ndis = flopy.modflow.ModflowDis.load(os.path.join(path, 'MNW2-Fig28.dis'), m)\n\nm.get_package_list()\n\nmnw2pth = os.path.join(path, 'MNW2-Fig28.mnw2')\nmnw2 = flopy.modflow.ModflowMnw2.load(mnw2pth, m)\n\npd.DataFrame(mnw2.node_data)\n\npd.DataFrame(mnw2.stress_period_data[0])\n\nmnw2.mnw\n\npd.DataFrame(mnw2.mnw['Well-A'].stress_period_data)\n\npath = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples')\ncpth = os.path.join('temp')\nm = flopy.modflow.Modflow('br', model_ws=cpth)\nmnw2 = flopy.modflow.ModflowMnw2.load(path + '/BadRiver_cal.mnw2', m)\n\ndf = pd.DataFrame(mnw2.node_data)\ndf.loc[:, df.sum(axis=0) != 0]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
google/eng-edu
ml/pc/exercises/image_classification_part3.ipynb
apache-2.0
[ "Copyright 2018 Google LLC.", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Cat vs. Dog Image Classification\nExercise 3: Feature Extraction and Fine-Tuning\nEstimated completion time: 30 minutes\nIn Exercise 1, we built a convnet from scratch, and were able to achieve an accuracy of about 70%. With the addition of data augmentation and dropout in Exercise 2, we were able to increase accuracy to about 80%. That seems decent, but 20% is still too high of an error rate. Maybe we just don't have enough training data available to properly solve the problem. What other approaches can we try?\nIn this exercise, we'll look at two techniques for repurposing feature data generated from image models that have already been trained on large sets of data, feature extraction and fine tuning, and use them to improve the accuracy of our cat vs. dog classification model.\nFeature Extraction Using a Pretrained Model\nOne thing that is commonly done in computer vision is to take a model trained on a very large dataset, run it on your own, smaller dataset, and extract the intermediate representations (features) that the model generates. These representations are frequently informative for your own computer vision task, even though the task may be quite different from the problem that the original model was trained on. 
This versatility and repurposability of convnets is one of the most interesting aspects of deep learning.\nIn our case, we will use the Inception V3 model developed at Google, and pre-trained on ImageNet, a large dataset of web images (1.4M images and 1000 classes). This is a powerful model; let's see what the features that it has learned can do for our cat vs. dog problem.\nFirst, we need to pick which intermediate layer of Inception V3 we will use for feature extraction. A common practice is to use the output of the very last layer before the Flatten operation, the so-called \"bottleneck layer.\" The reasoning here is that the following fully connected layers will be too specialized for the task the network was trained on, and thus the features learned by these layers won't be very useful for a new task. The bottleneck features, however, retain much generality.\nLet's instantiate an Inception V3 model preloaded with weights trained on ImageNet:", "import os\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model", "Now let's download the weights:", "!wget --no-check-certificate \\\n https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \\\n -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5\n\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\n\nlocal_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'\npre_trained_model = InceptionV3(\n input_shape=(150, 150, 3), include_top=False, weights=None)\npre_trained_model.load_weights(local_weights_file)", "By specifying the include_top=False argument, we load a network that doesn't include the classification layers at the top—ideal for feature extraction.\nLet's make the model non-trainable, since we will only use it for feature extraction; we won't update the weights of the pretrained model during training.", "for layer in pre_trained_model.layers:\n layer.trainable = False", "The layer we will 
use for feature extraction in Inception v3 is called mixed7. It is not the bottleneck of the network, but we are using it to keep a sufficiently large feature map (7x7 in this case). (Using the bottleneck layer would have resulting in a 3x3 feature map, which is a bit small.) Let's get the output from mixed7:", "last_layer = pre_trained_model.get_layer('mixed7')\nprint('last layer output shape:', last_layer.output_shape)\nlast_output = last_layer.output", "Now let's stick a fully connected classifier on top of last_output:", "from tensorflow.keras.optimizers import RMSprop\n\n# Flatten the output layer to 1 dimension\nx = layers.Flatten()(last_output)\n# Add a fully connected layer with 1,024 hidden units and ReLU activation\nx = layers.Dense(1024, activation='relu')(x)\n# Add a dropout rate of 0.2\nx = layers.Dropout(0.2)(x)\n# Add a final sigmoid layer for classification\nx = layers.Dense(1, activation='sigmoid')(x)\n\n# Configure and compile the model\nmodel = Model(pre_trained_model.input, x)\nmodel.compile(loss='binary_crossentropy',\n optimizer=RMSprop(lr=0.0001),\n metrics=['acc'])", "For examples and data preprocessing, let's use the same files and train_generator as we did in Exercise 2.\nNOTE: The 2,000 images used in this exercise are excerpted from the \"Dogs vs. Cats\" dataset available on Kaggle, which contains 25,000 images. 
Here, we use a subset of the full dataset to decrease training time for educational purposes.", "!wget --no-check-certificate \\\n https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip -O \\\n /tmp/cats_and_dogs_filtered.zip\n\nimport os\nimport zipfile\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nlocal_zip = '/tmp/cats_and_dogs_filtered.zip'\nzip_ref = zipfile.ZipFile(local_zip, 'r')\nzip_ref.extractall('/tmp')\nzip_ref.close()\n\n# Define our example directories and files\nbase_dir = '/tmp/cats_and_dogs_filtered'\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\n\n# Directory with our training cat pictures\ntrain_cats_dir = os.path.join(train_dir, 'cats')\n\n# Directory with our training dog pictures\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\n\n# Directory with our validation cat pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\n\n# Directory with our validation dog pictures\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\n\ntrain_cat_fnames = os.listdir(train_cats_dir)\ntrain_dog_fnames = os.listdir(train_dogs_dir)\n\n# Add our data-augmentation parameters to ImageDataGenerator\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n# Note that the validation data should not be augmented!\nval_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_dir, # This is the source directory for training images\n target_size=(150, 150), # All images will be resized to 150x150\n batch_size=20,\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary')\n\n# Flow validation images in batches of 20 using val_datagen generator\nvalidation_generator = val_datagen.flow_from_directory(\n validation_dir,\n target_size=(150, 
150),\n batch_size=20,\n class_mode='binary')", "Finally, let's train the model using the features we extracted. We'll train on all 2000 images available, for 2 epochs, and validate on all 1,000 validation images.", "history = model.fit_generator(\n train_generator,\n steps_per_epoch=100,\n epochs=2,\n validation_data=validation_generator,\n validation_steps=50,\n verbose=2)", "You can see that we reach a validation accuracy of 88–90% very quickly. This is much better than the small model we trained from scratch.\nFurther Improving Accuracy with Fine-Tuning\nIn our feature-extraction experiment, we only tried adding two classification layers on top of an Inception V3 layer. The weights of the pretrained network were not updated during training. One way to increase performance even further is to \"fine-tune\" the weights of the top layers of the pretrained model alongside the training of the top-level classifier. A couple of important notes on fine-tuning:\n\nFine-tuning should only be attempted after you have trained the top-level classifier with the pretrained model set to non-trainable. If you add a randomly initialized classifier on top of a pretrained model and attempt to train all layers jointly, the magnitude of the gradient updates will be too large (due to the random weights from the classifier), and your pretrained model will just forget everything it has learned.\nAdditionally, we fine-tune only the top layers of the pre-trained model rather than all layers of the pretrained model because, in a convnet, the higher up a layer is, the more specialized it is. The first few layers in a convnet learn very simple and generic features, which generalize to almost all types of images. But as you go higher up, the features are increasingly specific to the dataset that the model is trained on. 
The goal of fine-tuning is to adapt these specialized features to work with the new dataset.\n\nAll we need to do to implement fine-tuning is to set the top layers of Inception V3 to be trainable, recompile the model (necessary for these changes to take effect), and resume training. Let's unfreeze all layers belonging to the mixed7 module—i.e., all layers found after mixed6—and recompile the model:", "from tensorflow.keras.optimizers import SGD\n\nunfreeze = False\n\n# Unfreeze all models after \"mixed6\"\nfor layer in pre_trained_model.layers:\n if unfreeze:\n layer.trainable = True\n if layer.name == 'mixed6':\n unfreeze = True\n\n# As an optimizer, here we will use SGD \n# with a very low learning rate (0.00001)\nmodel.compile(loss='binary_crossentropy',\n optimizer=SGD(\n lr=0.00001, \n momentum=0.9),\n metrics=['acc'])", "Now let's retrain the model. We'll train on all 2000 images available, for 50 epochs, and validate on all 1,000 validation images. (This may take 15-20 minutes to run.)", "history = model.fit_generator(\n train_generator,\n steps_per_epoch=100,\n epochs=50,\n validation_data=validation_generator,\n validation_steps=50,\n verbose=2)", "We are seeing a nice improvement, with the validation loss going from ~1.7 down to ~1.2, and accuracy going from 88% to 92%. 
That's a 4.5% relative improvement in accuracy.\nLet's plot the training and validation loss and accuracy to show it conclusively:", "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Retrieve a list of accuracy results on training and validation data\n# sets for each training epoch\nacc = history.history['acc']\nval_acc = history.history['val_acc']\n\n# Retrieve a list of list results on training and validation data\n# sets for each training epoch\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\n# Get number of epochs\nepochs = range(len(acc))\n\n# Plot training and validation accuracy per epoch\nplt.plot(epochs, acc)\nplt.plot(epochs, val_acc)\nplt.title('Training and validation accuracy')\n\nplt.figure()\n\n# Plot training and validation loss per epoch\nplt.plot(epochs, loss)\nplt.plot(epochs, val_loss)\nplt.title('Training and validation loss')", "Congratulations! Using feature extraction and fine-tuning, you've built an image classification model that can identify cats vs. dogs in images with over 90% accuracy.\nClean Up\nRun the following cell to terminate the kernel and free memory resources:", "import os, signal\nos.kill(os.getpid(), signal.SIGKILL)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Atomahawk/flagging-suspicious-blockchain-transactions
lab_notebooks/PyGraphistry btc 2013-2014.ipynb
mit
[ "Pygraphistry Viz", "# Imports\nimport graphistry\nimport numpy as np\nimport pandas as pd\nfrom py2neo import Graph, Path\n\ngraphistry.register(key='48a82a78fdd442482cec24fe06051c905e2a382d581852a4ba645927c736acbcfe7256e22873a5c97cff6b8bd37c836b')", "---------------------------", "# Static - Connect to the database\n# graph = Graph('http://neo4j:nakama@ec2-34-212-133-23.us-west-2.compute.amazonaws.com:7474')\n\n# tx = graph.cypher.begin()\n# for name in [\"Alice\", \"Bob\", \"Carol\"]:\n# tx.append(\"CREATE (person:Person {name:{name}}) RETURN person\", name=name)\n# alice, bob, carol = [result.one for result in tx.commit()]\n\n# friends = Path(alice, \"KNOWS\", bob, \"KNOWS\", carol)\n# graph.create(friends)\n\n# graph.data(\"MATCH (a:address) --> (b:incoming_payment) --> (c:transaction) RETURN LIMIT 25\")\n\n# rows = pandas.read_csv('transactions.csv')[:1000]\n# graphistry.hypergraph(rows)['graph'].plot()\n\n# Retrieve all the paper metadata\n# btc_metadata = pd.read_sql_query('SELECT * FROM Papers', conn)\n\n# df = pd.DataFrame(graph.data(\"MATCH (n:transaction) Return n LIMIT 25\"))", "df.head()\nRetrieve citations data\ncitations = pd.read_csv('citations.txt', names = ['source', 'target', 'label'])\nDedupe Citations\ncitations = citations.drop_duplicates(subset=['source', 'target'])\nClean Citations IDs\ncitations['target'] = citations['target'].str.strip('.')\ncitations['source'] = citations['source'].astype(str).str.strip('.')\nUnique subjects\nsubjects = arxiv_metadata.primary_subject.unique()\nsubject_colors = dict(zip(subjects, range(0, len(subjects))))\narxiv_metadata['color'] = arxiv_metadata.primary_subject.map(lambda x: subject_colors[x])\ncitations.info()\nmetadata_merge = citations.merge(arxiv_metadata,\n left_on='source',\n right_on='id').merge(arxiv_metadata,\n left_on='target',\n right_on='id', \n suffixes=('_from', '_to'))\nmetadata_merge.info()\ncitations = pd.read_csv('Projects/ArXiv/data/citations/citations.txt', names = ['source', 
'target', 'label'])\n# links = pd.read_csv('./lesmiserables.csv')\ncitations.head()\nSet up the plotter\nplotter = graphistry.bind(source=\"source\", destination=\"target\")\nplotter.plot(citations)\ncitations[\"label\"] = citations.value.map(lambda v: \"#Meetings: %d\" % v)\nplotter = plotter.bind(edge_weight=\"label\")\nplotter.plot(citations)\nSet up igraph for easy metadata etc\nig = plotter.pandas2igraph(citations)\nig = plotter.pandas2igraph(metadata_merge)\nAdd the Arxiv Metadata\nvertex_metadata = pd.DataFrame(ig.vs['nodeid'], columns=['id']).merge(arxiv_metadata, how='left', on='id')\nig.vs['primary_subject'] = vertex_metadata['primary_subject']\nig.vs['color'] = vertex_metadata['color']\nig.vs['title'] = vertex_metadata['title']\nig.vs['year'] = vertex_metadata['year']\nig.vs['month'] = vertex_metadata['month']\nig.vs['category'] = vertex_metadata['category']\nig.vs['pagerank'] = ig.pagerank()\nig.vs['community'] = ig.community_infomap().membership\nig.vs['in_degree'] = ig.indegree()\nplotter.bind(point_size='in_degree', point_color='color').plot(ig)\nplotter.bind(point_color='community', point_size='pagerank').plot(ig)\nSilk Road Bitcoin Embezzling Visualization", "transactions = pd.read_csv('transactions.csv')\ntransactions['Date'] = pd.to_datetime(transactions['Date'],unit='ms') #coerce date format\ntransactions[:3]\n\nprint('DataFrame headers: {}' .format(list(transactions.columns)))\n\ntransactions.columns[-1]\n\n# 'taint' is weighted as 5\ntransactions['isTainted'].unique()\n\n# for item in transactions[transactions['isTainted'] == 5].isTainted:\n# item = 10\n\n# for column in transactions.columns[-1]:\n# transactions[transactions == 5] = 10\n\ntransactions.shape\n\ntransactions.info()\n\n# transaction window\nprint(transactions['Date'].sort_values().head(1), '\\n')\nprint(transactions['Date'].sort_values().tail(1))", "Visualization 1: Quick Visualization & Analysis\nTask: Spot the embezzling\n1. 
Use the histogram tool to filter for only tainted transactions\n2. Turn on the Setting \"Prune Isolated Nodes\" to hide wallets with no remaining transactions\n3. Use the filters or excludes tool to only show transactions over 1000 or 1000. \n4. Verify that money flowed from Ross Ulbricht to Carl Force, and explore where else it flowed.", "g = graphistry.edges(transactions).bind(source='Source', destination='Destination')\n\ng.plot()", "Visualization 2: Summarizing Wallets", "# Compute how much wallets received in new df 'wallet_in'\nwallet_in = transactions\\\n.groupby('Destination')\\\n.agg({'isTainted': lambda x: 1 if x.sum() > 0 else 0, 'Amount $': np.sum})\\\n.reset_index().rename(columns={'Destination': 'wallet', 'isTainted': 'isTaintedWallet'})\n# rename destination to wallet\n# rename isTainted to isTaintedWallet\n\n#not all wallets received money, tag these\nwallet_in['Receivables'] = True\n\nwallet_in[:3]\n\nwallet_in['isTaintedWallet'].unique()\n\n# Compute how much wallets sent in new df 'wallet_out'\nwallet_out = transactions\\\n .groupby('Source')\\\n .agg({'isTainted': np.sum, 'Amount $': np.max})\\\n .reset_index().rename(columns={'Source': 'wallet', 'isTainted': 'isTaintedWallet'})\n# rename source to wallet\n# rename isTainted to isTaintedWallet\n\n#not all wallets received money, tag these\nwallet_out['Payables'] = True\n\nwallet_out[:3]\n\nwallet_out['isTaintedWallet'].unique()\n\n# Join Data\nwallets = pd.merge(wallet_in, wallet_out, how='outer')\nwallets['Receivables'] = wallets['Receivables'].fillna(False)\nwallets['Payables'] = wallets['Payables'].fillna(False)\nprint('# Wallets only sent or only received', len(wallet_in) + len(wallet_out) - len(wallets))\nwallets[:3]\n\ntmp = wallets\n\n# colors at: http://staging.graphistry.com/docs/legacy/api/0.9.2/palette.html#Paired\ndef convert_to_colors(value):\n if value == 0:\n return 36005 # magenta\n else:\n return 42005 # orange\n\ntmp['isTaintedWallet'] = 
tmp['isTaintedWallet'].apply(convert_to_colors)\n\ntmp['isTaintedWallet'].unique()", "Plot\nBind color to whether tainted", "g.nodes(tmp).bind(node='wallet', point_color='isTaintedWallet').plot()", "Plain-no-audio.mov" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
amccaugh/phidl
docs/tutorials/movement.ipynb
mit
[ "Moving, rotating, mirroring\nThere are several actions we can take to move and rotate PHIDL objects. These actions include movement, rotation, and reflection. There are several types of PHIDL objects (Device, DeviceReference, Port, Polygon, CellArray, Label, and Group) but they all can be moved and manipulated in the same ways.\nBasic movement and rotation\nWe'll start by creating a blank Device and some shapes. We'll add the shapes to the Device as references", "import phidl.geometry as pg\nfrom phidl import quickplot as qp\nfrom phidl import Device\n\n# Start with a blank Device\nD = Device()\n\n# Create some more shape Devices\nT = pg.text('hello', size = 10, layer = 1)\nE = pg.ellipse(radii = (10,5))\nR = pg.rectangle(size = (10,3), layer = 2)\n\n# Add the shapes to D as references\ntext = D << T\nellipse = D << E\nrect1 = D << R\nrect2 = D << R\n\nqp(D) # quickplot it!", "Now let's practice moving and rotating the objects:", "text.move([10,4]) # Translate by dx = 10, dy = 4\nellipse.move(origin = [1,1], destination = [2,2.5]) # Translate by dx = 1, dy = 1.5\nrect1.move([1,1], [5,5], axis = 'y') # Translate by dx = 0, dy = 4 (motion only along y-axis)\nrect2.movey(4) # Same as specifying axis='y' in the move() command\nrect2.movex(4) # Same as specifying axis='x'' in the move() command\nellipse.movex(30,40) # Moves \"from\" x=30 \"to\" x=40 (i.e. translates by dx = 10)\n\nrect1.rotate(45) # Rotate the first waveguide by 45 degrees around (0,0)\nrect2.rotate(-30, center = [1,1]) # Rotate the second waveguide by -30 degrees around (1,1)\n\ntext.mirror(p1 = [1,1], p2 = [1,3]) # Reflects across the line formed by p1 and p2\n\nqp(D) # quickplot it!", "Working with properties\nEach Device and DeviceReference object has several properties which can be used to learn information about the object (for instance where it's center coordinate is). 
Several of these properties can actually be used to move the geometry by assigning them new values.\nAvailable properties are:\n\nxmin / xmax: minimum and maximum x-values of all points within the object\nymin / ymax: minimum and maximum y-values of all points within the object\nx: centerpoint between minimum and maximum x-values of all points within the object\ny: centerpoint between minimum and maximum y-values of all points within the object\nbbox: bounding box (see note below) in format ((xmin,ymin),(xmax,ymax))\ncenter: center of bounding box", "print('bounding box:')\nprint(text.bbox) # Will print the bounding box of text in terms of [(xmin, ymin), (xmax, ymax)]\nprint('xsize and ysize:')\nprint(text.xsize) # Will print the width of text in the x dimension\nprint(text.ysize) # Will print the height of text in the y dimension\nprint('center:')\nprint(text.center) # Gives you the center coordinate of its bounding box\nprint('xmax')\nprint(ellipse.xmax) # Gives you the rightmost (+x) edge of the ellipse bounding box", "Let's use these properties to manipulate our shapes to arrange them a little better", "# First let's center the ellipse\nellipse.center = [0,0] # Move the ellipse such that the bounding box center is at (0,0)\n\n# Next, let's move the text to the left edge of the ellipse\ntext.y = ellipse.y # Move the text so that its y-center is equal to the y-center of the ellipse\ntext.xmax = ellipse.xmin # Moves the ellipse so its xmax == the ellipse's xmin\n\n# Align the right edge of the rectangles with the x=0 axis\nrect1.xmax = 0\nrect2.xmax = 0\n\n# Move the rectangles above and below the ellipse\nrect1.ymin = ellipse.ymax + 5\nrect2.ymax = ellipse.ymin - 5\n\nqp(D)", "In addition to working with the properties of the references inside the Device, we can also manipulate the whole Device if we want. 
Let's try mirroring the whole Device D:", "print(D.xmax) # Prints out '10.0'\n\nD.mirror((0,1)) # Mirror across line made by (0,0) and (0,1)\n\nqp(D)", "A note about bounding boxes\nWhen we talk about bounding boxes, we mean it in the sense of the smallest enclosing box which contains all points of the geometry. So the bounding box for the device D looks like this:", "# The phidl.geometry library has a handy bounding-box function\n# which takes a bounding box and creates a rectangle shape for it\ndevice_bbox = D.bbox\nD << pg.bbox(device_bbox, layer = 3)\n\nqp(D)", "When we query the properties of D, they will be calculated with respect to this bounding-rectangle. For instance:", "print('Center of Device D:')\nprint(D.center)\n\nprint('X-max of Device D:')\nprint(D.xmax)", "Chaining commands\nMany of the movement/manipulation functions return the object they manipulate. We can use this to chain multiple commands in a single line.\nFor instance these two expressions:\nrect1.rotate(angle = 37)\nrect1.move([10,20])\n...are equivalent to this single-line expression\nrect1.rotate(angle = 37).move([10,20])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
hiteshagrawal/python
udacity/nano-degree/L1_Starter_Code.ipynb
gpl-2.0
[ "Before we get started, a couple of reminders to keep in mind when using iPython notebooks:\n\nRemember that you can see from the left side of a code cell when it was last run if there is a number within the brackets.\nWhen you start a new notebook session, make sure you run all of the cells up to the point where you last left off. Even if the output is still visible from when you ran the cells in your previous session, the kernel starts in a fresh state so you'll need to reload the data, etc. on a new session.\nThe previous point is useful to keep in mind if your answers do not match what is expected in the lesson's quizzes. Try reloading the data and run all of the processing steps one by one in order to make sure that you are working with the same variables and data that are at each quiz stage.\n\nLoad Data from CSVs", "import unicodecsv\n\n## Longer version of code (replaced with shorter, equivalent version below)\n\n# enrollments = []\n# f = open('enrollments.csv', 'rb')\n# reader = unicodecsv.DictReader(f)\n# for row in reader:\n# enrollments.append(row)\n# f.close()\n\ndef readme(filename):\n with open(filename, 'rb') as f:\n reader = unicodecsv.DictReader(f)\n return list(reader)\n\n#####################################\n# 1 #\n#####################################\n\n## Read in the data from daily_engagement.csv and project_submissions.csv \n## and store the results in the below variables.\n## Then look at the first row of each table.\nenrollments = readme('enrollments.csv')\ndaily_engagement = readme('daily_engagement.csv')\nproject_submissions = readme('project_submissions.csv') ", "Fixing Data Types", "from datetime import datetime as dt\n\n# Takes a date as a string, and returns a Python datetime object. 
\n# If there is no date given, returns None\ndef parse_date(date):\n if date == '':\n return None\n else:\n return dt.strptime(date, '%Y-%m-%d')\n \n# Takes a string which is either an empty string or represents an integer,\n# and returns an int or None.\ndef parse_maybe_int(i):\n if i == '':\n return None\n else:\n return int(i)\n\n# Clean up the data types in the enrollments table\nfor enrollment in enrollments:\n enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])\n enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])\n enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'\n enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'\n enrollment['join_date'] = parse_date(enrollment['join_date'])\n \nenrollments[0]\n\n# Clean up the data types in the engagement table\nfor engagement_record in daily_engagement:\n engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))\n engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))\n engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))\n engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])\n engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])\n \ndaily_engagement[0]\n\n# Clean up the data types in the submissions table\nfor submission in project_submissions:\n submission['completion_date'] = parse_date(submission['completion_date'])\n submission['creation_date'] = parse_date(submission['creation_date'])\n\nproject_submissions[0]", "Note when running the above cells that we are actively changing the contents of our data variables. 
If you try to run these cells multiple times in the same session, an error will occur.\nInvestigating the Data", "#####################################\n# 2 #\n#####################################\n\n## Find the total number of rows and the number of unique students (account keys)\n## in each table.\nunique_enrolled_students = set()\nfor enrollment in enrollments:\n unique_enrolled_students.add(enrollment['account_key'])\nlen(unique_enrolled_students)", "Problems in the Data", "#####################################\n# 3 #\n#####################################\n\n## Rename the \"acct\" column in the daily_engagement table to \"account_key\".\nunique_engagement_students = set()\nfor engagement_record in daily_engagement:\n unique_engagement_students.add(engagement_record['account_key'])\nlen(unique_engagement_students)\n\ndaily_engagement[1]", "Missing Engagement Records", "#####################################\n# 4 #\n#####################################\n\n## Find any one student enrollments where the student is missing from the daily engagement table.\n## Output that enrollment.\n\nfor enrollment in enrollments:\n student = enrollment['account_key']\n if student not in unique_engagement_students:\n print enrollment\n break", "Checking for More Problem Records", "#####################################\n# 5 #\n#####################################\n\n## Find the number of surprising data points (enrollments missing from\n## the engagement table) that remain, if any.\nnum_problem_students = 0\nfor enrollment in enrollments:\n student = enrollment['account_key']\n if (student not in unique_engagement_students and \n enrollment['join_date'] != enrollment['cancel_date']):\n print enrollment\n num_problem_students += 1\n\nnum_problem_students", "Tracking Down the Remaining Problems", "# Create a set of the account keys for all Udacity test accounts\nudacity_test_accounts = set()\nfor enrollment in enrollments:\n if enrollment['is_udacity']:\n 
udacity_test_accounts.add(enrollment['account_key'])\nlen(udacity_test_accounts)\n\n# Given some data with an account_key field, removes any records corresponding to Udacity test accounts\ndef remove_udacity_accounts(data):\n non_udacity_data = []\n for data_point in data:\n if data_point['account_key'] not in udacity_test_accounts:\n non_udacity_data.append(data_point)\n return non_udacity_data\n\n# Remove Udacity test accounts from all three tables\nnon_udacity_enrollments = remove_udacity_accounts(enrollments)\nnon_udacity_engagement = remove_udacity_accounts(daily_engagement)\nnon_udacity_submissions = remove_udacity_accounts(project_submissions)\n\nprint len(non_udacity_enrollments)\nprint len(non_udacity_engagement)\nprint len(non_udacity_submissions)", "Refining the Question", "#####################################\n# 6 #\n#####################################\n\n## Create a dictionary named paid_students containing all students who either\n## haven't canceled yet or who remained enrolled for more than 7 days. 
The keys\n## should be account keys, and the values should be the date the student enrolled.\n\n# paid_students = {}\n# for enrollments in non_udacity_enrollments:\n# if enrollments['days_to_cancel'] is None or enrollments['days_to_cancel'] > 7 :\n# paid_students[enrollments['account_key']] = enrollments['join_date']\n\n# len(paid_students) \n\npaid_students = {}\nfor enrollment in non_udacity_enrollments:\n if (not enrollment['is_canceled'] or\n enrollment['days_to_cancel'] > 7):\n account_key = enrollment['account_key']\n enrollment_date = enrollment['join_date']\n if (account_key not in paid_students or\n enrollment_date > paid_students[account_key]):\n paid_students[account_key] = enrollment_date\nlen(paid_students)", "Getting Data from First Week", "# Takes a student's join date and the date of a specific engagement record,\n# and returns True if that engagement record happened within one week\n# of the student joining.\n# def within_one_week(join_date, engagement_date):\n# time_delta = engagement_date - join_date\n# return time_delta.days < 7\n\n\ndef within_one_week(join_date, engagement_date):\n time_delta = engagement_date - join_date\n return time_delta.days >= 0 and time_delta.days < 7\n\n#####################################\n# 7 #\n#####################################\n\n## Create a list of rows from the engagement table including only rows where\n## the student is one of the paid students you just found, and the date is within\n## one week of the student's join date.\n\ndef remove_free_trial_cancels(data):\n new_data = []\n for data_point in data:\n if data_point['account_key'] in paid_students:\n new_data.append(data_point)\n return new_data\n\npaid_enrollments = remove_free_trial_cancels(non_udacity_enrollments)\npaid_engagement = remove_free_trial_cancels(non_udacity_engagement)\npaid_submissions = remove_free_trial_cancels(non_udacity_submissions)\n\nprint len(paid_enrollments)\nprint len(paid_engagement)\nprint 
len(paid_submissions)\n\npaid_engagement_in_first_week = []\n\nfor engagement_record in paid_engagement:\n account_key = engagement_record['account_key']\n join_date = paid_students[account_key]\n engagement_record_date = engagement_record['utc_date']\n\n if within_one_week(join_date, engagement_record_date):\n paid_engagement_in_first_week.append(engagement_record)\n\nprint len(paid_engagement_in_first_week)\n\nprint paid_engagement_in_first_week[1:10]", "Exploring Student Engagement", "from collections import defaultdict\n\n# Create a dictionary of engagement grouped by student.\n# The keys are account keys, and the values are lists of engagement records.\nengagement_by_account = defaultdict(list)\nfor engagement_record in paid_engagement_in_first_week:\n account_key = engagement_record['account_key']\n engagement_by_account[account_key].append(engagement_record)\n\n# Create a dictionary with the total minutes each student spent in the classroom during the first week.\n# The keys are account keys, and the values are numbers (total minutes)\ntotal_minutes_by_account = {}\nfor account_key, engagement_for_student in engagement_by_account.items():\n total_minutes = 0\n for engagement_record in engagement_for_student:\n total_minutes += engagement_record['total_minutes_visited']\n total_minutes_by_account[account_key] = total_minutes\n\nimport numpy as np\n\n# Summarize the data about minutes spent in the classroom\ntotal_minutes = total_minutes_by_account.values()\nprint 'Mean:', np.mean(total_minutes)\nprint 'Standard deviation:', np.std(total_minutes)\nprint 'Minimum:', np.min(total_minutes)\nprint 'Maximum:', np.max(total_minutes)", "Debugging Data Analysis Code", "#####################################\n# 8 #\n#####################################\n\n## Go through a similar process as before to see if there is a problem.\n## Locate at least one surprising piece of data, output it, and take a look at it.\n\n\nstudent_with_max_minutes = None\nmax_minutes = 0\n\nfor 
student, total_minutes in total_minutes_by_account.items():\n if total_minutes > max_minutes:\n max_minutes = total_minutes\n student_with_max_minutes = student\n\nmax_minutes\n#Alternatively, you can find the account key with the maximum minutes using this shorthand notation:\n#max(total_minutes_by_account.items(), key=lambda pair: pair[1])\n\nfor engagement_record in paid_engagement_in_first_week:\n if engagement_record['account_key'] == student_with_max_minutes:\n print engagement_record", "Lessons Completed in First Week", "#####################################\n# 9 #\n#####################################\n\n## Adapt the code above to find the mean, standard deviation, minimum, and maximum for\n## the number of lessons completed by each student during the first week. Try creating\n## one or more functions to re-use the code above.\n\n# def total_num(data):\n# for account_key, engagement_for_student in engagement_by_account.items():\n# total_lessons = 0\n# for engagement_record in engagement_for_student:\n# total_lessons += engagement_record[data]\n# total_lessons_by_account[account_key] = total_lessons\n\n# total_lessons_by_account = {}\n# for account_key, engagement_for_student in engagement_by_account.items():\n# total_lessons = 0\n# for engagement_record in engagement_for_student:\n# total_lessons += engagement_record['lessons_completed']\n# total_lessons_by_account[account_key] = total_lessons\n\n# print len(total_lessons_by_account) \n# print total_lessons_by_account['619']\n\n# student_with_max_lesson = None\n# max_lessons_completed = 0\n# for student, lessons_completed in total_lessons_by_account.items():\n# if lessons_completed > max_lessons_completed:\n# max_lessons_completed = lessons_completed\n# student_with_max_lesson = student\n \n# print max_lessons_completed, student_with_max_lesson \n\n# total_lessons = total_lessons_by_account.values()\n# print 'Mean:', np.mean(total_lessons)\n# print 'Standard deviation:', np.std(total_lessons)\n# print 
'Minimum:', np.min(total_lessons)\n# print 'Maximum:', np.max(total_lessons)\n\nfrom collections import defaultdict\n\ndef group_data(data, key_name):\n grouped_data = defaultdict(list)\n for data_point in data:\n key = data_point[key_name]\n grouped_data[key].append(data_point)\n return grouped_data\n\nengagement_by_account = group_data(paid_engagement_in_first_week,\n 'account_key')\n\ndef sum_grouped_items(grouped_data, field_name):\n summed_data = {}\n for key, data_points in grouped_data.items():\n total = 0\n for data_point in data_points:\n total += data_point[field_name]\n summed_data[key] = total\n return summed_data\n\n\n\nimport numpy as np\n\ndef describe_data(data):\n print 'Mean:', np.mean(data)\n print 'Standard deviation:', np.std(data)\n print 'Minimum:', np.min(data)\n print 'Maximum:', np.max(data)\n\n\ntotal_minutes_by_account = sum_grouped_items(engagement_by_account,\n 'total_minutes_visited')\n \ndescribe_data(total_minutes_by_account.values())\n\nlessons_completed_by_account = sum_grouped_items(engagement_by_account,\n 'lessons_completed')\ndescribe_data(lessons_completed_by_account.values())\n\nprint engagement_by_account['619']\n", "Number of Visits in First Week", "######################################\n# 10 #\n######################################\n\n## Find the mean, standard deviation, minimum, and maximum for the number of\n## days each student visits the classroom during the first week.\n\nfor engagement_record in paid_engagement:\n if engagement_record['num_courses_visited'] > 0:\n engagement_record['has_visited'] = 1\n else:\n engagement_record['has_visited'] = 0\n\n# days_visited_by_account = sum_grouped_items(engagement_by_account,\n# 'has_visited')\n# describe_data(days_visited_by_account.values()) \n \ndef sum_grouped_items_record(grouped_data, field_name):\n summed_data = {}\n for key, data_points in grouped_data.items():\n total = 0\n for data_point in data_points:\n #total += data_point[field_name]\n if 
data_point[field_name] > 0: #Means student has visited the classroom\n total += 1\n summed_data[key] = total\n return summed_data\n\ndays_visited_by_account = sum_grouped_items_record(engagement_by_account,\n 'num_courses_visited')\ndescribe_data(days_visited_by_account.values())", "Splitting out Passing Students", "######################################\n# 11 #\n######################################\n\n## Create two lists of engagement data for paid students in the first week.\n## The first list should contain data for students who eventually pass the\n## subway project, and the second list should contain data for students\n## who do not.\n\n# subway_project_lesson_keys = ['746169184', '3176718735']\n\n# passing_engagement =\n# non_passing_engagement =\n\npaid_submissions[2]\n# {u'account_key': u'256',\n# u'assigned_rating': u'PASSED',\n# u'completion_date': datetime.datetime(2015, 1, 20, 0, 0),\n# u'creation_date': datetime.datetime(2015, 1, 20, 0, 0),\n# u'lesson_key': u'3176718735',\n# u'processing_state': u'EVALUATED'}\n\nsubway_project_lesson_keys = ['746169184', '3176718735']\n\npass_subway_project = set()\n\nfor submission in paid_submissions:\n project = submission['lesson_key']\n rating = submission['assigned_rating'] \n\n if ((project in subway_project_lesson_keys) and\n (rating == 'PASSED' or rating == 'DISTINCTION')):\n pass_subway_project.add(submission['account_key'])\n\nlen(pass_subway_project)\n\npassing_engagement = []\nnon_passing_engagement = []\n\nfor engagement_record in paid_engagement_in_first_week:\n if engagement_record['account_key'] in pass_subway_project:\n passing_engagement.append(engagement_record)\n else:\n non_passing_engagement.append(engagement_record)\n\nprint len(passing_engagement)\nprint len(non_passing_engagement)", "Comparing the Two Student Groups", "######################################\n# 12 #\n######################################\n\n## Compute some metrics you're interested in and see how they differ for\n## 
students who pass the subway project vs. students who don't. A good\n## starting point would be the metrics we looked at earlier (minutes spent\n## in the classroom, lessons completed, and days visited).", "Making Histograms", "######################################\n# 13 #\n######################################\n\n## Make histograms of the three metrics we looked at earlier for both\n## students who passed the subway project and students who didn't. You\n## might also want to make histograms of any other metrics you examined.", "Improving Plots and Sharing Findings", "######################################\n# 14 #\n######################################\n\n## Make a more polished version of at least one of your visualizations\n## from earlier. Try importing the seaborn library to make the visualization\n## look better, adding axis labels and a title, and changing one or more\n## arguments to the hist() function." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session06/Day2/IntroToParaViewSolutions.ipynb
mit
[ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib notebook", "First and foremost, you should have already installed ParaView on your machine. If not, you can download the package from paraview.org.\nAn Introduction to ParaView\nversion 0.1\n\nAdam A Miller (Northwestern CIERA/Adler Planetarium)\n01 May 2018\nOur data do not live in 2 dimensions... \n[As the third grade version of me would say, \"Ummmm, Mr. Miller? Duh!\"]\nAnd yet, a fundamental limitation we that consisently place upon ourselves is to insist that we represent the data in 2D when communicating with the public and our colleagues. \nIn a sense, we are doing science with 1 arm tied behind our backs...\nOur data also do not live in 3 dimensions.\nBut I can think of a distinct advantage to moving to 3D representations: our spatial representation of the Universe can be fully described in 3 dimensions. \nThus, if we want to describe the positions of stars in a cluster (Gaia?), or show variations in density within a Giant Molecular Cloud, or examine the surface of a minor planet or asteroid, these tasks are all acomplished much better in 3D.\n(ignoring special relativity for now...)\nWhy interactive?\nIt would not be unreasonable to take the following stance: the heavy lifting in exploring a data set should be done by the researchers. In this sense, \"final\" plots or renditions can be presented to an audience via a slide or paper that summarize all of the salient features.\nWhat if, however, you wish to enable science, or discovery, for amatuers (e.g., the Zooniverse, or even in the classroom (see James's talk from yesterday)? \nIt is unfair to ask this audience to calculate integrals, or to develop even a small fraction of the domain expertise that you and your colleagues (PhDs and PhD candidates) have. 
Interactivity provides a natural way for this audience to explore the data.\nFurthermore, it may even help you in your quest for discovery and understaning.\nSo, today –– ParaView\nParaView is an open source platform that was specifically designed for data analysis and visualization. Briefly, ParaView is interesting (for us) for several reasons: \n\nGeneral purpose and can easily be used on laptop or HPC\nNaturally utilizes multiple processors for handling large data sets (in particular 3D)\nIncludes a scripting interface (via Python)\n\nUltimately, we want to turn data (images, measurements, spectra, etc) into some rendering to represent the data in a way that provides insight or better understanding [for all viz, not just ParaView].\nParaView takes VTK, the Visualization Toolkit, data as an input (later you will create some VTK files).\nVTK uses a basic data-flow paradigm, in which data flows through the system while being transformed at each step [via modules known as algorithms]. Algorithms have input ports to take data, and output ports to produce output:\n\nSources do not have input ports, but have outputs [i.e. to get data into the system, e.g., reading a file]\nSinks convert input into graphics such that they can be rendered \nFilters are intermediate algorithms, that convert input into output\n\nConnecting sources, sinks, and filters can create arbitrarily complicated visual renderings. \n<img style=\"display: block; margin-left: auto; margin-right: auto\" src=\"./images/pv_visualization_model.png\" align=\"middle\">\n<div align=\"right\"> <font size=\"-3\">(credit: ParaView Guide) </font></div>\n\nFollowing this brief introduction, we will start the visual exploration with ParaView. \nProblem 1) Creating an Interactive Sphere\nWe will begin by creating a simple 3D object (a sphere). While there is nothing spectacular about a sphere, I'll note that it is not particularly easy to represent a(n interactive) sphere in matplotlib.\nProblem 1a\nOpen paraview. 
\nCreate a sphere. [Sources $\\rightarrow$ Sphere]\nAt this stage you will notice that nothing is yet visible in the layout panel. However, a few things have happened.\nThere is now a pipeline module in the pipeline browser (upper left panel). Properties of the sphere can be adjusted in the properties panel (lower left). Finally, the Apply button in the properties panel is now active.\nWhile this is not the case for this particular data set, because data ingestion and manipulation can be highly time consuming, ParaView allows you to perform those operations prior to rendering the data. \nThe Apply button allows you to accept those changes before proceeding. \nProblem 1b\nRender the sphere [click Apply].\nUse your mouse to inspect, rotate, and examine the sphere.\nProblem 1c\nAdjust the center of the sphere to the position [0,0,1], and adjust the radius to 1. \nProblem 1d\nMake the sphere appear more \"spherical\" by adjusting theta resolution to 100. \nThere are a variety of filters that can be applied to the data (in the Filters menu). The available filters are dependent on the data type provided to ParaView.\nProblem 1e\nShrink the size of the mesh cells on the surface of the sphere. [Filters $\\rightarrow$ Alphabetical $\\rightarrow$ Shrink]\nCan you see the shrunken mesh cells?\nProblem 2) Python Scripting a Sphere\nParaView provides a python scripting interface. The package, pvpython, makes it possible to script up everything that one might want to do with paraview, using python (!). \nThe ability to script these tasks is hugely important for reproducibility (and more advanced versions of automated analysis). \nUnfortunately, (as far as I can tell) pvpython is only compatible with python version 2.x, and it will not run within our DSFP conda environment. Forunately, ParaView ships with an internal python interpreter, so we are going to use that for now.\nProblem 2a\nOpen the python interpreter. 
[View $\\rightarrow$ Python Shell]\nThis will open a python (likely v2.7.10) instance just below the layout panel.\nUnfortunately, at this stage there are many panels, and not a lot of room to inspect the visualization. Ideally a lot of this work would be done on larger screens, but we will for now work with what we have.\n[If space is really at a premium on your screen, you can remove the pipeline browser and properties window as everything will be scripted for the remainder of this problem.]\nBefore we proceed - remove the previously created sphere from the layout panel. [Click on Shrink, then click the delete button in the properties tab. Click on Sphere1, then click the delete button in the properties tab.]\nProblem 2b\nCreate the sphere data source using the python interpreter.\nSphere()\n\nAs before, we have created an active source in the pipeline. However, the sphere has not been rendered.\nProblem 2c\nShow the active source. \nShow() # prepare the display\nRender() # render the visualization\n\nProblem 2d\nAdjust the properties of the sphere to match those in Problem 1.\nSetProperties(Radius=1.0)\nSetProperties(Center=[0,0,1])\n\nIn addition to SetProperties for the data, you can also use SetDisplayProperties to adjust the geometric represenation of the data.\nProblem 2e\nSet the opacity of the sphere to 0.2.\nSetDisplayProperties(0.2)\n\nAs before, we can also create filters via pvpython. Again, we will shrink the size of the mesh cells on the sphere.\nProblem 2f\nShrink the mesh cells on the surface of the sphere (using only python commands).\nShrink()\n\nHint - don't forget to render the object.\nDoes this look like the rendering that we created previously?\nWhen scripting ParaView, the input data set is not automatically hidden after creating a new output. Instead, these actions must be done explicitly. 
There are a few different ways to handle this (below is an example that leverages an object oriented approach – meaning you need to start over to follow this).\nProblem 2g\nUsing python remove the sphere instance from the rendition of the sphere.\nsphereInstance = Sphere()\nsphereInstance.Radius = 1.0\nsphereInstance.Center[2] = 1.0\nprint sphereInstance.Center\n\nsphereDisplay = Show(sphereInstance) \nview = Render () \nsphereDisplay.Opacity = 0.7\n\nRender(view)\n\nshrinkInstance = Shrink(Input=sphereInstance , ShrinkFactor=1.0)\nprint shrinkInstance.ShrinkFactor\nHide(sphereInstance)\nshrinkDisplay = Show(shrinkInstance) \nRender()\n\nProblem 3) Scripting An Interactive Sphere\nFinally, it is possible to capture the commands executed via the GUI as a python script. In this way it is easy to reproduce an interactive session.\nProblem 3a\nTrace your interactive commands [Tools $\\rightarrow$ Start Trace]\nProblem 3b\nRecrete a sphere following the steps from Problem 1.\nHow does your trace compare to the script that you developed in Problem 2?\nBreak Out Problem\nOf the 3 modes covered in Problems 1, 2, and 3 - which is to be preferred?\n[spend a few minutes discussing with your neighbor]\nProblem 4) Getting Data Into ParaView\nSpheres are all well and good, but what we truly care about is visualizing our astronomical data.\nUnfortunately, this is not as simple as plot(x,y) or even plot(x,y,z) as you might expect for something designed to play nicely with python.\nInstead, we need to package the data as Visualization Toolkit, or vtk, files.\nEven more unfortunately, vtk are binary files that are known to be somewhat challenging to work with.\nNevertheless, many of the most common/powerful 3D rendering programs (ParaView, Mayavi, etc), utilize vtk data.\nIf I am being completely honest – and I always try to be completely honest – I don't know a lot about vtk, or why it's an especially useful format for these programs. 
\nNevertheless, I will try to provide you with some insight (i.e. repeat what other people have said) on the vtk format.\nBriefly, the vtk representation of data requires the specification of a geometry or topology, and then the data set properties are specified within the chosen geometry. vtk can handle points or cells data in five different formats: structured points, structured grid, rectilinear grid, unstructured grid, and polygonal data.\nStructured points is the most simple format, wherein only the mesh dimensions (nx, ny, nz), mesh origin (x0, y0, z0), and cell dimensions (dx, dy, dz) are specified.\n<img style=\"display: block; margin-left: auto; margin-right: auto\" src=\"./images/Structured_points.png\" align=\"middle\">\n<div align=\"right\"> <font size=\"-3\">(credit: https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python) </font></div>\n\nRectilinear grid is also regularly spaced, but the spacing is not uniform. Thus, nodes must be specified along the given axes Ox, Oy, and Oz. \n<img style=\"display: block; margin-left: auto; margin-right: auto\" src=\"./images/Rectilinear_grid.png\" align=\"middle\">\n<div align=\"right\"> <font size=\"-3\">(credit: https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python) </font></div>\n\nStructured grid is not regular or uniform. Thus, the nodes for every point within the mesh must be specified.\n<img style=\"display: block; margin-left: auto; margin-right: auto\" src=\"./images/Structured_grid.png\" align=\"middle\">\n<div align=\"right\"> <font size=\"-3\">(credit: https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python) </font></div>\n\nUnstructured grid is a structured grid, but it can handle cell data (not just point data).\nPolygonal data is the most complicated (and thus provides a great deal of representational freedom), but we are going to ignore this for now.\nAs I mentioned previously, formatting data as vtk files is a bit of a pain. 
Fortuantely, there is a python package pyevtk that makes the process relatively painless.\nYou should have already installed pyevtk, but if not you can run: \npip install pyevtk", "from pyevtk.hl import gridToVTK, pointsToVTK", "We will start with a relatively straightforward example of creating a collection of points in 3 dimentions\nProblem 4a\nCreate 3 arrays, x, y, z, of 500 points each that consist of random draws from independent gaussian distributions with mean 0 and standard deviation 20.\nEvalute the temperature and density on the grid provided below.\n$$T = 2.4/x + 0.04y^2 + 5/z$$\n$$\\rho = 1.11x^2 + 3.1y + 0.6z^2$$", "np.random.seed(23)\n\nx = np.random.normal(0,20,size=500)\ny = np.random.normal(0,20,size=500)\nz = np.random.normal(0,20,size=500)\n\ntemp = 2.4/x + 0.04*y**2 + 5/z\npressure = 1.11*x**2 + 3.1*y + 0.6*z**2", "Problem 4b\nExecute the cell below to create a .vtu file that can be read by ParaView.\nOpen the file in ParaView. Represent the pressure data as Point Gaussian, and change the color to match that of the data.\nFrom the ParaView render, how would you describe the pressure of the system? What about the temperature?", "pointsToVTK(\"./points\", x, y, z, data = {\"temp\" : temp, \"pressure\" : pressure})", "I can already think of a few ways in which this representation would be useful (especially now that there is Gaia data available), but the real power of ParaView comes via the volume renderings (which require \"cell\" data as opposed to \"point\" data). \nWe will start with random data on a structured grid. Recall that this means we need to specify the mesh dimensions and cell widths before evaluating data on the grid. 
[A more interesting example follows]", "nx, ny, nz = 6, 6, 2 # number of cells\nlx, ly, lz = 1.0, 1.0, 1.0 # length on each size\ndx, dy, dz = lx/nx, ly/ny, lz/nz # size of the cells\nncells = nx * ny * nz # total number of cells\nnpoints = (nx + 1) * (ny + 1) * (nz + 1) # points defined by boundaries, hence n + 1", "From here we can establish the coordinates of the cell and the point data.", "x = np.arange(0, lx + 0.1*dx, dx, dtype='float64') \ny = np.arange(0, ly + 0.1*dy, dy, dtype='float64') \nz = np.arange(0, lz + 0.1*dz, dz, dtype='float64') ", "Problem 4c\nAssign random values for the pressure and tempurature and export the data to a file called structure.vtr.\nOpen structure.vtr in ParaView. Represent the data as a surface with edges, and color the pressure data.\nNote - pay attention to the shape of the pressure and temperature data.", "pressure = np.random.rand(ncells).reshape( (nx, ny, nz)) \n\ntemp = np.random.rand(npoints).reshape( (nx + 1, ny + 1, nz + 1)) \n\ngridToVTK(\"./structured\", x, y, z, cellData = {\"pressure\" : pressure}, pointData = {\"temp\" : temp})", "Now we will create a slightly more complicated volume rendition of the pressure and temperature.\nProblem 4d\nCreate a grid with 25 cells on a side and equal size cells of length = 0.4. 
Create cell data for the pressure and point data for the temparture according to the functions given below.\n$$\\rho = \\left(\\cos(x) + \\cos(y) + \\cos(z)\\right)e^{-(x + y + z)^2/5}$$\n$$T = x + y + z$$\nHint - avoid writing any loops for this problem.", "nx, ny, nz = 25, 25, 25 # number of cells\nlx, ly, lz = 10, 10, 10 # length on each size\ndx, dy, dz = lx/nx, ly/ny, lz/nz # size of the cells\n\nx = np.arange(-lx/2, lx/2 + 0.1*dx, dx, dtype='float64') \ny = np.arange(-ly/2, ly/2 + 0.1*dy, dy, dtype='float64') \nz = np.arange(-lz/2, lz/2 + 0.1*dz, dz, dtype='float64')\n\nxp, yp, zp = np.meshgrid(x[:-1] + np.diff(x)/2,\n y[:-1] + np.diff(y)/2,\n z[:-1] + np.diff(z)/2,\n indexing='ij')\nxt, yt, zt = np.meshgrid(x,y,z,\n indexing='ij')\n\npressure = (np.cos(xp) + np.cos(yp) + np.cos(zp))*np.exp(-(xp + yp + zp)**2/5)", "Problem 4e\nCreate a vtk file with the pressure and temperature data. Open the resuling file in ParaView.\nExamine the volume rendering of the data. Does the visualization make sense given the input data?\nHint - the x, y, and z coordinates should all be specified as 3d arrays.", "gridToVTK(\"./structured\", xt, yt, zt, cellData = {\"pressure\" : pressure}, pointData = {\"temp\" : temp})", "For simplicity, we have focused on the most ordered methods of producing vtk files. As we have demonstrated, pyevtk provides a simple interface to convert NumPy arrays into vtk binary files. \nProblem 5) Example Data Analysis - K means\nIn addition to 3D rendering, ParaView provies utilities for performing (light) statistical analysis on data sets.\nAs a demonstration of this we will revisit one of our commonly used data sets (the famous Iris machine learning data).\nLoad the iris data into a pandas DataFrame via seaborn.", "import seaborn as sns\niris = sns.load_dataset(\"iris\")", "Problem 5a\nAs a point of comparison, quickly visualize this data in 2D. 
\nHint - seaborn makes this easy and possible with a single line of code.", "g = sns.PairGrid(iris, hue=\"species\", hue_kws={\"cmap\": [\"Blues\", \"Oranges\", \"Greens\"]})\ng = g.map_diag(plt.hist, lw=3)\ng = g.map_lower(sns.kdeplot, lw=1)\ng = g.map_upper(plt.scatter)", "As you can see: the iris data feature 3 classes in a four dimentional data set. We will now prepare this data for visualization in ParaView.\nProblem 5b\nSelect 3 of the variables to serve as the (x, y, z) spatial coordinates for the data set. Note - the specific selection does not particularly matter, though 2 features are clearly better than the others.", "x = np.array(iris[\"petal_length\"])\ny = np.array(iris[\"petal_width\"])\nz = np.array(iris[\"sepal_length\"])", "Problem 5c \nExport the iris data as a vtk file for ParaView. Include both the species and the four features as data for the vtk file. Finally, be sure to randomize the order of the data prior to writing the vtk file.\nHint 1 - convert the species to a numerical representation.\nHint 2 - you may find np.random.choice helpful.", "species = np.array(iris[\"species\"].map({\"setosa\":1, \n \"versicolor\":2,\n \"virginica\":3}))\npetal_width = np.array(iris[\"petal_width\"])\npetal_length = np.array(iris[\"petal_length\"])\nsepal_width = np.array(iris[\"sepal_width\"])\nsepal_length = np.array(iris[\"sepal_length\"])\n\nshuffle = np.random.choice(range(len(x)), size=len(x), replace=False)\n\n\npointsToVTK(\"./iris\", x[shuffle], y[shuffle], z[shuffle], \n data = {\"species\" : species[shuffle], \n \"sepal_length\": sepal_length[shuffle], \n \"sepal_width\": sepal_width[shuffle],\n \"petal_length\": petal_length[shuffle],\n \"petal_width\": petal_width[shuffle]})", "Problem 5d\nOpen the iris data in ParaView. Represent the points as point gaussians, and color the points based on their species.\nRotating the data on the screen - it is possible to get a better sense of the distance between versicolor and virginica classes. 
\nNow we will attempt to cluster the data.\nProblem 5e\nCreate a K means statistical filter for the data. [Filters $\\rightarrow$ Statistics $\\rightarrow$ K means]\nRemove species from the variable of interest (we don't want to cluster on the correct answer). Increase the training fraction to 0.5, and reduce the tolerance to 0. Then select Apply.\nTurn on the rendering of the Assessed Data (and turn off iris.vtu). Now color the data via ClosestId(0). \nHow do your clusters compare to the correct answer? Can you improve on this initial attempt at clustering?\nSolution to 5e\nThe clusters in this case look a little ridiculous. That is because we fixed $k = 5$. If we instead fit for 3 clusters, we find clusters that better resemble the true classifications, however there are still a number of misidentifications.\nBreak Out Problem\nWhich visualization technique is to be preferred: seaborn or ParaView? \n[spend a few minutes discussing with your neighbor]\nProblem 5f\nClick the + symbol next to Layout #1. Select Plot Matrix View. Be sure that all 5 data values are selected, then click Apply. \nNow which method do you prefer?\nProblem 6) Rendering Basic Astronomical Data\nIf you have not already, download the files Density.vtk and Temperature.vtk from the data repo. These data were generated as part of a cosmological simulation run by Pascal Paschos at Northwestern University.\nNote, you'll need to unpack the tarball\ntar -zxvf pv_vtk.tar.gz\n\nProblem 6a\nOpen the Density.vtk and Temperature.vtk files in ParaView.\nSelect the \"eye\" next to Density to render the density information.\nProblem 6b\nChange the representation to Volume. The structure in this data is more complicated than our previous examples, take a moment to explore the contents of the data cube.\nProblem 6c\nTurn off the Density render, and turn on the Temperature render. Again, change the represenation to Volume. 
\nHow does the density data compare to the temperature data?\nProblem 6d\nChange the colorbar for the Temperature. \nIn the properties tab, under the Coloring header, select the choose presets folder (this is the folder with heart over it). \nUsing your previous knowledge of visualization, select a terrible colorbar.\nReplace the terrible colorbar with a useful colorbar.\nProblem 6e\nChange the colorbar for the Density data.\nProblem 6f\nRender both the Temperature and Density data. \nCan you see both volume renditions? If not, adjust the color schemes for each until you can.\nProblem 7) Slicing the Data Cube\nProblem 7a\nSelect the density data. Then slice the data using the tool in the top bar. When happy with the slice that you have selected, click \"apply\".\nNote - you can adjust the position of the slice by dragging the outline, and you can adjust the angle of the slice with the arrow.\nProblem 7b\nHide the density and temperature data, and examine the slice. If you are unhappy with your previous selection, adjust it.\nProblem 7c\nAdd a contour to the slice. This can be done using the contour button (multiple concentric half-spheres).\nSet the range for the contours from -2 to 2.5, and choose 6 steps for the contours. If you need to, adjust the color of the contours so they are visible relative to the background colors.\nProblem 7d\nAdd a clip to the temperature data. This can be done using the clip button (between the slice and contour buttons).\nAdjust the position of the clip to match that of the density contour.\nNote - you may need to slide the position of the clip slightly so that the contours remain visible.\nProblem 7e\nRotate the slice + clip to an angle of your choosing. 
Then save your rendering of the data to a png file.\nWarning - in some instances on my machine this lead to ParaView crashing, be sure you have everything set up as you would like prior to exporting to png.\nFinally, in closing, I will note that it is possible in ParaView to export your rendering to x3dom, which is a format accepted by AAS journals to include interactive 3d plots in publications.\nChallenge Problem\nConvert the data that your brought with you to the DSFP into a ParaView readable vtk format." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gully/adrasteia
notebooks/adrasteia_03-03_cross_match.ipynb
mit
[ "Gaia\nReal data!\ngully\nSept 14, 2016\nOutline:\n\nMore exploring\n\nImport these first-- I auto import them every time!:", "#! cat /Users/gully/.ipython/profile_default/startup/start.ipy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%config InlineBackend.figure_format = 'retina'\n%matplotlib inline\n\nimport pandas as pd\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord", "1. Retrieve existing catalogs\nRetrieve Data file from here:\nhttps://github.com/BrownDwarf/ApJdataFrames/blob/master/data/Luhman2012/tbl1_plusSimbad.csv", "d1 = pd.read_csv('../../ApJdataFrames/data/Luhman2012/tbl1_plusSimbad.csv') #local version\n\nd1 = d1[~d1.RA.isnull()]\n\nd1.columns\n\nc1 = SkyCoord(d1.RA.values, d1.DEC.values, unit=(u.hourangle, u.deg), frame='icrs')", "2. Read in the Gaia data", "df_list = []", "This takes a finite amount of RAM, but should be fine for modern laptops.", "for i in range(16):\n df_list.append(pd.read_csv('../data/TgasSource_000-000-{:03d}.csv'.format(i)))\n\ntt = pd.concat(df_list, ignore_index=True)\n\nplt.figure(figsize=(10,4))\nax = sns.jointplot(tt.ra, tt.dec, kind='hex', size=8)\nax.ax_joint.plot(c1.ra.deg, c1.dec.deg, '.', alpha=0.5)\n\ncg = SkyCoord(tt.ra.values, tt.dec.values, unit=(u.deg, u.deg), frame='icrs')", "Match", "idx, d2d, blah = c1.match_to_catalog_sky(cg)\n\nvec_units = d2d.to(u.arcsecond)\nvec = vec_units.value\n\nbins = np.arange(0, 4, 0.2)\nsns.distplot(vec, bins=bins, kde=False),\nplt.xlim(0,4)\nplt.xlabel('match separation (arcsec)')", "Forced to match to nearest neighbor", "len(set(idx)), idx.shape[0]", "... 
yielding some redundancies in cross matching", "tt_sub = tt.iloc[idx]\ntt_sub = tt_sub.reset_index()\ntt_sub = tt_sub.drop('index', axis=1)\n\nd1 = d1.reset_index()\nd1 = d1.drop('index', axis=1)\n\nx1 = pd.concat([d1, tt_sub], axis=1)\n\nx1.shape\n\ncol_order = d1.columns.values.tolist() + tt_sub.columns.values.tolist()\nx1 = x1[col_order]\nx0 = x1.copy()\n\nx0['xmatch_sep_as'] = vec\n\nx0['Gaia_match'] = vec < 2.0 #Fairly liberal, 1.0 might be better.\n\nplt.figure(figsize=(8,4))\nbins = np.arange(2, 14, 0.2)\nsns.distplot(x0.parallax[x0.Gaia_match], bins=bins)\n#sns.distplot(1.0/(x0.parallax[x0.Gaia_match]/1000.0))\nplt.xlabel('Parallax (mas)')\nplt.savefig('../results/luhman_mamajek2012.png', dpi=300)\n\nx0.Gaia_match.sum(), len(d1)", "112 out of 862 have Gaia parallaxes... that seems high for some reason?", "plt.figure(figsize=(10,4))\nax = sns.jointplot(tt.ra, tt.dec, kind='hex', size=8, xlim=(230,255), ylim=(-40,-10))\nax.ax_joint.plot(c1.ra.deg, c1.dec.deg, '.', alpha=0.5)\nax.ax_joint.scatter(x0.ra[x0.Gaia_match], x0.dec[x0.Gaia_match], \n s=x0.parallax[x0.Gaia_match]**3*0.2, c='r',alpha=0.5)", "The end." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ElMejorEquipoDeLaSerena/VariableStarsClassification
Final.ipynb
mit
[ "Classifying Variable Stars\nDerive a set of features for a set of light curves of variable stars of known class. Train Machine Learning (ML) algorithms on a sample of this data set and then apply the algorithms to a set of light curves of unknown class to predict what type of variable star they are. Validate classified data.\nSecond La Serena School for Data Science 2014\nWilfred Tyler Gee\nAracelly Herrera\nCarolina Núñez\nDavid Valenzuela", "import os.path\nimport sys\nimport urllib\n\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pylab as pl\n\nfrom sklearn import svm, cross_validation, metrics\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.naive_bayes import GaussianNB\n\nimport scipy.stats as stats\n\n%matplotlib inline\n\nfrom astroML.time_series import lomb_scargle\n\n# Set some default font properties\nmatplotlib.rcParams['figure.figsize'] = (10.0, 8.0)\nmatplotlib.rcParams['font.size'] = 18", "Load Data\nOur catalog data comes mostly from the Catalina Sky Survey (http://catalinadata.org).", "# Load our matched data from the catalog\ncat_data = 'catalog_data_final/matched_data.csv'\ncatalog = np.genfromtxt(cat_data, dtype=None, names=True, delimiter=',')\n\nprint(\"Columns in our catalog:\")\nfor i, name in enumerate(catalog.dtype.names):\n print(\"{:2d} {}\".format(i, name))", "Setup our Features", "def get_irq(rows):\n mags = rows['Mag']\n return np.percentile(mags, 75) - np.percentile(mags, 25)\n\ndef get_skew(rows):\n skew = stats.skew(rows['Mag']) \n return skew\n\ndef get_median(rows):\n return np.median(rows['Mag'])\n\ndef build_features(save_file='features.txt', limit=False):\n \"\"\"\n Builds our feature array. Loops over entire loaded catalog\n and creates one row per object. 
Current features include:\n \n * Period (from catalog)\n * Magnitude\n * Median\n * Amplitude\n * Skew\n * IQR\n \n If you want a limited set (for faster building), pass\n limit=True\n \"\"\"\n\n obj_id_array = np.unique(catalog['ID'])\n\n features = np.zeros( (len(obj_id_array), 11) )\n\n # For each object\n for i in range(len(obj_id_array)):\n obj_id = obj_id_array[i]\n \n rows = catalog[(catalog['ID'] == obj_id)]\n \n if limit & i > 500:\n break\n \n features[i,0] = obj_id # Object ID\n features[i,1] = rows['Var_Type'][0] # Class\n features[i,2] = 0. # Predicted Class \n features[i,3] = rows['RA'][0] # RA_J2000\n features[i,4] = rows['Decl'][0] # Dec\n features[i,5] = rows['Period_days'][0] # Period\n features[i,6] = rows['Mag'][0] # Mag\n features[i,7] = get_median(rows) # Median\n features[i,8] = rows['Amplitude'][0] # Amplitude\n features[i,9] = get_skew(rows) # Skew\n features[i,10] = get_irq(rows) # IQR\n \n # Save the files\n np.savetxt('features.txt', features, delimiter=',', \n header=\"Object ID, Class, RAJ2000, Dec, Period, Mag, Amplitude, Median, Skew, IQR\")\n \n return features\n\n# Get our features. Either build them from scratch or load from file\n# If we build them from scratch, we also pass in the file name to save.\n\nfeatures = None\n\nfeatures_file = 'features.txt'\nif os.path.isfile(features_file):\n features = np.loadtxt(features_file, dtype=None, delimiter=\",\")\nelse:\n features = build_features(save_file=features_file)\n \n# Display a row the features\nfeatures[0]", "Separate Training and Test Data\nWe use a poor-man's version of the holdout method to split the initial set of data into training and test. During the training below, we use various methods to further split the training data into a training and validating set. 
Once we have trained and validated the data, we perform a prediction on the unlabeled test data.", "# Start to build the table we will use\n\n# For Training data, we want to get a certain amount of our\n# table, we slice off a percentage\nrow_start = 0\nrow_end = len(features) - int(.25 * len(features))\n\n# Our features table also contains meta information, so we slice\nfeature_column_start = 5\n\n# Our collections of data to train and test\nlabeled_svm = features[row_start:row_end,feature_column_start:]\nunlabeled_svm = features[row_end:,feature_column_start:]\ntest_all = features[row_end:]\n\n# The correct classes corresponding to the training and test\nclasses = features[row_start:row_end,1]\nunlabeled_classes = features[row_end:,1]\n\n#labeled_svm, classes, unlabeled_svm, unlabeled_classes = cross_validation.train_test_split(\n# features[:,feature_column_start], features[:,1], test_size=1./3.)", "Display the number of each class that we have in the training and test data", "types = { 1: 'EW', 2: 'EA', 3: 'beta Lyrae'}\n\n# How many of each Variable Type do we have\nprint(\"Type\\t\\t\\tType\\t\\tNumber\".format())\nprint(\"{}\".format('-'*45))\nfor x in range(1,4):\n print(\"Labeled Class size:\\t {:14s} {}\".format(types[x],len(classes[classes == x])))\n print(\"Unlabelel Class size:\\t {:14s} {}\\n\".format(types[x],len(unlabeled_classes[unlabeled_classes == x])))", "Figure out what training size is optimal\nHere we use a set Test size but are trying to determine the optimal size for our training set. We loop over a number of sizes and determine the accuracy from each.\nHere we do an initial split on the data and then try to predict with our classifier. 
Because of the simple split done with train_test_split we do not expect to achieve a high accuracy.", "# Create our (linear) classifier\nclf = svm.LinearSVC()\nclf.fit(labeled_svm, classes)\n\nX_train, X_test, y_train, y_test = cross_validation.train_test_split (labeled_svm, classes, test_size=1./3.)\n\nprint(\"training set = {} {}\".format(X_train.shape, y_train.shape))\nprint(\"test size = {} {}\".format(X_test.shape, y_test.shape))\n\nclf.fit(X_train, y_train)\npred_class = clf.predict(X_test)\nN_match = (pred_class == y_test).sum()\nprint(\"N_match = {}\".format(N_match))\nacc = 1. * N_match / len(pred_class)\nprint(\"Accuracy = {}\".format(acc))", "Here we use a slightly more intelligent split of the labeled data (with StratifiedShuffleSplit) to attempt to achieve a higher accuracy", "ss = cross_validation.StratifiedShuffleSplit(classes, 5, test_size = 1./3.)\nscores = cross_validation.cross_val_score(clf, labeled_svm, classes, cv=ss)\nprint(\"Accuracy = {} +- {}\".format(scores.mean(),scores.std()))", "See if there is a better training size that we can use", "step = 100\nstop = int(len(labeled_svm) * (2./3.))\nNs = np.arange(500, stop , step)\n\n#print \"Attempted N sizes = {}\".format(Ns)\nscores = np.zeros(len(Ns))\nstds = np.zeros(len(Ns))\nfor i in range(len(Ns)):\n N = Ns[i]\n ss = cross_validation.StratifiedShuffleSplit(classes, 5, test_size = 1./3., train_size = N)\n scores_i = cross_validation.cross_val_score(clf, labeled_svm, classes, cv=ss)\n scores[i] = scores_i.mean()\n stds[i] = scores_i.std()\n\n \n# Get our optimal n\noptimal_n = Ns[np.argmax(scores)]\n \n# Plot our results, including optimal n\npl.clf()\nfig = pl.figure()\nax = fig.add_subplot(1,1,1)\nax.errorbar (Ns, scores, yerr = stds)\nax.set_xlabel(\"N\")\nax.set_ylabel(\"Accuracy\")\npl.title(\"Optimal: $n={}$\".format(optimal_n)) \npl.show()", "Figure out the optimal C\nAs an exercise, we compute the optimal $C$ scaling parameter for the equation below. 
We are still using the linear SVC so this is mostly an example of how one would compute this parameter. A graph of the possible $C$ values are displayed as well as the best estimator. \n$\\min_{\\bf{w}, \\xi, b}\\left{\\frac{1}{2}||\\bf{w}||^2 + C\\sum_{i=1}^n\\xi_i\\right}$\nsubject to $y_i(\\bf{w\\cdot x}_i-b)\\geq 1-\\xi_i$, $\\xi_i\\geq 0$", "C_range = 10. ** np.arange(-5, 5)\nparam_grid = dict(C=C_range)\n\n# We use a test_size of None because we already figured out our optimal train_size\nss = cross_validation.StratifiedShuffleSplit(classes, 5, test_size = None, train_size = optimal_n)\n\ngrid = GridSearchCV(svm.LinearSVC(), param_grid=param_grid, cv=ss)\ngrid.fit (labeled_svm, classes)\n\n# plot the scores of the grid grid_scores_ contains parameter settings and scores\n# grid_scores_ contains parameter settings and scores \nscore_dict = grid.grid_scores_\n# We extract just the scores\nscores = [x[1] for x in score_dict]\n\npl.clf()\nfig = pl.figure()\nax = fig.add_subplot(1,1,1)\nax.plot (C_range, scores)\nax.set_xscale(\"log\")\nax.set_xlabel(\"$C$\")\nax.set_ylabel(\"Accuracy\")\nax.set_title(\"Optimal: $C={}$\".format(grid.best_estimator_.C))\npl.show ()\n\nprint \"The best classifier is: {}\".format(grid.best_estimator_)", "Support Vector Classification (SVC)\nWe use the sklean.svm.svc module with a few different kernels to attempt some fits.\nThe equation governing this classification is:\n$K(\\bf{x}, \\bf{x}') = e^{-\\gamma||\\bf{x}-\\bf{x}'||^2}$\nThrefore we need to obatin both $C$ and $\\gamma$, so we test a range of values and create a heat-map to show optimal.\nDetermine Best Kernel Classifier\nRun through the different kernels for an SVC to determine the best. Possible kernels are:\n\nlinear\npoly \nrbf\nsigmoid\nprecomputed\n\nWe perform an exhaustive grid search across all the kernels with a range of different values. We compute these separately so that we can then compare each with an ROC. 
The grid search would handle this internally if we ran them all together but we want to generate an ROC plot so we do it separate.\nGet the optimal SVC\nHere we define the parameters for each of the kernel types and then run them through an exhaustive grid search. This is similar to the examples above where we ran through a list of $C$ and $\\gamma$ values but here the GridSearchCV takes care of all the details. This gets us our optimal classifier based on this exhaustive search.\nOne area of uncertainty is the selection of our ranges below. We lifted the $C$ and $\\gamma$ ranges from Guillermo's Hands-On (todo: get reference) and just copied that over for the coefficient range. We totall guessed at the degree range and would need to explore that more.", "def get_optimal_svc(param_grid=list(), plot=False):\n \n # Do the grid search\n grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=ss)\n grid.fit (labeled_svm, classes)\n \n if plot:\n score_dict = grid.grid_scores_\n \n # We extract the scores\n scores = [x[1] for x in score_dict]\n scores = np.array(scores).reshape(len(C_range), len(gamma_range))\n\n # Make a nice figure\n pl.figure(figsize=(8, 6))\n pl.subplots_adjust(left=0.15, right=0.95, bottom=0.15, top=0.95)\n pl.imshow(scores, interpolation='nearest', cmap=pl.cm.gist_heat)\n pl.xlabel('$\\gamma$')\n pl.ylabel('$C$')\n pl.title('Heatmap for optimal: $C={}$ and $\\gamma={}$\\n'.format(grid.best_estimator_.C, grid.best_estimator_.gamma))\n pl.colorbar()\n pl.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)\n pl.yticks(np.arange(len(C_range)), C_range)\n pl.show()\n\n print \"The best classifier for this run is: {}\".format(grid.best_estimator_)\n return grid.best_estimator_\n\n# Setup our ranges\nC_range = 10. ** np.arange(-4, 4)\ngamma_range = 10. ** np.arange(-4, 4)\ndeg_range = np.arange(1, 3)\ncoef_range = 10. 
** np.arange(-4, 4)\n\nparam_grids = [\n #{ 'C': C_range, 'kernel': ['linear'] },\n #{ 'C': C_range, 'gamma': gamma_range, 'degree': deg_range, 'coef0': coef_range, 'kernel': ['poly'] },\n { 'C': C_range, 'gamma': gamma_range, 'kernel': ['rbf'] },\n #{ 'C': C_range, 'gamma': gamma_range, 'coef0': coef_range, 'kernel': ['sigmoid'] },\n \n { 'kernel': ['linear'] },\n { 'kernel': ['poly'] },\n { 'kernel': ['sigmoid'] },\n]\n\nbest_svcs = list()\n\n# Run the param_grid one at a time (to compare ROC below)\n# Get the best clf for each kernel type\nfor param_grid in param_grids:\n print \"{}...\".format(param_grid.get('kernel'))\n clf = get_optimal_svc(param_grid=param_grid, plot=False)\n \n param_grid['clf'] = clf\n \n best_svcs.append(param_grid)", "Classify\nWe have obtained our best classifiers above, so we classify the data with each. We will then compare how these classifiers worked with a ROC.\nFirst we defined our classify and compute_roc functions and then use them below.", "def classify(clf=None, save_output=False):\n\n # Use the best estimator from above and fit with our trained data and classes\n clf.fit (labeled_svm, classes)\n\n # Attempt a prediction\n predicted_classes = clf.predict(unlabeled_svm)\n\n if save_output:\n # Creating an array to hold our new predicted values\n out = np.zeros ((len(pred_class), unlabeled_svm.shape[1] + 1))\n out[:,:unlabeled_svm.shape[1]] = unlabeled_svm[:][:]\n out[:, -1] = predicted_classes [:]\n\n classifications_filename = \"classifications_predicted_{}.csv\".format(clf.kernel)\n \n # Save our new classifications out to a file\n np.savetxt(classifications_filename, out, delimiter=\",\")\n \n return predicted_classes\n\ndef compute_roc(predicted_classes=None):\n # Get our mask array of true values\n predicted_correct_mask = np.array(unlabeled_classes == predicted_classes)\n\n # Get the scores from the classifier\n y_score = clf.decision_function(unlabeled_svm)\n\n # Perform the ROC to get false-positives and 
true-positives\n fpr, tpr, thresh = metrics.roc_curve(predicted_correct_mask,y_score[:,0])\n roc_auc = metrics.auc(fpr,tpr)\n \n return fpr, tpr, roc_auc\n\n# Loop over each SVC type, classify, then compute the ROC\nfor kernel in best_svcs:\n clf = kernel.get('clf')\n\n print \"{} \".format(clf.kernel)\n\n # Do the actual classifying\n print \"\\t Classifying...\"\n pred_class = classify(clf)\n \n # Compute the ROC\n print \"\\t Computer ROC...\"\n fpr, tpr, auc = compute_roc(pred_class)\n \n # Store the predicted classes and the ROC params\n kernel['predicted_classes'] = pred_class\n kernel['fpr'] = fpr\n kernel['tpr'] = tpr\n kernel['auc'] = auc ", "ROC Curve\nHere we determine which classifier was the \"best\" via an ROC Curve.\nNOTE: We could have just performed the exhaustive grid search above and it would have automatically compared all of these and output the absolute best, but because we wanted to generate an ROC we performed them separately.", "def plot_roc(classifier_list=None):\n # Plot the ROC for each kernel type\n pl.clf()\n fig = pl.figure()\n ax = fig.add_subplot(1,1,1)\n\n # Add a line for each kernel\n for classifier in classifier_list:\n fpr = classifier['fpr']\n tpr = classifier['tpr']\n auc = classifier['auc']\n ax.plot (fpr, tpr, label='{} (area = {:0.2f})'.format(classifier.get('kernel')[0], auc))\n\n plt.legend(loc=4)\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic Curve')\n pl.show ()\n\nplot_roc(classifier_list=best_svcs)", "After looking at the ROC curve above, we select the best estimator based on hightest area under the curve.", "aucs = np.array([x.get('auc') for x in best_svcs])\n\nbest_estimator = best_svcs[np.where(aucs.max())[0][0]]\n\npredicted_classes = best_estimator.get('predicted_classes')\n\n# Put the predicted class into the original data\ntest_all[:,2] = predicted_classes\n\n# Save predicted features\nnp.savetxt('predicted_features.csv', test_all, 
delimiter=',')", "Analysis\nShow some stats on the estimator with the best fit", "test_all = np.loadtxt('predicted_features.csv', dtype=None, delimiter=\",\")\n\n# Pull the predicated class out\npred_class = test_all[:,2]\nunlabeled_classes = test_all[:,1]\n\ndiff_index = np.where(pred_class != unlabeled_classes)\n\nlength_of_diff = len(diff_index[0])\nlength_of_unlabeled = len(unlabeled_classes)\n\ndiff_percentage = 1 - (float(length_of_diff)/float(length_of_unlabeled))\n\nprint \"{:.2%}\".format(diff_percentage)\n\nfeatures_dict = {\n'ID': 0,\n'Class': 1,\n'Predicted Class': 2,\n'RA_J2000': 3,\n'Dec': 4,\n'Period': 5,\n'Mag': 6,\n'Median': 7,\n'Amplitude': 8,\n'Skew': 9,\n'IQR': 10,\n}\ntest_all[0]\n\n# Get all misclassified objects index\ndiff_index = np.where(test_all[:,1] != test_all[:,2])\n\ndef plot_lightcurve(rows, predicted_class):\n Mag = rows[\"Mag\"]\n MJD = rows[\"MJD\"]\n T = rows[\"Period_days\"][0]\n time = rows[\"epoch_folding\"]\n cls = rows[\"Var_Type\"][0]\n \n index=sorted(range(len(time)), key=lambda k: time[k])\n Mag_new=Mag[index]\n\n double_mag = np.array(list(Mag_new)*2)\n x = [float(x)/len(Mag_new)for x in np.arange(0,len(double_mag))]\n\n plt.figure()\n plt.scatter(x, double_mag, alpha=0.25)\n\n plt.ylim(double_mag.max() + 0.5, double_mag.min() - 0.5)\n plt.xlim(0.0,2.0)\n \n plt.xlabel(\"Phase\")\n plt.ylabel(\"Magnitue\")\n \n plt.title(\"Class: {} Predicted Class: {}\".format(int(cls), int(predicted_class)))\n\n# Generate a light curve for a mis-classified object\n\nclass_table = np.zeros((3,3))\n\nfor i in range(len(diff_index[0])):\n # Get an object that was misclassified\n obj_idx = diff_index[0][i]\n obj = test_all[obj_idx]\n obj_id = obj[0]\n \n predicted_class = test_all[obj_idx,2]\n actual_class = test_all[obj_idx,1]\n \n row = int(actual_class) - 1\n col = int(predicted_class) - 1\n \n class_table[row][col] += 1\n \n # Get all the observations\n #obj_rows = catalog[np.where(catalog['ID'] == obj_id)]\n\n\n 
#plot_lightcurve(obj_rows, predicted_class)\n\nprint class_table\n\ntypes = { 1: 'EW', 2: 'EA', 3: 'beta Lyrae'}\n\n# How many of each Variable Type do we have\nprint \"Type\\t\\tTotal Number\\tNumber Misclassified\\tMisclassified Percent\".format()\nprint \"{}\".format('-'*77)\nfor x in range(1,4):\n\n total_num = len(unlabeled_classes[unlabeled_classes == x])\n total_miss = len(unlabeled_classes[unlabeled_classes == x]) # FIXME\n percent = float(class_table[x-1].sum()) / total_num\n \n print \"{:14s}\\t{:12d}\\t{:20d}\\t{:21.2%}\".format(\n types[x],total_num, total_miss, percent)", "If anything, we would expect the type 3 class (beta Lyrae) to be classified as a type 1 so it is interesing to see that they split evenly. However, we have such low numbers that it's probably not worth further analysis at this point.\nSample plots\nPlot an example of the different types (1 and 2) for three features. Incorrectly classified types are highlighted", "# Get our indices for the correctly and incorrectly classified types\n\ncorrect_idx = test_all[:,1] == test_all[:,2]\nincorrect_idx = test_all[:,1] != test_all[:,2]\n\n# Separate the correctly classified types\ntype_1_correct = test_all[test_all[correct_idx,1] == 1]\ntype_2_correct = test_all[test_all[correct_idx,1] == 2]\ntype_3_correct = test_all[test_all[correct_idx,1] == 3]\n\n# Separate the incorrectly classified types\ntype_1_incorrect = test_all[test_all[incorrect_idx,1] == 1]\ntype_2_incorrect = test_all[test_all[incorrect_idx,1] == 2]\ntype_3_incorrect = test_all[test_all[incorrect_idx,1] == 3]\n\nprint len(type_1_correct)\nprint len(type_2_correct)\nprint len(type_3_correct)\n\nprint len(type_1_incorrect)\nprint len(type_2_incorrect)\nprint len(type_3_incorrect)\n\npl.clf()\nfig = pl.figure(figsize = (10, 10))\nax = fig.gca(projection='3d')\n\nplot_list = ['Skew', 'Median', 'IQR']\n\n# Correct class type indexes\ntype_1_idx = np.where(test_all[correct_idx, 1] == 1)\ntype_2_idx = np.where(test_all[correct_idx, 1] == 
2)\ntype_3_idx = np.where(test_all[correct_idx, 1] == 3)\n\n# Incorrect class type indexes\ntype_1_idx_x = np.where(test_all[incorrect_idx, 1] != 1)\ntype_2_idx_x = np.where(test_all[incorrect_idx, 1] != 2)\ntype_3_idx_x = np.where(test_all[incorrect_idx, 1] != 3)\n\n# Type 1 Correct\nparam1 = test_all[type_1_idx,features_dict.get(plot_list[0])]\nparam2 = test_all[type_1_idx,features_dict.get(plot_list[1])]\nparam3 = test_all[type_1_idx,features_dict.get(plot_list[2])]\nax.scatter(param1, param2, param3, marker='.', c='r', edgecolors='None', alpha=0.35, s=40, label=\"Type 1 Correct\")\n\n# Type 2 Correct\nparam1 = test_all[type_2_idx,features_dict.get(plot_list[0])]\nparam2 = test_all[type_2_idx,features_dict.get(plot_list[1])]\nparam3 = test_all[type_2_idx,features_dict.get(plot_list[2])]\nax.scatter(param1, param2, param3, marker='.', c='b', edgecolors='None', alpha=0.35, s=40, label=\"Type 2 Correct\")\n\n# Type 1 Not Correct\nparam1 = test_all[type_1_idx_x,features_dict.get(plot_list[0])]\nparam2 = test_all[type_1_idx_x,features_dict.get(plot_list[1])]\nparam3 = test_all[type_1_idx_x,features_dict.get(plot_list[2])]\nax.scatter(param1, param2, param3, marker='D', c='r', alpha=0.99, s=40, label=\"Type 1 Not Correct\")\n\n# Type 2 Not Correct\nparam1 = test_all[type_2_idx_x,features_dict.get(plot_list[0])]\nparam2 = test_all[type_2_idx_x,features_dict.get(plot_list[1])]\nparam3 = test_all[type_2_idx_x,features_dict.get(plot_list[2])]\nax.scatter(param1, param2, param3, marker='D', c='b', alpha=0.99, s=40, label=\"Type 2 Not Correct\")\n\n#ax.legend(loc=\"best\")\nax.set_xlabel (plot_list[0])\nax.set_ylabel (plot_list[1])\nax.set_zlabel (plot_list[2])\n\npl.show()", "HOWEVER,\nThis is not correct. 
When we tried to plot light curves from the sample points listed above, we realized that we were indexing incorrectly above:\n# Get incorrect indices\nincorrect_idx = test_all[:,1] != test_all[:,2]\ntype_1_idx_x = np.where(test_all[incorrect_idx, 1] == 1)\ntype_2_idx_x = np.where(test_all[incorrect_idx, 1] == 2)\ntype_3_idx_x = np.where(test_all[incorrect_idx, 1] == 3)\n\nSo, it turns out our comment is correct. :) Essentially, we are highlighting the correct number of misclassified points, but then pulling random points from our original list.\nExample Correct Light Curve", "# Type 1\nobj_idx = type_1_idx[0][0]\nobj = test_all[obj_idx]\nobj_id = obj[0]\n \npredicted_class = test_all[obj_idx,2]\n \n# Get all the observations\nobj_rows = catalog[np.where(catalog['ID'] == obj_id)]\n\nplot_lightcurve(obj_rows, predicted_class)\n\n# Type 2\nobj_idx = type_2_idx[0][0]\nobj = test_all[obj_idx]\nobj_id = obj[0]\n \npredicted_class = test_all[obj_idx,2]\n \n# Get all the observations\nobj_rows = catalog[np.where(catalog['ID'] == obj_id)]\n\nplot_lightcurve(obj_rows, predicted_class)", "TODO\n\nData\nMore data \nall types, but especially type 3 (EB - beta Lyrae)\nother catalogs\n\n\nData split\nTry random sampling, k-fold cross validation, bootstrap\nTest / Validation Split\nWe used a linear kernel to determine the optimal split size but eventually used the RBF kernel as our best classifier. Could have explored different split sizes with appropriate kernel\n\n\n\n\nData scrubbing\ne.g. 
How many periods do we have?\nHow many observations do we have?\n\n\n\n\nFeatures\nMore features\ncolors\n\n\nBetter feature selection\nUse the lomb-scargle for period\n\n\n\n\nClassifier\nBetter parameter selection for various kernels\nExplore different classifier models (random forest, etc)\nCompare against astroML (unless it just wraps sklearn)\n\n\nPerformance\nUse the HPC to run the kernel loop\n\n\nAnalysis\nMore and better analysis on incorrectly classified objects (outliers)\nError analysis all along the way\nConfusion matrix\n\n\nPretty plots (seaborn)\nConfusion matrix table improvement\nGrok More" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ovilab/lammps
python/examples/pylammps/interface_usage_bonds.ipynb
gpl-2.0
[ "Using LAMMPS with iPython and Jupyter\nLAMMPS can be run interactively using iPython easily. This tutorial shows how to set this up.\nInstallation\n\nDownload the latest version of LAMMPS into a folder (we will calls this $LAMMPS_DIR from now on)\n\nCompile LAMMPS as a shared library and enable exceptions and PNG support\n bash\n cd $LAMMPS_DIR/src\n make yes-molecule\n python Make.py -m mpi -png -s exceptions -a file\n make mode=shlib auto\n\n\nCreate a python virtualenv\n bash\n virtualenv testing\n source testing/bin/activate\n\n\nInside the virtualenv install the lammps package\n (testing) cd $LAMMPS_DIR/python\n (testing) python install.py\n (testing) cd # move to your working directory\n\n\nInstall jupyter and ipython in the virtualenv\n bash\n (testing) pip install ipython jupyter\n\n\nRun jupyter notebook\n bash\n (testing) jupyter notebook\n\n\nExample", "from lammps import IPyLammps\n\nL = IPyLammps()\n\n# 2d circle of particles inside a box with LJ walls\nimport math\n\nb = 0\nx = 50\ny = 20\nd = 20\n\n# careful not to slam into wall too hard\n\nv = 0.3\nw = 0.08\n \nL.units(\"lj\")\nL.dimension(2)\nL.atom_style(\"bond\")\nL.boundary(\"f f p\")\n\nL.lattice(\"hex\", 0.85)\nL.region(\"box\", \"block\", 0, x, 0, y, -0.5, 0.5)\nL.create_box(1, \"box\", \"bond/types\", 1, \"extra/bond/per/atom\", 6)\nL.region(\"circle\", \"sphere\", d/2.0+1.0, d/2.0/math.sqrt(3.0)+1, 0.0, d/2.0)\nL.create_atoms(1, \"region\", \"circle\")\nL.mass(1, 1.0)\n\nL.velocity(\"all create 0.5 87287 loop geom\")\nL.velocity(\"all set\", v, w, 0, \"sum yes\")\n\nL.pair_style(\"lj/cut\", 2.5)\nL.pair_coeff(1, 1, 10.0, 1.0, 2.5)\n\nL.bond_style(\"harmonic\")\nL.bond_coeff(1, 10.0, 1.2)\n\nL.create_bonds(\"all\", \"all\", 1, 1.0, 1.5)\n\nL.neighbor(0.3, \"bin\")\nL.neigh_modify(\"delay\", 0, \"every\", 1, \"check yes\")\n\nL.fix(1, \"all\", \"nve\")\n\nL.fix(2, \"all wall/lj93 xlo 0.0 1 1 2.5 xhi\", x, \"1 1 2.5\")\nL.fix(3, \"all wall/lj93 ylo 0.0 1 1 2.5 yhi\", y, \"1 1 
2.5\")\n\nL.image(zoom=1.8)\n\nL.thermo_style(\"custom step temp epair press\")\nL.thermo(100)\noutput = L.run(40000)\nL.image(zoom=1.8)", "Queries about LAMMPS simulation", "L.system\n\nL.system.natoms\n\nL.system.nbonds\n\nL.system.nbondtypes\n\nL.communication\n\nL.fixes\n\nL.computes\n\nL.dumps\n\nL.groups", "Working with LAMMPS Variables", "L.variable(\"a index 2\")\n\nL.variables\n\nL.variable(\"t equal temp\")\n\nL.variables\n\nimport sys\n\nif sys.version_info < (3, 0):\n # In Python 2 'print' is a restricted keyword, which is why you have to use the lmp_print function instead.\n x = float(L.lmp_print('\"${a}\"'))\nelse:\n # In Python 3 the print function can be redefined.\n # x = float(L.print('\"${a}\"')\")\n \n # To avoid a syntax error in Python 2 executions of this notebook, this line is packed into an eval statement\n x = float(eval(\"L.print('\\\"${a}\\\"')\"))\nx\n\nL.variables['t'].value\n\nL.eval(\"v_t/2.0\")\n\nL.variable(\"b index a b c\")\n\nL.variables['b'].value\n\nL.eval(\"v_b\")\n\nL.variables['b'].definition\n\nL.variable(\"i loop 10\")\n\nL.variables['i'].value\n\nL.next(\"i\")\nL.variables['i'].value\n\nL.eval(\"ke\")", "Accessing Atom data", "L.atoms[0]\n\n[x for x in dir(L.atoms[0]) if not x.startswith('__')]\n\nL.atoms[0].position\n\nL.atoms[0].id\n\nL.atoms[0].velocity\n\nL.atoms[0].force\n\nL.atoms[0].type" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jdmcbr/geopandas
doc/source/docs/user_guide/interactive_mapping.ipynb
bsd-3-clause
[ "Interactive mapping\nAlongside static plots, geopandas can create interactive maps based on the folium library.\nCreating maps for interactive exploration mirrors the API of static plots in an explore() method of a GeoSeries or GeoDataFrame.\nLoading some example data:", "import geopandas\n\nnybb = geopandas.read_file(geopandas.datasets.get_path('nybb'))\nworld = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))\ncities = geopandas.read_file(geopandas.datasets.get_path('naturalearth_cities'))", "The simplest option is to use GeoDataFrame.explore():", "nybb.explore()", "Interactive plotting offers largely the same customisation as static one plus some features on top of that. Check the code below which plots a customised choropleth map. You can use \"BoroName\" column with NY boroughs names as an input of the choropleth, show (only) its name in the tooltip on hover but show all values on click. You can also pass custom background tiles (either a name supported by folium, a name recognized by xyzservices.providers.query_name(), XYZ URL or xyzservices.TileProvider object), specify colormap (all supported by matplotlib) and specify black outline.", "nybb.explore( \n column=\"BoroName\", # make choropleth based on \"BoroName\" column\n tooltip=\"BoroName\", # show \"BoroName\" value in tooltip (on hover)\n popup=True, # show all values in popup (on click)\n tiles=\"CartoDB positron\", # use \"CartoDB positron\" tiles\n cmap=\"Set1\", # use \"Set1\" matplotlib colormap\n style_kwds=dict(color=\"black\") # use black outline\n )", "The explore() method returns a folium.Map object, which can also be passed directly (as you do with ax in plot()). You can then use folium functionality directly on the resulting map. In the example below, you can plot two GeoDataFrames on the same map and add layer control using folium. 
You can also add additional tiles allowing you to change the background directly in the map.", "import folium\n\nm = world.explore(\n column=\"pop_est\", # make choropleth based on \"BoroName\" column\n scheme=\"naturalbreaks\", # use mapclassify's natural breaks scheme\n legend=True, # show legend\n k=10, # use 10 bins\n legend_kwds=dict(colorbar=False), # do not use colorbar\n name=\"countries\" # name of the layer in the map\n)\n\ncities.explore(\n m=m, # pass the map object\n color=\"red\", # use red color on all points\n marker_kwds=dict(radius=10, fill=True), # make marker radius 10px with fill\n tooltip=\"name\", # show \"name\" column in the tooltip\n tooltip_kwds=dict(labels=False), # do not show column label in the tooltip\n name=\"cities\" # name of the layer in the map\n)\n\nfolium.TileLayer('Stamen Toner', control=True).add_to(m) # use folium to add alternative tiles\nfolium.LayerControl().add_to(m) # use folium to add layer control\n\nm # show map" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
spacy-io/thinc
examples/05_visualizing_models.ipynb
mit
[ "Visualizing Thinc models (with shape inference)\nThis is a simple notebook showing how you can easily visualize your Thinc models and their inputs and outputs using Graphviz and pydot. If you're installing pydot via the notebook, make sure to restart your kernel (or Google Colab VM – see here for details) after installation.", "!pip install \"thinc>=8.0.0a0\" pydot graphviz svgwrite", "Let's start by defining a model with a number of layers chained together using the chain combinator:", "from thinc.api import chain, expand_window, Relu, Maxout, Linear, Softmax\n\nn_hidden = 32\ndropout = 0.2\n\nmodel= chain(\n expand_window(3),\n Relu(nO=n_hidden, dropout=dropout, normalize=True),\n Maxout(nO=n_hidden * 4),\n Linear(nO=n_hidden * 2),\n Relu(nO=n_hidden, dropout=dropout, normalize=True),\n Linear(nO=n_hidden),\n Relu(nO=n_hidden, dropout=dropout),\n Softmax(),\n)", "Here's the visualization we want to achieve for this model: the name of the layer or combination of layers and the input and output dimensions. Note that &gt;&gt; refers to a chaining of layers.\n\nThis means we need to add a node for each layer, edges connecting the nodes to the previous node (except for the first/last), and labels like \"name|(nO,nI)\" – for instance, \"maxout|(128,32)\". Here's a simple function that takes a Thinc layer (i.e. a Model instance) and returns a label with the layer name and its dimensions, if available:", "def get_label(layer):\n layer_name = layer.name\n nO = layer.get_dim(\"nO\") if layer.has_dim(\"nO\") else \"?\"\n nI = layer.get_dim(\"nI\") if layer.has_dim(\"nI\") else \"?\"\n return f\"{layer.name}|({nO}, {nI})\".replace(\">\", \"&gt;\")", "We can now use pydot to create a visualization for a given model. You can customize the direction of the notes by setting \"rankdir\" (e.g. \"TB\" for \"top to bottom\") and adjust the font and arrow styling. 
To make the visualization render nicely in a notebook, we can call into IPython's utilities.", "import pydot\nfrom IPython.display import SVG, display\n\ndef visualize_model(model):\n dot = pydot.Dot()\n dot.set(\"rankdir\", \"LR\")\n dot.set_node_defaults(shape=\"record\", fontname=\"arial\", fontsize=\"10\")\n dot.set_edge_defaults(arrowsize=\"0.7\")\n nodes = {}\n for i, layer in enumerate(model.layers):\n label = get_label(layer)\n node = pydot.Node(layer.id, label=label)\n dot.add_node(node)\n nodes[layer.id] = node\n if i == 0:\n continue\n from_node = nodes[model.layers[i - 1].id]\n to_node = nodes[layer.id]\n if not dot.get_edge(from_node, to_node):\n dot.add_edge(pydot.Edge(from_node, to_node))\n display(SVG(dot.create_svg()))", "Calling visualize_model on the model defined above will render the visualization. However, most dimensions will now show up as (?, ?), instead of the actual dimensions as shown in the graph above. That's because Thinc allows defining models with missing shapes and is able to infer the missing shapes from the data when you call model.initialize. The model visualized here doesn't define all its shapes, so the labels are incomplete.", "visualize_model(model)", "To fill in the missing shapes, we can call model.initialize with examples of the expected input X and output Y. Running visualize_model again now shows the complete shapes.", "import numpy\n\nX = numpy.zeros((5, 784), dtype=\"f\")\nY = numpy.zeros((54000, 10), dtype=\"f\")\nmodel.initialize(X=X, Y=Y)\n\nvisualize_model(model)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
googledatalab/notebooks
intro/Introduction to Python.ipynb
apache-2.0
[ "Introduction to Python\nThis notebook is primarily focused on introducing the specifics of using Python in an interactive environment such as Datalab. It is not intended to provide a complete tutorial to Python as a language. If you're completely new to Python, no problem! Python is quite straightforward, and there are lots of resources. The interactive step-by-step material at Codecademy might be of interest.\nTo get started, below is a code cell that contains a Python statement. You can run it by pressing Shift+Enter or clicking the the Run toolbar button with the cell selected.", "print(\"Hello World\")", "You can edit the cell above and re-execute it to iterate over it. You can also add additional code cells to enter new blocks of code.", "import sys\n\nnumber = 10\n\ndef square(n):\n return n * n", "The cell above created a variable named number and a function named square, and placed them into the global namespace. It also imported the sys module into the same namespace. This global namespace is shared across all the cells in the notebook.\nAs a result, the following cell should be able to access (as well as modify) them.", "print('The number is currently %d' % number)\nnumber = 11\nsys.stderr.write('And now it is %d' % number)\n\nsquare(number)", "By now, you've probably noticed a few interesting things about code cells:\n\n\nUpon execution, their results are shown inline in the notebook, after the code that produced the results. These results are included into the saved notebook. Results include outputs of print statements (text that might have been written out to stdout as well as stderr) and the final result of the cell.\n\n\nSome code cells do not have any visible output.\n\n\nCode cells have a distinguishing border on the left. 
This border is a washed out gray color when the notebook is first loaded, indicating that a cell has not been run yet; the border changes to a filled blue border after the cell runs.\n\n\nGetting Help\nPython APIs are usually accompanied by documentation. You can use ? to invoke help on a class or a method. For example, execute the cells below:", "str?\n\ng = globals()\ng.get?", "When run, these cells produce docstring content that is displayed in the help pane within the sidebar.\nThe code cells also provide auto-suggest. For example, press Tab after the '.' to see a list of members callable on the g variable that was just declared.", "# Intentionally incomplete for purposes of auto-suggest demo, rather than running unmodified.\ng.", "Function signature help is also available. For example, press Tab in the empty parentheses below.", "str()", "Note that help in Python relies on the interpreter being able to resolve the type of the expression that you are invoking help on.\nIf you have not yet executed code, you may be able to invoke help directly on the class or method you're interested in, rather than the variable itself. Try this.", "import datetime\n\ndatetime.datetime?", "Python Libraries\nDatalab includes the standard Python library and a set of libraries that you can easily import. Most of the libraries were installed using pip, the Python package manager, or pip3 for Python 3.", "%%bash\npip list --format=columns", "If you have suggestions for additional packages to include, please submit feedback proposing the inclusion of the packages in a future version.\nInstalling a Python Library\nYou can use pip to install your own Python 2 libraries, or pip3 to install Python 3 libraries.\nKeep in mind that this will install the library within the virtual machine instance being used for Datalab, and the library will become available to all notebooks and all users sharing the same instance.\nThe library installation is temporary. 
If the virtual machine instance is recreated, you will need to reinstall the library.\nThe example, below, installs scrapy, a library that helps in scraping web content.", "%%bash\napt-get update -y\napt-get install -y -q python-dev python-pip libxml2-dev libxslt1-dev zlib1g-dev libffi-dev libssl-dev\npip install -q scrapy", "Inspecting the Python evironment by running pip list, we should now see that Scrapy is installed and ready to use.", "%%bash\npip list --format=columns" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
babraham123/script-runner
notebooks/python_tutorial.ipynb
mit
[ "{\n \"nb_display_name\": \"Intro to Python\",\n \"nb_description\": \"A quick tutorial on the basics of Python\",\n \"nb_filename\": \"python_tutorial.ipynb\",\n \"params\":[\n {\n \"name\":\"testnum\",\n \"display_name\":\"Test num\",\n \"description\":\"\",\n \"input_type\":\"integer\"\n },\n {\n \"name\":\"teststr\",\n \"display_name\":\"Test str\",\n \"description\":\"\",\n \"input_type\":\"string\"\n }\n ]\n}", "All the IPython Notebooks in this lecture series are available at https://github.com/rajathkumarmp/Python-Lectures\nThe Zen Of Python", "import this", "Variables\nA name that is used to denote something or a value is called a variable. In python, variables can be declared and values can be assigned to it as follows,", "x = 2\ny = 5\nxy = 'Hey'\n\nprint x+y, xy", "Multiple variables can be assigned with the same value.", "x = y = 1\n\nprint x,y", "Operators\nArithmetic Operators\n| Symbol | Task Performed |\n|----|---|\n| + | Addition |\n| - | Subtraction |\n| / | division |\n| % | mod |\n| * | multiplication |\n| // | floor division |\n| ** | to the power of |", "1+2\n\n2-1\n\n1*2\n\n1/2", "0? This is because both the numerator and denominator are integers but the result is a float value hence an integer value is returned. 
By changing either the numerator or the denominator to float, correct answer can be obtained.", "1/2.0\n\n15%10", "Floor division is nothing but converting the result so obtained to the nearest integer.", "2.8//2.0", "Relational Operators\n| Symbol | Task Performed |\n|----|---|\n| == | True, if it is equal |\n| != | True, if not equal to |\n| < | less than |\n| > | greater than |\n| <= | less than or equal to |\n| >= | greater than or equal to |", "z = 1\n\nz == 1\n\nz > 1", "Bitwise Operators\n| Symbol | Task Performed |\n|----|---|\n| & | Logical And |\n| l | Logical OR |\n| ^ | XOR |\n| ~ | Negate |\n| >> | Right shift |\n| << | Left shift |", "a = 2 #10\nb = 3 #11\n\nprint a & b\nprint bin(a&b)\n\n5 >> 1", "0000 0101 -> 5 \nShifting the digits by 1 to the right and zero padding\n0000 0010 -> 2", "5 << 1", "0000 0101 -> 5 \nShifting the digits by 1 to the left and zero padding\n0000 1010 -> 10\nBuilt-in Functions\nPython comes loaded with pre-built functions\nConversion from one system to another\nConversion from hexadecimal to decimal is done by adding prefix 0x to the hexadecimal value or vice versa by using built in hex( ), Octal to decimal by adding prefix 0 to the octal value or vice versa by using built in function oct( ).", "hex(170)\n\n0xAA\n\noct(8)\n\n010", "int( ) accepts two values when used for conversion, one is the value in a different number system and the other is its base. Note that input number in the different number system should be of string type.", "print int('010',8)\nprint int('0xaa',16)\nprint int('1010',2)", "int( ) can also be used to get only the integer value of a float number or can be used to convert a number which is of type string to integer format. Similarly, the function str( ) can be used to convert the integer back to string format", "print int(7.7)\nprint int('7')", "Also note that function bin( ) is used for binary and float( ) for decimal/float values. 
chr( ) is used for converting ASCII to its alphabet equivalent, ord( ) is used for the other way round.", "chr(98)\n\nord('b')", "Simplifying Arithmetic Operations\nround( ) function rounds the input value to a specified number of places or to the nearest integer.", "print round(5.6231) \nprint round(4.55892, 2)", "complex( ) is used to define a complex number and abs( ) outputs the absolute value of the same.", "c =complex('5+2j')\nprint abs(c)", "divmod(x,y) outputs the quotient and the remainder in a tuple(you will be learning about it in the further chapters) in the format (quotient, remainder).", "divmod(9,2)", "isinstance( ) returns True, if the first argument is an instance of that class. Multiple classes can also be checked at once.", "print isinstance(1, int)\nprint isinstance(1.0,int)\nprint isinstance(1.0,(int,float))", "cmp(x,y)\n|x ? y|Output|\n|---|---|\n| x < y | -1 |\n| x == y | 0 |\n| x > y | 1 |", "print cmp(1,2)\nprint cmp(2,1)\nprint cmp(2,2)", "pow(x,y,z) can be used to find the power $x^y$ also the mod of the resulting value with the third specified number can be found i.e. : ($x^y$ % z).", "print pow(3,3)\nprint pow(3,3,5)", "range( ) function outputs the integers of the specified range. It can also be used to generate a series by specifying the difference between the two numbers within a particular range. The elements are returned in a list (will be discussing in detail later.)", "print range(3)\nprint range(2,9)\nprint range(2,27,8)", "Accepting User Inputs\nraw_input( ) accepts input and stores it as a string. Hence, if the user inputs a integer, the code should convert the string to an integer and then proceed.", "abc = raw_input(\"Type something here and it will be stored in variable abc \\t\")\n\ntype(abc)", "input( ), this is used only for accepting only integer inputs.", "abc1 = input(\"Only integer can be stored in variable abc \\t\")\n\ntype(abc1)", "Note that type( ) returns the format or the type of a variable or a number" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
spacy-io/thinc
examples/06_predicting_like_terms.ipynb
mit
[ "Predicting Like Polynomial Terms\nRemember in Algebra how you had to combine \"like terms\" to simplify problems? \nYou'd see expressions such as 60 + 2x^3 - 6x + x^3 + 17x in which there are 5 total terms but only 4 are \"like terms\". \n2x^3 and x^3 are like, and -6x and 17x are like, while 60 doesn't have any like siblings.\nCan we teach a model to predict that there are 4 like terms in the above expression?\nLet's give it a shot using Mathy to generate math problems and thinc to build a regression model that outputs the number of like terms in each input problem.", "!pip install \"thinc>=8.0.0a0\" mathy", "Sketch a Model\nBefore we get started it can be good to have an idea of what input/output shapes we want for our model.\nWe'll convert text math problems into lists of lists of integers, so our example (X) type can be represented using thinc's Ints2d type.\nThe model will predict how many like terms there are in each sequence, so our output (Y) type can represented with the Floats2d type.\nKnowing the thinc types we want enables us to create an alias for our model, so we only have to type out the verbose generic signature once.", "from typing import List\nfrom thinc.api import Model\nfrom thinc.types import Ints2d, Floats1d\n\nModelX = Ints2d\nModelY = Floats1d\nModelT = Model[List[ModelX], ModelY]", "Encode Text Inputs\nMathy generates ascii-math problems and we have to encode them into integers that the model can process. 
\nTo do this we'll build a vocabulary of all the possible characters we'll see, and map each input character to its index in the list.\nFor math problems our vocabulary will include all the characters of the alphabet, numbers 0-9, and special characters like *, -, ., etc.", "from typing import List\nfrom thinc.api import Model\nfrom thinc.types import Ints2d, Floats1d\nfrom thinc.api import Ops, get_current_ops\n\nvocab = \" .+-/^*()[]-01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\ndef encode_input(text: str) -> ModelX:\n ops: Ops = get_current_ops()\n indices: List[List[int]] = []\n for c in text:\n if c not in vocab:\n raise ValueError(f\"'{c}' missing from vocabulary in text: {text}\")\n indices.append([vocab.index(c)])\n return ops.asarray2i(indices)", "Try It\nLet's try it out on some fixed data to be sure it works.", "outputs = encode_input(\"4+2\")\nassert outputs[0][0] == vocab.index(\"4\")\nassert outputs[1][0] == vocab.index(\"+\")\nassert outputs[2][0] == vocab.index(\"2\")\nprint(outputs)", "Generate Math Problems\nWe'll use Mathy to generate random polynomial problems with a variable number of like terms. 
The generated problems will act as training data for our model.", "from typing import List, Optional, Set\nimport random\nfrom mathy.problems import gen_simplify_multiple_terms\n\ndef generate_problems(number: int, exclude: Optional[Set[str]] = None) -> List[str]:\n if exclude is None:\n exclude = set()\n problems: List[str] = []\n while len(problems) < number:\n text, complexity = gen_simplify_multiple_terms(\n random.randint(2, 6),\n noise_probability=1.0,\n noise_terms=random.randint(2, 10),\n op=[\"+\", \"-\"],\n )\n assert text not in exclude, \"duplicate problem generated!\"\n exclude.add(text)\n problems.append(text)\n return problems", "Try It", "generate_problems(10)", "Count Like Terms\nNow that we can generate input problems, we'll need a function that can count the like terms in each one and return the value for use as a label.\nTo accomplish this we'll use a few helpers from mathy to enumerate the terms and compare them to see if they're like.", "from typing import Optional, List, Dict\nfrom mathy import MathExpression, ExpressionParser, get_terms, get_term_ex, TermEx\nfrom mathy.problems import mathy_term_string\n\nparser = ExpressionParser()\n\ndef count_like_terms(input_problem: str) -> int:\n expression: MathExpression = parser.parse(input_problem)\n term_nodes: List[MathExpression] = get_terms(expression)\n node_groups: Dict[str, List[MathExpression]] = {}\n for term_node in term_nodes:\n ex: Optional[TermEx] = get_term_ex(term_node)\n assert ex is not None, f\"invalid expression {term_node}\"\n key = mathy_term_string(variable=ex.variable, exponent=ex.exponent)\n if key == \"\":\n key = \"const\"\n if key not in node_groups:\n node_groups[key] = [term_node]\n else:\n node_groups[key].append(term_node)\n like_terms = 0\n for k, v in node_groups.items():\n if len(v) <= 1:\n continue\n like_terms += len(v)\n return like_terms", "Try It", "assert count_like_terms(\"4x - 2y + q\") == 0\nassert count_like_terms(\"x + x + z\") == 2\nassert 
count_like_terms(\"4x + 2x - x + 7\") == 3", "Generate Problem/Answer pairs\nNow that we can generate problems, count the number of like terms in them, and encode their text into integers, we have the pieces required to generate random problems and answers that we can train a neural network with.\nLet's write a function that will return a tuple of: the problem text, its encoded example form, and the output label.", "from typing import Tuple\nfrom thinc.api import Ops, get_current_ops\n\ndef to_example(input_problem: str) -> Tuple[str, ModelX, ModelY]:\n ops: Ops = get_current_ops()\n encoded_input = encode_input(input_problem)\n like_terms = count_like_terms(input_problem)\n return input_problem, encoded_input, ops.asarray1f([like_terms])", "Try It", "text, X, Y = to_example(\"x+2x\")\nassert text == \"x+2x\"\nassert X[0] == vocab.index(\"x\")\nassert Y[0] == 2\nprint(text, X, Y)", "Build a Model\nNow that we can generate X/Y values, let's define our model and verify that it can process a single input/output.\nFor this we'll use Thinc and the define_operators context manager to connect the pieces together using overloaded operators for chain and clone operations.", "from typing import List\nfrom thinc.model import Model\nfrom thinc.api import concatenate, chain, clone, list2ragged\nfrom thinc.api import reduce_sum, Mish, with_array, Embed, residual\n\ndef build_model(n_hidden: int, dropout: float = 0.1) -> ModelT:\n with Model.define_operators({\">>\": chain, \"|\": concatenate, \"**\": clone}):\n model = (\n # Iterate over each element in the batch\n with_array(\n # Embed the vocab indices\n Embed(n_hidden, len(vocab), column=0)\n # Activate each batch of embedding sequences separately first\n >> Mish(n_hidden, dropout=dropout)\n )\n # Convert to ragged so we can use the reduction layers\n >> list2ragged()\n # Sum the features for each batch input\n >> reduce_sum()\n # Process with a small resnet\n >> residual(Mish(n_hidden, normalize=True)) ** 4\n # Convert 
(batch_size, n_hidden) to (batch_size, 1)\n >> Mish(1)\n )\n return model", "Try It\nLet's pass an example through the model to make sure we have all the sizes right.", "text, X, Y = to_example(\"14x + 2y - 3x + 7x\")\nm = build_model(12)\nm.initialize([X], m.ops.asarray(Y, dtype=\"f\"))\nmY = m.predict([X])\nprint(mY.shape)\nassert mY.shape == (1, 1)", "Generate Training Datasets\nNow that we can generate examples and we have a model that can process them, let's generate random unique training and evaluation datasets.\nFor this we'll write another helper function that can generate (n) training examples and respects an exclude list to avoid letting examples from the training/test sets overlap.", "from typing import Tuple, Optional, Set, List\n\nDatasetTuple = Tuple[List[str], List[ModelX], List[ModelY]]\n\ndef generate_dataset(\n size: int,\n exclude: Optional[Set[str]] = None,\n) -> DatasetTuple:\n ops: Ops = get_current_ops()\n texts: List[str] = generate_problems(size, exclude=exclude)\n examples: List[ModelX] = []\n labels: List[ModelY] = []\n for i, text in enumerate(texts):\n text, x, y = to_example(text)\n examples.append(x)\n labels.append(y)\n\n return texts, examples, labels", "Try It\nGenerate a small dataset to be sure everything is working as expected", "texts, x, y = generate_dataset(10)\nassert len(texts) == 10\nassert len(x) == 10\nassert len(y) == 10", "Evaluate Model Performance\nWe're almost ready to train our model, we just need to write a function that will check a given trained model against a given dataset and return a 0-1 score of how accurate it was.\nWe'll use this function to print the score as training progresses and print final test predictions at the end of training.", "from typing import List\nfrom wasabi import msg\n\ndef evaluate_model(\n model: ModelT,\n *,\n print_problems: bool = False,\n texts: List[str],\n X: List[ModelX],\n Y: List[ModelY],\n):\n Yeval = model.predict(X)\n correct_count = 0\n print_n = 12\n if 
print_problems:\n msg.divider(f\"eval samples max({print_n})\")\n for text, y_answer, y_guess in zip(texts, Y, Yeval):\n y_guess = round(float(y_guess))\n correct = y_guess == int(y_answer)\n print_fn = msg.fail\n if correct:\n correct_count += 1\n print_fn = msg.good\n if print_problems and print_n > 0:\n print_n -= 1\n print_fn(f\"Answer[{int(y_answer[0])}] Guess[{y_guess}] Text: {text}\")\n if print_problems:\n print(f\"Model predicted {correct_count} out of {len(X)} correctly.\")\n return correct_count / len(X)\n", "Try It\nLet's try it out with an untrained model and expect to see a really sad score.", "texts, X, Y = generate_dataset(128)\nm = build_model(12)\nm.initialize(X, m.ops.asarray(Y, dtype=\"f\"))\n# Assume the model should do so poorly as to round down to 0\nassert round(evaluate_model(m, texts=texts, X=X, Y=Y)) == 0", "Train/Evaluate a Model\nThe final helper function we need is one to train and evaluate a model given two input datasets. \nThis function does a few things:\n\nCreate an Adam optimizer we can use for minimizing the model's prediction error.\nLoop over the given training dataset (epoch) number of times.\nFor each epoch, make batches of (batch_size) examples. For each batch(X), predict the number of like terms (Yh) and subtract the known answers (Y) to get the prediction error. 
Update the model using the optimizer with the calculated error.\nAfter each epoch, check the model performance against the evaluation dataset.\nSave the model weights for the best score out of all the training epochs.\nAfter all training is done, restore the best model and print results from the evaluation set.", "from thinc.api import Adam\nfrom wasabi import msg\nimport numpy\nfrom tqdm.auto import tqdm\n\ndef train_and_evaluate(\n model: ModelT,\n train_tuple: DatasetTuple,\n eval_tuple: DatasetTuple,\n *,\n lr: float = 3e-3,\n batch_size: int = 64,\n epochs: int = 48,\n) -> float:\n (train_texts, train_X, train_y) = train_tuple\n (eval_texts, eval_X, eval_y) = eval_tuple\n msg.divider(\"Train and Evaluate Model\")\n msg.info(f\"Batch size = {batch_size}\\tEpochs = {epochs}\\tLearning Rate = {lr}\")\n\n optimizer = Adam(lr)\n best_score: float = 0.0\n best_model: Optional[bytes] = None\n for n in range(epochs):\n loss = 0.0\n batches = model.ops.multibatch(batch_size, train_X, train_y, shuffle=True)\n for X, Y in tqdm(batches, leave=False, unit=\"batches\"):\n Y = model.ops.asarray(Y, dtype=\"float32\")\n Yh, backprop = model.begin_update(X)\n err = Yh - Y\n backprop(err)\n loss += (err ** 2).sum()\n model.finish_update(optimizer)\n score = evaluate_model(model, texts=eval_texts, X=eval_X, Y=eval_y)\n if score > best_score:\n best_model = model.to_bytes()\n best_score = score\n print(f\"{n}\\t{score:.2f}\\t{loss:.2f}\")\n\n if best_model is not None:\n model.from_bytes(best_model)\n print(f\"Evaluating with best model\")\n score = evaluate_model(\n model, texts=eval_texts, print_problems=True, X=eval_X, Y=eval_y\n )\n print(f\"Final Score: {score}\")\n return score\n", "We'll generate the dataset first, so we can iterate on the model without having to spend time generating examples for each run. 
This also ensures we have the same dataset across different model runs, to make it easier to compare performance.", "train_size = 1024 * 8\ntest_size = 2048\nseen_texts: Set[str] = set()\nwith msg.loading(f\"Generating train dataset with {train_size} examples...\"):\n train_dataset = generate_dataset(train_size, seen_texts)\nmsg.good(f\"Train set created with {train_size} examples.\")\nwith msg.loading(f\"Generating eval dataset with {test_size} examples...\"):\n eval_dataset = generate_dataset(test_size, seen_texts)\nmsg.good(f\"Eval set created with {test_size} examples.\")\ninit_x = train_dataset[1][:2]\ninit_y = train_dataset[2][:2]", "Finally, we can build, train, and evaluate our model!", "model = build_model(64)\nmodel.initialize(init_x, init_y)\ntrain_and_evaluate(\n model, train_dataset, eval_dataset, lr=2e-3, batch_size=64, epochs=16\n)", "Intermediate Exercise\nThe model we built can train up to ~80% given 100 or more epochs. Improve the model architecture so that it trains to a similar accuracy while requiring fewer epochs or a smaller dataset size.", "from typing import List\nfrom thinc.model import Model\nfrom thinc.types import Array2d, Array1d\nfrom thinc.api import chain, clone, list2ragged, reduce_mean, Mish, with_array, Embed, residual\n\ndef custom_model(n_hidden: int, dropout: float = 0.1) -> Model[List[Array2d], Array2d]:\n # Put your custom architecture here\n return build_model(n_hidden, dropout)\n\nmodel = custom_model(64)\nmodel.initialize(init_x, init_y)\ntrain_and_evaluate(\n model, train_dataset, eval_dataset, lr=2e-3, batch_size=64, epochs=16\n)", "Advanced Exercise\nRewrite the model to encode the whole expression with a BiLSTM, and then generate pairs of terms, using the BiLSTM vectors. 
Over each pair of terms, predict whether the terms are alike or unlike.", "from dataclasses import dataclass\nfrom thinc.types import Array2d, Ragged\nfrom thinc.model import Model\n\n\n@dataclass\nclass Comparisons:\n data: Array2d # Batch of vectors for each pair\n indices: Array2d # Int array of shape (N, 3), showing the (batch, term1, term2) positions\n\ndef pairify() -> Model[Ragged, Comparisons]:\n \"\"\"Create pair-wise comparisons for items in a sequence. For each sequence of N\n items, there will be (N**2-N)/2 comparisons.\"\"\"\n ...\n\ndef predict_over_pairs(model: Model[Array2d, Array2d]) -> Model[Comparisons, Comparisons]:\n \"\"\"Apply a prediction model over a batch of comparisons. Outputs a Comparisons\n object where the data is the scores. The prediction model should predict over\n two classes, True and False.\"\"\"\n ...\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
VandyAstroML/Vanderbilt_Computational_Bootcamp
notebooks/Week_14/14_Astropy.ipynb
mit
[ "Week 14 - Astropy\nToday's Agenda\n\nUseful functions of Astropy\nUnits\nTime\nCoordinates\nFITS files\nAnalytic functions\nAstroPy Tables and different formats\n\nAstropy is a package that is meant to provide a lot of basic functionality for astronomy work in Python\nThis can be roughly broken up into two areas. One is astronomical calculations:\n* unit and physical quantity conversions\n* physical constants specific to astronomy\n* celestial coordinate and time transformations\nThe other is file type and structures:\n* FITS files, implementing the former standalone PyFITS interface\n* Virtual Observatory (VO) tables\n* common ASCII table formats, e.g. for online catalogues or data supplements of scientific publications\n* Hierarchical Data Format (HDF5) files\nAstroPy normallly comes with the Anaconda installation. But in case you happen to not have it installed it on your computer, you can simply do a \nsh\n pip install --no-deps astropy\nYou can always update it via \nsh\n conda update astropy\nThis is just a glimpse of all the features that AstroPy has:\n<img src=\"./images/astropy_sections.png\" alt=\"Astropy Features\" width=\"600\">\nFor purposes of today, we'll focus just on what astropy can do for units, time, coordinates, image manipulation, and more.", "# Importing Modules\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nsns.set_context(\"notebook\")\n\nimport astropy\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy import constants as const", "Units\nAstropy.units introduces units and allows for unit conversions. It doesn't, however, correctly handle spherical coordinates, but the astropy.coordinates package will address this later.\nThese units can be used to create objects that are made up of both a value and a unit, and basic math can be easily carried out with these. 
We can add the .unit and .value properties to get the units and numerical values, respectively.", "d=42*u.meter\nt=6*u.second\nv=d/t\nprint v\nprint v.unit", "Astropy includes a large number of units, and this can include imperial units as well if desired by importing and enabling imperial units. The .find_equivalent_units() function will also return all the other units that are already defined in astropy. Below we do a quick list of the units that are defined for time and length units", "from astropy.units import imperial\nimperial.enable()\nprint( u.s.find_equivalent_units() )\nprint( u.m.find_equivalent_units() )", "The package also provides constants, with the units included. The full list of units can be found here. We can take a quick look at c and G below, and see that these are objects which have value, uncertainty, and units.", "print const.c\nprint const.G", "Astropy has an aditional function that will allow for unit conversions. So we can, for example, create an object that is the distance to Mars, and then convert that to kilometers or miles. A brief note is that if you try to convert a pure unit (like the 4th line below) into another unit, you'll get a unitless value representing the conversion between the two.\nThis can also be used to convert constants into other units, so we can convert the speed of light to the somewhat useful pc/yr or the entirely unuseful furlong/fortnight", "Mars=1.5*u.AU\nprint Mars.to('kilometer')\nprint Mars.to('mile')\nprint u.AU.to('kilometer')\nprint const.c.to('pc/yr')\nprint const.c.to('fur/fortnight')", "To use this more practically, we can calculate the time it will take for light to reach the earth just by dividing 1 AU by the speed of light, as done below. Since AU is a unit, and c is in m/s, we end up with an answer that is (AU*m/s). By using .decompose() we can simplify that expression, which in this case will end up with an answer that is just in seconds. 
Finally, we can then convert that answer to minutes to get the answer of about 8 1/3 minutes that is commonly used. None of this required our doing the conversions where we might've slipped up.", "time=1*u.AU/const.c\nprint(time)\n\ntime_s=time.decompose()\nprint(time_s)\n\ntime_min=time_s.to(u.minute)\nprint(time_min)", "Time\nAstropy handles time in a similar way to units, with creating Time objects. These objects have two main properties.\nThe format is simply how the time is displayed. This is the difference between, for example, Julian Date, Modified Julian Date, and ISO time (YYYY-MM-DD HH:MM:SS). The second is the scale, and is the difference between terrestrial time vs time at the barycenter of the solar system.\nWe can start off by changing a time from one format to many others. We can also subtract times and we will get a timedelta unit.", "from astropy.time import Time\nt=Time(57867.346424, format='mjd', scale='utc')\nt1=Time(58867.346424, format='mjd', scale='utc')\nprint t.mjd\nprint t.iso\nprint t.jyear\nt1-t", "Coordinates\nCoordinates again work by using an object time defined for this purpose. We can establish a point in the ICRS frame (this is approximately the equatorial coordinate) by defining the ra and dec. Note that here we are using u.degree in specifying the coordinates.\nWe can then print out the RA and dec, as well as change the units displayed. In the last line, we can also convert from ICRS equatorial coordinates to galactic coordinates.", "c = SkyCoord(ra=10.68458*u.degree, dec=41.26917*u.degree, frame='icrs')\nprint c\nprint c.ra\nprint c.dec\nprint c.ra.hour\nprint c.ra.hms\nprint c.galactic", "Slightly practical application of this\nUsing some of these astropy functions, we can do some fancier applications. Starting off, we import a listing of stars with RA and dec from the attached table, and store them in the coordinate formats that are used by astropy. 
We then use matplotlib to plot this, and are able to easily convert them into radians thanks to astropy. This plot is accurate, but it lacks reference for where these points are.", "hosts={}\ndata=np.loadtxt('./data/planets.tab', dtype='str', delimiter='\\t')\nprint data[0]\nhosts['ra_hours']=data[1:,9].astype(float)\nhosts['ra']=data[1:,6].astype(float)\nhosts['dec']=data[1:,8].astype(float)\n#print hosts['ra_hours']\n#print hosts['dec']\n\nimport astropy.units as u\nimport astropy.coordinates as coord\nfrom astropy.coordinates import SkyCoord\nra = coord.Angle(hosts['ra']*u.degree)\nra = ra.wrap_at(180*u.degree)\ndec = coord.Angle(hosts['dec']*u.degree)\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=\"mollweide\")\nplt.title('Map of Exoplanets')\nax.scatter(ra.radian, dec.radian)\nax.set_xticklabels(['14h','16h','18h','20h','22h','0h','2h','4h','6h','8h','10h'])\nax.grid(True)\nplt.show()", "To fix this, we will add some references to this by adding a few more sets of data points. The first is relatively simple, we put in a line at the celestial equator. This just has to be a set of points that are all at declination of 0, and from -180 to +180 degrees in RA. These are a and b in the below.\nWe also want to add the planes of the ecliptic and the galaxy on this. For both, we use coordinate objects and provide numpy arrays where one coordinate is at zero, and the other goes from 0 to 360. With astropy we can then easily convert from each coordinate system to ICRS. 
There's some for loops to modify the plotting, but the important thing is that this will give us a plot that has not just the locations of all the planets that we've plotted, but will also include the celestial equator, galactic plane, and ecliptic plane on it.", "a=coord.Angle((np.arange(361)-180)*u.degree)\nb=coord.Angle(np.zeros(len(a))*u.degree)\nnumpoints=360\ngalaxy=SkyCoord(l=coord.Angle((np.arange(numpoints))*u.degree), b=coord.Angle(np.zeros(numpoints)*u.degree), frame='galactic')\necliptic=SkyCoord(lon=coord.Angle((np.arange(numpoints))*u.degree), lat=coord.Angle(np.zeros(numpoints)*u.degree), frame='geocentrictrueecliptic')\necl_eq=ecliptic.icrs\ngal_eq=galaxy.icrs\n#print gal_eq\nfixed_ra=[]\nfor item in gal_eq.ra.radian:\n if item < np.pi:\n fixed_ra.append(item)\n else:\n fixed_ra.append(item-2*np.pi)\ni=np.argmin(fixed_ra)\nfixed_dec=[x for x in gal_eq.dec.radian]\n\nfixed_ra_eq=[]\nfor item in ecl_eq.ra.radian:\n if item < np.pi:\n fixed_ra_eq.append(item)\n else:\n fixed_ra_eq.append(item-2*np.pi)\nj=np.argmin(fixed_ra_eq)\nfixed_dec_eq=[x for x in ecl_eq.dec.radian]\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, projection=\"mollweide\")\nplt.title('Map of Exoplanets')\nax.scatter(ra.radian, dec.radian)\nax.plot(a.radian, b.radian, color='r', lw=2)\n#ax.scatter(gal_eq.ra.radian, gal_eq.dec.radian, color='g')\nax.plot(fixed_ra[i:]+fixed_ra[:i], fixed_dec[i:]+fixed_dec[:i], color='g', lw=2)\nax.plot(fixed_ra_eq[j:]+fixed_ra_eq[:j], fixed_dec_eq[j:]+fixed_dec_eq[:j], color='m', lw=2)\nax.set_xticklabels(['14h','16h','18h','20h','22h','0h','2h','4h','6h','8h','10h'])\nax.grid(True)\nplt.show()", "Reading in FITS files\nOne of the useful things with Astropy is that you can use it for reading in FITS files, and extracting info such as bands, exposure times, intrument information, etc.\nIn this example, we will read in a FITS image file, and extract its information", "# We will use `wget` to download the necessary file to the `data` 
folder.\n!wget 'http://star.herts.ac.uk/~gb/python/656nmos.fits' -O ./data/hst_image.fits", "Now we can extract some of the information stored in the FITS file.", "from astropy.io import fits\nfilename = './data/hst_image.fits'\nhdulist = fits.open(filename)", "The returned object, hdulist, (an instance of the HDUList class) behaves like a Python list, and each element maps to a Header-Data Unit (HDU) in the FITS file. You can view more information about the FITS file with:", "hdulist.info()", "As we can see, this file contains two HDUs. The first contains the image, the second a data table. To access the primary HDU, which contains the main data, you can then do:", "hdu = hdulist[0]", "To read the header of the FITS file, you can read hdulist. The following shows the different keys for the header", "np.sort(hdulist[0].header.keys())", "As we can see, this file contains two HDUs. The first contains the image, the second a data table.\nLet's look at the image of the FITS file.\nThe hdu object then has two important attributes: data, which behaves like a Numpy array, can be used to access the data, and header, which behaves like a dictionary, can be used to access the header information. First, we can take a look at the data:", "hdu.data.shape", "This tells us that it is a 1600-by-1600 pixel image. We can now take a peak at the header. To access the primary HDU, which contains the main data, you can then do:", "hdu.header", "We can access individual header keywords using standard item notation:", "hdu.header['INSTRUME']\n\nhdu.header['EXPTIME']", "We can plot the image using matplotlib:", "plt.figure(figsize=(10,10))\nplt.imshow(np.log10(hdu.data), origin='lower', cmap='gray', vmin=1.5, vmax=3)", "You can also add new fields to the FITS file", "hdu.header['MODIFIED'] = '2014-12-01' # adds a new keyword", "and we can also change the data, for example subtracting a background value:", "hdu.data = hdu.data - 0.5", "This only changes the FITS file in memory. 
You can write to a file with:", "hdu.writeto('./data/hubble-image-background-subtracted.fits', overwrite=True)\n\n!ls ./data", "Analytic Functions\nAstropy comes with some built-in analytic functions, e.g. the blackbody radiation function.\nBlackbody Radiation\nBlackbody flux is calculated with Planck law (Rybicki & Lightman 1979)\n$$B_{\\lambda}(T) = \\frac{2 h c^{2} / \\lambda^{5}}{exp(h c / \\lambda k T) - 1}$$\n$$B_{\\nu}(T) = \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}$$", "from astropy.analytic_functions import blackbody_lambda, blackbody_nu\n\ndef Planck_func(temp, lam_arr, opt='lam'):\n \"\"\"\n Computes the Blackbody radiation curve of a blackbody of a given temperature `temp`.\n \n Parameters\n ----------\n temp: float or array-like\n temperature(s) of the blackbody\n \n lam_arr: float or array_like\n aray of wavelenths to evaluate the Planck function.\n \n opt: str, optional (default = 'lam')\n Option for returning either the flux of `lambda` (wavelength) or `nu` (frequency).\n Options:\n - `lam`: Return flux for `lambda' or wavelength\n - `nu` : Returns flux for `nu` (frequency)\n \"\"\"\n wavelengths = lam_arr * u.AA\n temperature = temp * u.K\n with np.errstate(all='ignore'):\n flux_lam = blackbody_lambda(wavelengths, temperature)\n flux_nu = blackbody_nu(wavelengths, temperature)\n \n if opt=='lam':\n return flux_lam\n if opt=='nu':\n return flux_nu", "Let's plot the Planck function for two bodies with temperatures $T_1 = 8000\\ K$ and $T_2 = 6000\\ K$", "lam_arr = np.arange(1e2, 2e4)\nnu_arr = (const.c/(lam_arr * u.AA)).to(1./u.s).value\n\nfig = plt.figure(figsize=(15,8))\nax1 = fig.add_subplot(121, axisbg='white')\nax2 = fig.add_subplot(122, axisbg='white')\nax1.set_xlabel(r'$\\lambda$ (Ansgstrom)', fontsize=25)\nax1.set_ylabel(r'$B_{\\lambda}(T)$', fontsize=25)\nax2.set_xlabel(r'$\\nu$ (\\textrm{s}^{-1})', fontsize=25)\nax2.set_ylabel(r'$B_{\\nu}(T)$', fontsize=25)\nax2.set_xscale('log')\n\ntemp_arr = [6e3, 8e3, 1e4, 1.2e4]\nfor temp in 
temp_arr:\n ax1.plot(lam_arr, Planck_func(temp, lam_arr=lam_arr, opt='lam'), label='T = {0} K'.format(int(temp)))\n ax2.plot(nu_arr , Planck_func(temp, lam_arr=lam_arr, opt='nu' ), label='T = {0} K'.format(int(temp)))\n ax1.legend(loc=1, prop={'size':20})", "AstroPy Tables\nRead files\nYou can use Astropy to read tables from data files. We'll use it to read the sources.dat file, which contains columns and rows of data", "!head ./data/sources.dat\n\nfrom astropy.io import ascii\nsources_tb = ascii.read('./data/sources.dat')\n\nprint( sources_tb )", "Write to files\nYou can also write directoy to a file using the data in the AstroPy table.\nLet's create a new AstroPy Table:", "from astropy.table import Table, Column, MaskedColumn\nx = np.random.uniform(low=10, high=20, size=(1000,))\ny = np.random.uniform(low=100, high=50, size=(x.size,))\nz = np.random.uniform(low=30, high=50, size=(x.size,))\ndata = Table([x, y], names=['x', 'y'])\nprint(data)\n\nascii.write(data, './data/astropy_data.tb', overwrite=True)", "Let's see what's in the astropy_data.tb file", "!head ./data/astropy_data.tb", "You can also specify the delimiter of the file. For example, we can separate it with a comma.", "ascii.write(data, './data/astropy_data_2.tb', delimiter=',', overwrite=True)\n\n!head ./data/astropy_data_2.tb", "AstroPy Tables to other Formats\nThe AstroPy tables can also be converted to multiple formats\nto Pandas DataFrames\nA nice feature of AstroPy Tables is that you can export your data into different formats. \nFor example, you can export it as a Pandas Dataframe. \nSee here for more info on how to use pandas with Astropy: http://docs.astropy.org/en/stable/table/pandas.html", "df = data.to_pandas()\ndf.head()", "And to compare, let's see the AstroPy Tables format", "data", "to LaTeX tables\nA nice thing about AstroPy is that you can convert your data into LaTeX tables. This is easily done with writing it to a file. 
You can then copy it and use it on your next publication", "import sys\nascii.write(data[0:10], sys.stdout, format='latex')", "To save it as a file, you can do this:", "ascii.write(data, './data/astropy_data_latex.tex', format='latex')\n\n# I'm only showing the first 10 lines\n!head ./data/astropy_data_latex.tex", "to CSV files", "ascii.write(data, './data/astropy_data_csv.csv', format='csv', fast_writer=False) \n\n!head ./data/astropy_data_csv.csv", "Other formats\nAstroPy tables come with a great support for many different types of files.\nThis is a list of the supported files that you can import/export AstroPy tables.\n\nData tables and Column types\nYou can also use AstroPy tables to preserve the metadata of a column. For example, you can keep the units of each column, so that you use the data later on, and still be able to use unit conversions, etc. for this.", "t = Table(masked=True)\nt['x'] = MaskedColumn([1.0, 2.0], unit='m', dtype='float32')\nt['x'][1] = np.ma.masked\nt['y'] = MaskedColumn([False, True], dtype='bool')\n\nt", "Now we can save it into a ecsv file. 
This type of file will preserve the type of units, and more, for each of the columns", "from astropy.extern.six.moves import StringIO\nfh = StringIO()\nt.write(fh, format='ascii.ecsv') \ntable_string = fh.getvalue() \nprint(table_string) \n\nTable.read(table_string, format='ascii') ", "Or you can dump it into a file", "t.write('./data/astropy_data_ecsv.ecsv', format='ascii.ecsv', overwrite=True)", "And you can now read it in", "data_ecsv = ascii.read('./data/astropy_data_ecsv.ecsv', format='ecsv')\ndata_ecsv\n\ndata_ecsv['x']", "Resources\nFor further reading and exercises, you can check out:\n- Astropy Documents: http://docs.astropy.org/en/stable/index.html\n- Programming and Statistics - Python Notes (Notes from a class at the University of Hertfordshire) https://star.herts.ac.uk/~gb/python/\n- HST FITS files for fun: https://www.spacetelescope.org/projects/fits_liberator/m17data/\n- AstroPy on Aperture Photometry: http://photutils.readthedocs.io/en/stable/photutils/aperture.html\n- AstroPy Tutorials http://www.astropy.org/astropy-tutorials/\n- Photutils - AstroPy package for doing Photometry http://photutils.readthedocs.io/en/stable/index.html" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
geekandtechgirls/Women-In-Django
Soluciones.ipynb
gpl-3.0
[ "Soluciones a los ejercicios propuestos\nNivel básico\n1.\nHaz un pequeño programa que le pida al usuario introducir dos números ($x_1$ y $x_2$), calcule la siguiente operación y muestre el resultado de la misma ($x$):\n$$ x = \\frac{20 * x_1 - x_2}{x_2 + 3} $$\nSi intentas operar con el resultado de la función input obtendrás un error que te informa que no se pueden restar dos datos de tipo str. Usa la función int para convertir los datos introducidos por teclado a datos numéricos.", "x1 = int(input(\"Introduce un número: \"))\nx2 = int(input(\"Y ahora otro: \"))\nx = (20 * x1 - x2)/(x2 + 3)\n\nprint(\"x =\",x)", "2.\nHaz un programa que le pida al usuario un número (de ninjas). Si dicho número es menor que 50 y es par, el programa imprimirá \"puedo con ellos!\", en caso contrario imprimirá \"no me vendría mal una ayudita...\"\nNota: para saber si un número es par o no debes usar el operador $\\%$ y para saber si dos condiciones se cuplen a la vez, el operador lógico and", "num = int(input(\"Introduce número de ninjas: \"))\nif num < 50 and num%2==0:\n print(\"Puedo con ellos!\")\nelse:\n print(\"No me vendría mal una ayudita...\")", "3.\nHaz un bucle while que imprima todos los números desde el 0 hasta un número que introduzca el usuario. Si el número que introduce es negativo puedes tomar dos decisiones: pedirle que introduzca un número positivo o contar hacia atrás, tú eliges!", "num = int(input(\"Intoduce un número: \"))\n\n# Opción 1: si el usuario introduce un número negativo pedir otro número\nwhile num < 0:\n num = int(input(\"Introduce un número: \"))\n\ni = 0\nwhile i <= num:\n print(i)\n i += 1\n\nnum = int(input(\"Intoduce un número: \"))\n\n# Opción 2: si el usuario introduce un número negativo, contar hacia atrás\nsign = lambda x: (1, -1)[x < 0]\n\ni = 0\ns = sign(num)\nwhile i*s <= num*s:\n print(i)\n i += s", "4.\nGenera con range los números pares del 0 al 10, ambos inclusive. 
¿Qué cambiarías para generar del 2 al 10?", "# Para generar del 0 al 10 ambos inclusive:\nfor i in range(0,11):\n print(i)\n\n# Para generar del 2 al 10 sólo con números pares\nfor i in range(2, 11, 2):\n print(i)", "5.\n¿Cuál es la diferencia entre la sentencia break y la sentencia continue?\nCuando en un bucle se lee una instrucción break o una instrucción continue, se interumpe la iteración actual. Ahora bien, en el caso de break, se abandona el bucle y en el caso de continue se pasa a la siguiente iteración. Por ejemplo, el siguiente bucle imprime si un número es par o impar:", "for num in range(2,10):\n if num % 2 == 0:\n print(num, \"es par!\")\n continue\n print(num, \"es impar!\")", "6.\nHaz una lista de la compra e imprime los siguientes elementos:\n\nPenúltimo elemento\nDel segundo al cuarto elemento\nLos tres últimos\nTodos!\n\nPor último, elimina el tercer elemento de la lista usando la sentencia del", "lista_compra = ['Leche', 'Chocolate', 'Arroz', 'Macarrones']\n\nprint(\"Penúltimo elemento: \", lista_compra[-2])\nprint(\"Del segundo al cuarto elemento: \", lista_compra[1:5])\nprint(\"Los tres últimos elementos: \", lista_compra[-3:])\nprint(\"Todos: \", lista_compra)\n\ndel lista_compra[2]\nprint(lista_compra)", "7.\nCrea una lista con todos los números pares del 0 al 10 en una única línea.", "# solución 1:\n[x for x in range(10) if x%2==0]\n\n# solución 2:\nlist(range(0,10,2))", "8.\nCrea la siguiente matriz en una línea:\n$$ M_{2 \\times 3} = \\left( \\begin{matrix} 1 & 2 & 3 \\ \n4 & 5 & 6 \\end{matrix} \\right)$$", "[[j for j in range(i*i, i*i+3)] for i in range(1,3)]", "9.\nVuelve a hacer la lista de la compra que hiciste en el último ejercicio, pero esta vez guarda cada elemento de la lista de la compra junto con su precio. 
Después, imprime los siguientes elementos:\n\nEl precio del tercer elemento.\nEl nombre del último elemento.\nTanto el nombre como el precio del primer elemento.", "tuplas_compra = [('Leche', 2), ('Chocolate', 1), ('Arroz', 1.5), \n ('Macarrones', 2.1)]\nprint(\"Precio del tercer elemento: \", tuplas_compra[2][1])\nprint(\"Nombre del último elemento: \", tuplas_compra[-1][0])\nprint(\"Nombre y precio del primer elemento\", tuplas_compra[0])", "10.\n¿Es buena idea usar la función set para eliminar los elementos repetidos de una lista?\nAl usar la función set para eliminar los elementos repetidos de una lista perdemos el orden original de nuestra lista. Además, no funcionará si nuestra lista es de diccionarios o de listas, debido a que no son objetos hashables.\n11.\nUsando la tupla que creaste en el ejercicio sobre tuplas, crea un diccionario de tu lista de la compra. Una vez tengas el diccionario creado:\n\nImprime todos los elementos que vayas a comprar creando la siguiente frase con la función format: \"he comprado __ y me ha costado __\".\nConsulta si has añadido un determinado elemento (por ejemplo un cartón de leche) a la lista de la compra\nElimina un elemento usando la función del", "dict_compra = dict(tuplas_compra)\n\nfor compra in dict_compra.items():\n print(\"he comprado {} y me ha costado {}\".format(compra[0], compra[1]))\n\nprint('He comprado leche?', 'Leche' in dict_compra)\n\ndel dict_compra['Arroz']\nprint(dict_compra)", "Nivel medio\n1.\nAhora que hemos visto cómo crear arrays a partir de un objeto y otros para crear arrays con tipos prefijados, crea distintos arrays con las funciones anteriores para 1D, 2D y 3D e imprímelas por pantalla. Prueba a usar distintos tipos para ver cómo cambian los arrays. 
Si tienes dudas sobre cómo usarlos, puedes consultar la documentación oficial.", "import numpy as np\n\nprint(np.ones(5, dtype=np.int8))\n\nprint(np.random.random(5))\n\nprint(np.full(shape=(3,3), fill_value=4, dtype=np.int8))\n\nprint(np.arange(6))\n\nprint(np.linspace(start=1, stop=6, num=10))\n\nprint(np.eye(N=2))\n\nprint(np.identity(n=3, dtype=np.int8))", "2.\nGracias a las distintas formas de indexar un array que nos permite NumPy, podemos hacer operaciones de forma vectorizada, evitando los bucles. Esto supone un incremento en la eficiencia del código y tener un código más corto y legible. Para ello, vamos a realizar el siguiente ejercicio.\nGenera una matriz aleatoria cuadrada de tamaño 1000. Una vez creada, genera una nueva matriz donde las filas y columnas 0 y $n-1$ estén repetidas 500 veces y el centro de la matriz quede exactamente igual a la original. Un ejemplo de esto lo podemos ver a continuación:\n$$ \\left( \\begin{matrix} \n1 & 2 & 3 \\ \n2 & 3 & 4 \\ \n3 & 4 & 5\n\\end{matrix} \\right) \\Longrightarrow \\left( \\begin{matrix} \n1 & 1 & 1 & 2 & 3 & 3 & 3 \\ \n1 & 1 & 1 & 2 & 3 & 3 & 3 \\ \n1 & 1 & 1 & 2 & 3 & 3 & 3 \\ \n2 & 2 & 2 & 3 & 4 & 4 & 4 \\ \n3 & 3 & 3 & 4 & 5 & 5 & 5 \\ \n3 & 3 & 3 & 4 & 5 & 5 & 5 \\ \n3 & 3 & 3 & 4 & 5 & 5 & 5 \\end{matrix} \\right) $$\nImpleméntalo usando un bucle for y vectorizando el cálculo usando lo anteriormente visto para ver la diferencias de tiempos usando ambas variantes. 
Para medir el tiempo, puedes usar el módulo time.", "from time import time\n\ndef clona_cols_rows(size=1000, clone=500, print_matrix=False,\n create_random=True):\n if create_random:\n m = np.random.random((size,size))\n else:\n m = np.arange(size*size).reshape(size,size)\n \n n = np.zeros((size+clone*2, size+clone*2))\n \n antes = time()\n # en primer lugar, copiamos m en el centro de n\n for i in range(size):\n for j in range(size):\n n[i+clone, j+clone] = m[i,j]\n # después, copiamos la primera fila/columna en las \n # primeras clone filas/columnas\n for i in range(clone):\n n[i,clone:clone+size] = m[0]\n n[clone:clone+size, i] = m[:,0]\n # una vez copiada la primera fila/columna, pasamos a \n # copiar la última/columna\n for i in range(clone+size, size+clone*2):\n n[i, clone:clone+size] = m[-1]\n n[clone:clone+size, i] = m[:,-1]\n # por último, copiamos los valores de los extremos en las esquinas\n for i in range(clone):\n n[i, :clone] = np.full(clone, m[0,0])\n n[i, size+clone:] = np.full(clone, m[0,-1])\n n[i+size+clone, :clone] = np.full(clone, m[-1,0])\n n[i+size+clone, size+clone:] = np.full(clone, m[-1,-1])\n despues = time()\n \n if print_matrix:\n print(m)\n print(n)\n return despues-antes\n\nclona_cols_rows(size=3, clone=2, print_matrix=True, create_random=False)\n\nprint(\"Tiempo con bucle for: \", clona_cols_rows(), \" s\")\n\ndef clona_vec_cols_rows(size=1000, clone=500, print_matrix=False,\n create_random=True):\n if create_random:\n m = np.random.random((size,size))\n else:\n m = np.arange(size*size).reshape(size,size)\n \n n = np.zeros((size+clone*2, size+clone*2))\n \n antes=time()\n # en primer lugar, insertamos m en el centro de n\n n[clone:clone+size, clone:clone+size] = m\n # Copiamos la primera fila de m, en las primeras filas\n # de n, y la última fila de m en las últimas filas de n\n n[:clone, clone:clone+size] = m[0]\n n[size+clone:, clone:size+clone] = m[-1]\n # Lo mismo para las columnas\n n[:, :clone] = 
np.repeat(n[:,clone],clone).reshape(2*clone+size, clone)\n n[:, size+clone:] = np.repeat(n[:,-(clone+1)],clone).reshape(2*clone+size, clone)\n\n despues=time()\n \n if print_matrix:\n print(m)\n print(n)\n return despues-antes\n\nclona_vec_cols_rows(size=3, clone=2, print_matrix=True, create_random=False)\n\nprint(\"Tiempo vectorizando: \", clona_vec_cols_rows(), \" s\")", "3.\nUna matriz de rotación $R$ es una matriz que representa una rotación en el espacio euclídeo. Esta matriz $R$ se representa como\n$$ R = \\left( \\begin{matrix} \\cos\\theta & -\\sin\\theta \\ \n\\sin\\theta & -\\cos\\theta \n\\end{matrix} \\right) $$ \ndonde $\\theta$ es el número de ángulos rotados en sentido antihorario.\nEstas matrices son muy usadas en geometría, informática o física. Un ejemplo de uso de estas matrices puede ser el cálculo de una rotación de un objeto en un sistema gráfico, la rotación de una cámara respecto a un punto en el espacio, etc.\nEstas matrices tienen como propiedades que son matrices ortogonales (su inversa y su traspuesta son iguales) y su determinante es igual a 1. Por tanto, genera un array y muestra si ese array es una matriz de rotación.", "R = np.random.random((2,2))\n\nif (R.T == np.linalg.inv(R)).all() and np.linalg.det(R) == 1:\n print(\"Matriz de rotación!\")\nelse:\n print(\"No es matriz de rotación u_u\")", "4.\nDados el array que se ve a continuación, realiza los siguientes apartados:", "array1 = np.array([ -1., 4., -9.])", "Multiplica array1 por $\\frac{\\pi}{4}$ y calcula el seno del array resultante.\n\n\nGenera un nuevo array cuyo valor sea el doble del resultado anterior mas el vector array1.\n\n\nCalcula la norma del vector resultante. 
Para ello, consulta la documentación para ver qué función realiza esta tarea, y ten en cuenta los parámetros que recibe.", "array2 = np.sin(array1 * np.pi/4)\narray2\n\narray3 = array2 * 2 + array1\narray3\n\nnp.linalg.norm(array3)", "5.\nDada la siguiente matriz, realiza los siguientes apartados:", "n_array1 = np.array([[ 1., 3., 5.], [7., -9., 2.], [4., 6., 8.]])", "Calcula la media y la desviación típica de la matriz.\nObtén el elemento mínimo y máximo de la matriz.\nCalcula el determinante, la traza y la traspuesta de la matriz.\nCalcula la descomposición en valores singulares de la matriz.\nCalcula el valor de la suma de los elementos de la diagonal principal de la matriz.", "media = np.mean(n_array1)\ndesv_tipica = np.std(n_array1)\n\nprint(\"Media =\", media, \" y desv típica =\", desv_tipica)\n\nmaximo = np.max(n_array1)\nminimo = np.min(n_array1)\n\nprint(\"Máximo =\", maximo, \" y minimo =\", minimo)\n\ndet = np.linalg.det(n_array1)\ntraza = np.trace(n_array1)\ntraspuesta = n_array1.T\n\nU, S, V = np.linalg.svd(n_array1)\nprint(U)\nprint(S)\nprint(V)\n\nresult = np.diag(array1).sum()\nprint(\"Resultado: \", result)", "6.\nA veces, es necesario en nuestro problema, tener que eliminar los elementos repetidos de una lista, dejando aquellos que solo aparezcan una sola vez. Es muy común, que muchos usuarios llamen a la función set para esta tarea, haciendo de la lista un conjunto sin elementos repetidos, ordenándolos y luego, el resultado de esto, volverlo a convertir en una lista. Esto, puede no estar mal del todo, pero puede ser que en el caso peor, puede que estemos haciendo un gasto inútil de memoria, tiempo y cálculos, para que, en el caso de que no haya elementos repetidos, sólo obtengamos una lista ordenada.\nEs por ello, por lo que existe otra forma de hacerlo. Utilizando lo ya visto, obtén una lista sin elementos repetidos que mantengan el orden de la lista original. 
Para hacerlo aún más divertido, no uses más de 4 líneas.", "a = [1,1,1,2,5,3,4,8,5,8]\nb = []\nlist(filter(lambda x: b.append(x) if not x in b else False, a))\nprint(\"Lista original:\\t\\t\", a)\nprint(\"Lista sin repetidos:\\t\", b)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
chungjjang80/FRETBursts
notebooks/Example - 2CDE Method.ipynb
gpl-2.0
[ "Example - 2CDE Method\nThis notebook is part of smFRET burst analysis software FRETBursts.\n\nThis notebook implements the 2CDE method from Tomov 2012.\nFor a complete tutorial on burst analysis see \nFRETBursts - us-ALEX smFRET burst analysis.", "from fretbursts import *\nfrom fretbursts.phtools import phrates\nsns = init_notebook(apionly=True)\nsns.__version__\n\n# Tweak here matplotlib style\nimport matplotlib as mpl\nmpl.rcParams['font.sans-serif'].insert(0, 'Arial')\nmpl.rcParams['font.size'] = 12\n%config InlineBackend.figure_format = 'retina'", "Load Data", "url = 'http://files.figshare.com/2182601/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5'\ndownload_file(url, save_dir='./data')\n\nfilename = \"data/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5\"\n\nd = loader.photon_hdf5(filename)\nloader.alex_apply_period(d)\nd.calc_bg(fun=bg.exp_fit, time_s=20, tail_min_us='auto', F_bg=1.7)\nd.burst_search()\n\nds1 = d.select_bursts(select_bursts.size, th1=30)\nds = ds1.select_bursts(select_bursts.naa, th1=30)\n\nalex_jointplot(ds)\n\nph = d.ph_times_m[0]\n\ntau = 100e-6/d.clk_p\ntau", "KDE considerations\nIn computing a KDE, the kernel can have different shapes. 
In the original\n2CDE publication the \"laplace distribution\" kernel is used.\nIn the next sections we will show the 2CDE results\nusing both \"laplace distribution\" Gaussian kernel.\nHere, we simply plot the two kernels for comparison:", "tau = 1\ntau2 = 2 * (tau**2)\n\nxx = np.arange(-4*tau, 4*tau, tau/100.)\ny1 = np.exp(-np.abs(xx) / tau)\ny2 = np.exp(-xx**2 / tau2)\n\nplt.plot(xx,y1, label=r'$\\exp \\left( - \\frac{|t|}{\\tau} \\right)$')\nplt.plot(xx, y2, label=r'$\\exp \\left( - \\frac{t^2}{2\\tau^2} \\right)$')\nplt.axvline(2*tau, color='k')\nplt.axvline(-2*tau, color='k')\nplt.xlabel('t')\nplt.legend(fontsize=22, bbox_to_anchor=(1.05, 1), loc=2)\nplt.title(r'$\\tau = %d$' % tau, fontsize=22);", "Notes on Kernel Shape\nThe Gaussian kernel gives a more accurate rate estimation with very little dependence on the position where the KDE is evaluated. On the contrary, with symmetric exponential kernel (laplace distribution), there is always a strong dependence on the evaluation position. In particular, when rates are estimated at the timestamps positions, the rates are systematically over-estimated (i.e. the peak is always sampled).\nFor a Gaussian kernel, given a $\\tau$, the rate estimation will be accurate for rates higher than $1/(2\\,\\tau)$ counts-per-second. For lower rates, the estimation will strongly depend on where the KDE is evaluated. A similar condition can be also found for the exponential kernel, but this case the rate will aways be strongly dependent on the position.\n2CDE\nKDE and nbKDE Definitions\nFollowing Tomov 2012 notation, we define KDE as (Tomov 2012, eq. 4):\n$$KDE_{X_i}^Y \\left(t_{(CHX)i}, t{{CHY}} \\right) = \n\\sum_j^{N_{CHY}} \\exp \\left( - \\frac{\\lvert t_{(CHX)i} - t{(CHY)_j} \\rvert}{\\tau}\\right) $$\nand nbKDE as (Tomov 2012, eq. 
5):\n$$nbKDE_{X_i}^X \\left(t_{{CHX}} \\right) = \\left(1 + \\frac{2}{N_{CHX}} \\right) \\cdot\n\\sum_{j, \\;j\\ne i}^{N_{CHX}} \\exp \\left( - \\frac{\\lvert t_{(CHX)i} - t{(CHX)_j} \\rvert}{\\tau}\\right) $$\nThese quantities can be computed for any slice of the timestamp arrays. \nIn the implementation of FRET-2CDE, they will be computed on slices of\ntimestamps corresponding to each burst. \nIn this context, $N_{CHX}$, (in the multiplicative correction factor of nbKDE), \nis the number of photons in the current burst (selecting only photons in the $X$ channel).\nFRET-2CDE Definition\nTo compute FRET-2CDE we need to define (Tomov 2012, eq. 6):\n$$(E)D = \\frac{1}{N{CHD}} \\sum_{i=1}^{N_{CHD}} \\frac{KDE_{Di}^A}{KDE_{Di}^A + nbKDE_{Di}^D} $$\nand the symmetric estimator (Tomov 2012, eq. 7):\n$$(1 - E)A = \\frac{1}{N{CHA}} \\sum_{i=1}^{N_{CHA}} \\frac{KDE_{Ai}^D}{KDE_{Ai}^D + nbKDE_{Ai}^A} $$\nThen FRET-2CDE is defined as (Tomov 2012, eq. 8):\n$$ FRET-2CDE \\left( t_{CHD}, t_{CHA} \\right) = \n110 - 100 \\cdot \\left[ (E)_D + (1 - E)_A \\right]\n$$\nThese quantities are computed for each burst, so that $N_{CHD}$ ($N_{CHA}$) are\nthe number of photons in the DexDem (AemDex) channel during current burst.\nFRET-2CDE Functions\nTo implement the FRET-2CDE, we use the following FRETBursts function:\n\nphrates.kde_laplace() (documentation)\n\nThis function computes the local photon rate using KDE with a laplace distribution kernel. \nFRETBursts provides similar functions to use a Gaussian or rectangular kernel (kde_gaussian and\nkde_rect\nrespectively).\nHere we define two functions to compute FRET-2CDE using the laplace kernel\n(as in the original paper) and Gaussian kernel:", "def calc_fret_2cde(tau, ph, mask_d, mask_a, bursts):\n \"\"\"\n Compute FRET-2CDE for each burst.\n\n FRET-2CDE is a quantity that tends to be around 10 for bursts which have no\n dynamics, while it has larger values (e.g. 
30..100) for bursts with\n millisecond dynamics.\n\n References:\n Tomov et al. BJ (2012) doi:10.1016/j.bpj.2011.11.4025\n\n Arguments:\n tau (scalar): time-constant of the exponential KDE\n ph (1D array): array of all-photons timestamps.\n mask_d (bool array): mask for DexDem photons\n mask_a (bool array): mask for DexAem photons\n bursts (Bursts object): object containing burst data\n (start-stop indexes are relative to `ph`).\n\n Returns:\n FRET_2CDE (1D array): array of FRET_2CDE quantities, one element\n per burst. This array contains NaN in correspondence of bursts\n containing to few photons to compute FRET-2CDE.\n \"\"\"\n # Computing KDE burst-by-burst would cause inaccuracies at the burst edges.\n # Therefore, we first compute KDE on the full timestamps array and then\n # we take slices for each burst.\n # These KDEs are evaluated on all-photons array `ph` (hence the Ti suffix)\n # using D or A photons during D-excitation (argument ph[mask_d] or ph[mask_a]).\n KDE_DTi = phrates.kde_laplace(ph[mask_d], tau, time_axis=ph)\n KDE_ATi = phrates.kde_laplace(ph[mask_a], tau, time_axis=ph)\n\n FRET_2CDE = []\n for ib, burst in enumerate(bursts):\n burst_slice = slice(int(burst.istart), int(burst.istop) + 1)\n if ~mask_d[burst_slice].any() or ~mask_a[burst_slice].any():\n # Either D or A photon stream has no photons in current burst,\n # thus FRET_2CDE cannot be computed. 
Fill position with NaN.\n FRET_2CDE.append(np.nan)\n continue\n\n # Take slices of KDEs for current burst\n kde_adi = KDE_ATi[burst_slice][mask_d[burst_slice]]\n kde_ddi = KDE_DTi[burst_slice][mask_d[burst_slice]]\n kde_dai = KDE_DTi[burst_slice][mask_a[burst_slice]]\n kde_aai = KDE_ATi[burst_slice][mask_a[burst_slice]]\n\n # nbKDE does not include the \"center\" timestamp which contributes 1.\n # We thus subtract 1 from the precomputed KDEs.\n # The N_CHD (N_CHA) value in the correction factor is the number of\n # timestamps in DexDem (DexAem) stream falling within the current burst.\n N_CHD = mask_d[burst_slice].sum()\n N_CHA = mask_a[burst_slice].sum()\n nbkde_ddi = (1 + 2/N_CHD) * (kde_ddi - 1)\n nbkde_aai = (1 + 2/N_CHA) * (kde_aai - 1)\n\n # N_CHD (N_CHA) in eq. 6 (eq. 7) of (Tomov 2012) is the number of photons\n # in DexDem (DexAem) in current burst. Thus the sum is a mean.\n ED = np.mean(kde_adi / (kde_adi + nbkde_ddi)) # (E)_D\n EA = np.mean(kde_dai / (kde_dai + nbkde_aai)) # (1 - E)_A\n\n # Compute fret_2cde for current burst\n fret_2cde = 110 - 100 * (ED + EA)\n FRET_2CDE.append(fret_2cde)\n return np.array(FRET_2CDE)\n\ndef calc_fret_2cde_gauss(tau, ph, mask_d, mask_a, bursts):\n \"\"\"\n Compute a modification of FRET-2CDE using a Gaussian kernel.\n \n Reference: Tomov et al. BJ (2012) doi:10.1016/j.bpj.2011.11.4025\n \n Instead of using the exponential kernel (i.e. laplace distribution)\n of the original paper, here we use a Gaussian kernel.\n Photon density using Gaussian kernel provides a smooth estimate\n regardless of the evaluation time. 
On the contrary, the \n laplace-distribution kernel has discontinuities in the derivative \n (cuspids) on each time point corresponding to a timestamp.\n Using a Gaussian kernel removes the need of using the heuristic \n correction (pre-factor) of nbKDE.\n \n Arguments:\n tau (scalar): time-constant of the exponential KDE\n ph (1D array): array of all-photons timestamps.\n mask_d (bool array): mask for DexDem photons\n mask_a (bool array): mask for DexAem photons\n bursts (Bursts object): object containing burst data\n \n Returns:\n FRET_2CDE (1D array): array of FRET_2CDE quantities, one element \n per burst. This array contains NaN in correspondence of bursts \n containing to few photons to compute FRET-2CDE.\n \"\"\"\n # Computing KDE burst-by-burst would cause inaccuracies at the edges\n # So, we compute KDE for the full timestamps\n KDE_DTi = phrates.kde_gaussian(ph[mask_d], tau, time_axis=ph)\n KDE_ATi = phrates.kde_gaussian(ph[mask_a], tau, time_axis=ph)\n\n FRET_2CDE = []\n for ib, burst in enumerate(bursts):\n burst_slice = slice(int(burst.istart), int(burst.istop) + 1)\n if ~mask_d[burst_slice].any() or ~mask_a[burst_slice].any():\n # Either D or A photon stream has no photons in current burst, \n # thus FRET_2CDE cannot be computed.\n FRET_2CDE.append(np.nan)\n continue\n\n kde_ddi = KDE_DTi[burst_slice][mask_d[burst_slice]]\n kde_adi = KDE_ATi[burst_slice][mask_d[burst_slice]] \n kde_dai = KDE_DTi[burst_slice][mask_a[burst_slice]]\n kde_aai = KDE_ATi[burst_slice][mask_a[burst_slice]]\n \n ED = np.mean(kde_adi / (kde_adi + kde_ddi)) # (E)_D\n EA = np.mean(kde_dai / (kde_dai + kde_aai)) # (1 - E)_A\n\n fret_2cde = 110 - 100 * (ED + EA)\n FRET_2CDE.append(fret_2cde)\n return np.array(FRET_2CDE)", "FRET-2CDE Results\nLet's define $\\tau$, the kernel parameter which defines the \"time range\"\nof the photon density estimation:", "tau_s = 50e-6 # in seconds\ntau = int(tau_s/d.clk_p) # in raw timestamp units\ntau", "Next, we get the timestamps and selection 
masks for DexDem and DexAem photon streams,\nas well as the burst data:", "ph = d.get_ph_times(ph_sel=Ph_sel('all'))\nmask_d = d.get_ph_mask(ph_sel=Ph_sel(Dex='Dem'))\nmask_a = d.get_ph_mask(ph_sel=Ph_sel(Dex='Aem'))\n\nbursts = ds.mburst[0]", "We can finally compute the FRET-2CDE for each burst:", "fret_2cde = calc_fret_2cde(tau, ph, mask_d, mask_a, bursts)\n\nfret_2cde_gauss = calc_fret_2cde_gauss(tau, ph, mask_d, mask_a, bursts)\n\nlen(fret_2cde), len(fret_2cde_gauss), bursts.num_bursts, ds.num_bursts", "And visualize the results with some plots:", "plt.figure(figsize=(4.5, 4.5))\nhist_kws = dict(edgecolor='k', linewidth=0.2,\n facecolor=sns.color_palette('Spectral_r', 100)[7])\n\nvalid = np.isfinite(fret_2cde)\nsns.kdeplot(ds.E[0][valid], fret_2cde[valid],\n cmap='Spectral_r', shade=True, shade_lowest=False, n_levels=20)\nplt.xlabel('E', fontsize=16)\nplt.ylabel('FRET-2CDE', fontsize=16);\nplt.ylim(-10, 50);\nplt.axhline(10, ls='--', lw=2, color='k')\nplt.text(0.05, 0.95, '2CDE', va='top', fontsize=22, transform=plt.gca().transAxes)\nplt.text(0.95, 0.95, '# Bursts: %d' % valid.sum(), \n va='top', ha='right', transform=plt.gca().transAxes)\nplt.savefig('2cde.png', bbox_inches='tight', dpi=200, transparent=False)\n\nvalid = np.isfinite(fret_2cde)\nx, y = ds.E[0][valid], fret_2cde[valid]\nhist_kws = dict(edgecolor='k', linewidth=0.2,\n facecolor=sns.color_palette('Spectral_r', 100)[7])\n\ng = sns.JointGrid(x=x, y=y, ratio=3)\ng.plot_joint(sns.kdeplot, cmap='Spectral_r', shade=True, shade_lowest=False, n_levels=20)\ng.ax_marg_x.hist(x, bins=np.arange(-0.2, 1.2, 0.0333), **hist_kws)\ng.ax_marg_y.hist(y, bins=70, orientation=\"horizontal\", **hist_kws)\n\ng.ax_joint.set_xlabel('E', fontsize=16)\ng.ax_joint.set_ylabel('FRET-2CDE', fontsize=16);\ng.ax_joint.set_ylim(-10, 50);\ng.ax_joint.set_xlim(-0.1, 1.1);\ng.ax_joint.axhline(10, ls='--', lw=2, color='k')\ng.ax_joint.text(0.05, 0.95, '2CDE', va='top', fontsize=22, 
transform=g.ax_joint.transAxes)\ng.ax_joint.text(0.95, 0.95, '# Bursts: %d' % valid.sum(), \n va='top', ha='right', transform=g.ax_joint.transAxes)\nplt.savefig('2cde_joint.png', bbox_inches='tight', dpi=200, transparent=False)", "ALEX-2CDE Definition\nTo compute ALEX-2CDE we need to define (Tomov 2012, eq. 10):\n$$BR_{D_{EX}} = \\frac{1}{ N_{CHA_{EX}} } \n\\sum_{i=1}^{N_{CHD_{EX}}} \\frac{ KDE_{D_{EX}i}^A }{ KDE_{D_{EX}i}^D }$$\nand the analogous (Tomov 2012, eq. 11):\n$$BR_{A_{EX}} = \\frac{1}{ N_{CHD_{EX}} } \n\\sum_{i=1}^{N_{CHA_{EX}}} \\frac{ KDE_{A_{EX}i}^D }{ KDE_{A_{EX}i}^A }$$\nFinally, ALEX-2CDE is defined as (Tomov 2012, eq. 12):\n$$ ALEX-2CDE \\left( t_{CHD}, t_{CHA} \\right) = \n110 - 50 \\cdot \\left[ BR_{D_{EX}} + BR_{A_{EX}} \\right]\n$$\nALEX-2CDE Implementation", "bursts = ds1.mburst[0]\n\nph_dex = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'))\nph_aex = d.get_ph_times(ph_sel=Ph_sel(Aex='Aem'))\n\nmask_dex = d.get_ph_mask(ph_sel=Ph_sel(Dex='DAem'))\nmask_aex = d.get_ph_mask(ph_sel=Ph_sel(Aex='Aem'))\n\nKDE_DexTi = phrates.kde_laplace(ph_dex, tau, time_axis=ph)\nKDE_AexTi = phrates.kde_laplace(ph_aex, tau, time_axis=ph)\n\nALEX_2CDE = []\nBRDex, BRAex = [], []\nfor ib, burst in enumerate(bursts):\n burst_slice = slice(int(burst.istart), int(burst.istop) + 1)\n if ~mask_dex[burst_slice].any() or ~mask_aex[burst_slice].any():\n # Either D or A photon stream has no photons in current burst, \n # thus ALEX_2CDE cannot be computed.\n ALEX_2CDE.append(np.nan)\n continue\n\n kde_dexdex = KDE_DexTi[burst_slice][mask_dex[burst_slice]]\n kde_aexdex = KDE_AexTi[burst_slice][mask_dex[burst_slice]]\n N_chaex = mask_aex[burst_slice].sum()\n BRDex.append(np.sum(kde_aexdex / kde_dexdex) / N_chaex)\n \n kde_aexaex = KDE_AexTi[burst_slice][mask_aex[burst_slice]]\n kde_dexaex = KDE_DexTi[burst_slice][mask_aex[burst_slice]]\n N_chdex = mask_dex[burst_slice].sum()\n BRAex.append(np.sum(kde_dexaex / kde_aexaex) / N_chdex)\n \n alex_2cde = 100 - 50*(BRDex[-1] - BRAex[-1])\n 
ALEX_2CDE.append(alex_2cde)\nALEX_2CDE = np.array(ALEX_2CDE)\n\nALEX_2CDE.size, np.isfinite(ALEX_2CDE).sum(), np.isfinite(ds1.E[0]).sum()", "And some final plots of ALEX-2CDE:", "hist_kws = dict(edgecolor='k', linewidth=0.2,\n facecolor=sns.color_palette('Spectral_r', 100)[7])\nvalid = np.isfinite(ALEX_2CDE)\ng = sns.JointGrid(x=ds1.E[0][valid], y=ALEX_2CDE[valid], ratio=3)\ng = g.plot_joint(plt.hexbin, **{'cmap': 'Spectral_r', 'mincnt': 1, 'gridsize': 40})\n_ = g.ax_marg_x.hist(ds1.E[0][valid], bins=np.arange(-0.2, 1.2, 0.0333), **hist_kws)\n_ = g.ax_marg_y.hist(ALEX_2CDE[valid], bins=40, orientation=\"horizontal\", **hist_kws)\ng.ax_joint.set_xlabel('E', fontsize=16)\ng.ax_joint.set_ylabel('ALEX-2CDE', fontsize=16);\ng.ax_joint.text(0.95, 0.95, '# Bursts: %d' % valid.sum(), \n va='top', ha='right', transform=g.ax_joint.transAxes);\n\nvalid = np.isfinite(ALEX_2CDE)\nprint('Number of bursts (removing NaNs/Infs):', valid.sum())\ng = sns.JointGrid(x=ds1.S[0][valid], y=ALEX_2CDE[valid], ratio=3)\ng = g.plot_joint(plt.hexbin, **{'cmap': 'Spectral_r', 'mincnt': 1, 'gridsize': 40})\n_ = g.ax_marg_x.hist(ds1.S[0][valid], bins=np.arange(0, 1.2, 0.0333), **hist_kws)\n_ = g.ax_marg_y.hist(ALEX_2CDE[valid], bins=40, orientation=\"horizontal\", **hist_kws)\ng.ax_joint.set_xlabel('S', fontsize=16)\ng.ax_joint.set_ylabel('ALEX-2CDE', fontsize=16)\ng.ax_joint.text(0.95, 0.95, '# Bursts: %d' % valid.sum(), \n va='top', ha='right', transform=g.ax_joint.transAxes);\n\nmasks = [valid * (ALEX_2CDE < 88) * (ds1.S[0] > 0.9)]\nds2 = ds1.select_bursts_mask_apply(masks)\n\nalex_jointplot(ds2, vmax_fret=False)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ShiroJean/Breast-cancer-risk-prediction
NB3_DataPreprocesing.ipynb
mit
[ "Notebook 3: Pre-Processing the data\nIntroduction\nData preprocessing is a crucial step for any data analysis problem. It is often a very good idea to prepare your data in such way to best expose the structure of the problem to the machine learning algorithms that you intend to use.This involves a number of activities such as:\n* Assigning numerical values to categorical data;\n* Handling missing values; and\n* Normalizing the features (so that features on small scales do not dominate when fitting a model to the data).\nIn Notebook-2 NB2_Exploratory data analysis. I explored the data, to help gain insight on the distribution of the data as well as how the attributes correlate to each other. I identified some features of interest. In this notebook I use feature selection to reduce high-dimension data, feature extraction and transformation for dimensionality reduction. \nGoal:\nFind the most predictive features of the data and filter it so it will enhance the predictive power of the analytics model. \nLoad data and essential libraries", "%matplotlib inline\nimport matplotlib.pyplot as plt\n\n#Load libraries for data processing\nimport pandas as pd #data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport numpy as np\nfrom scipy.stats import norm\n\n# visualization\nimport seaborn as sns \nplt.style.use('fivethirtyeight')\nsns.set_style(\"white\")\n\n\nplt.rcParams['figure.figsize'] = (8,4) \n#plt.rcParams['axes.titlesize'] = 'large'\n\ndata = pd.read_csv('data/clean-data.csv', index_col=False)\ndata.drop('Unnamed: 0',axis=1, inplace=True)\n#data.head()", "Label encoding\nHere, I assign the 30 features to a NumPy array X, and transform the class labels from their original string representation (M and B) into integers", "#Assign predictors to a variable of ndarray (matrix) type\narray = data.values\nX = array[:,1:31]\ny = array[:,0]\n\n#transform the class labels from their original string representation (M and B) into integers\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\ny = le.fit_transform(y)\n\n#Call the transform method of LabelEncorder on two dummy variables\n#le.transform (['M', 'B'])", "After encoding the class labels(diagnosis) in an array y, the malignant tumors are now represented as class 1(i.e prescence of cancer cells) and the benign tumors are represented as class 0 (i.e no cancer cells detection), respectively, illustrated by calling the transform method of LabelEncorder on two dummy variables.**\n\nAssesing Model Accuracy: Split data into training and test sets\nThe simplest method to evaluate the performance of a machine learning algorithm is to use different training and testing datasets. Here I will\n* Split the available data into a training set and a testing set. (70% training, 30% test)\n* Train the algorithm on the first part,\n* make predictions on the second part and \n* evaluate the predictions against the expected results. 
\nThe size of the split can depend on the size and specifics of your dataset, although it is common to use 67% of the data for training and the remaining 33% for testing.", "from sklearn.model_selection import train_test_split\n\n##Split data set in train 70% and test 30%\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=7)\nX_train.shape, y_train.shape, X_test.shape, y_test.shape", "Feature Standardization\n\n\nStandardization is a useful technique to transform attributes with a Gaussian distribution and differing means and standard deviations to a standard Gaussian distribution with a mean of 0 and a standard deviation of 1. \n\n\nAs seen in NB2_Exploratory data analysis the raw data has differing distributions which may have an impact on the most ML algorithms. Most machine learning and optimization algorithms behave much better if features are on the same scale.\n\n\nLet’s evaluate the same algorithms with a standardized copy of the dataset. Here, I use sklearn to scale and transform the data such that each attribute has a mean value of zero and a standard deviation of one", "from sklearn.preprocessing import StandardScaler\n\n# Normalize the data (center around 0 and scale to remove the variance).\nscaler =StandardScaler()\nXs = scaler.fit_transform(X)", "Feature decomposition using Principal Component Analysis( PCA)\nFrom the pair plot in NB2, lot of feature pairs divide nicely the data to a similar extent, therefore, it makes sense to use one of the dimensionality reduction methods to try to use as many features as possible and maintian as much information as possible when working with only 2 dimensions. 
I will use PCA", "from sklearn.decomposition import PCA\n# feature extraction\npca = PCA(n_components=10)\nfit = pca.fit(Xs)\n\n# summarize components\n#print(\"Explained Variance: %s\") % fit.explained_variance_ratio_\n#print(fit.components_)\n\nX_pca = pca.transform(Xs)\n\nPCA_df = pd.DataFrame()\n\nPCA_df['PCA_1'] = X_pca[:,0]\nPCA_df['PCA_2'] = X_pca[:,1]\n\nplt.plot(PCA_df['PCA_1'][data.diagnosis == 'M'],PCA_df['PCA_2'][data.diagnosis == 'M'],'o', alpha = 0.7, color = 'r')\nplt.plot(PCA_df['PCA_1'][data.diagnosis == 'B'],PCA_df['PCA_2'][data.diagnosis == 'B'],'o', alpha = 0.7, color = 'b')\n\nplt.xlabel('PCA_1')\nplt.ylabel('PCA_2')\nplt.legend(['Malignant','Benign'])\nplt.show()", "Now, what we got after applying the linear PCA transformation is a lower dimensional subspace (from 3D to 2D in this case), where the samples are “most spread” along the new feature axes.", "#The amount of variance that each PC explains\nvar= pca.explained_variance_ratio_\n#Cumulative Variance explains\n#var1=np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)\n#print(var1)", "Deciding How Many Principal Components to Retain\nIn order to decide how many principal components should be retained, it is common to summarise the results of a principal components analysis by making a scree plot. More about scree plot can be found here, and hear", "#The amount of variance that each PC explains\nvar= pca.explained_variance_ratio_\n#Cumulative Variance explains\n#var1=np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)\n#print(var1)\n\nplt.plot(var)\nplt.title('Scree Plot')\nplt.xlabel('Principal Component')\nplt.ylabel('Eigenvalue')\n\nleg = plt.legend(['Eigenvalues from PCA'], loc='best', borderpad=0.3,shadow=False,markerscale=0.4)\nleg.get_frame().set_alpha(0.4)\nleg.draggable(state=True)\nplt.show()", "Observation\nThe most obvious change in slope in the scree plot occurs at component 2, which is the “elbow” of the scree plot. 
Therefore, it cound be argued based on the basis of the scree plot that the first three components should be retained.\n\nA Summary of the Data Preprocing Approach used here:\n\nassign features to a NumPy array X, and transform the class labels from their original string representation (M and B) into integers\nSplit data into training and test sets\nStandardize the data.\nObtain the Eigenvectors and Eigenvalues from the covariance matrix or correlation matrix\nSort eigenvalues in descending order and choose the kk eigenvectors that correspond to the kk largest eigenvalues where k is the number of dimensions of the new feature subspace (k≤dk≤d).\nConstruct the projection matrix W from the selected k eigenvectors.\nTransform the original dataset X via W to obtain a k-dimensional feature subspace Y.\n\nIt is common to select a subset of features that have the largest correlation with the class labels. The effect of feature selection must be assessed within a complete modeling pipeline in order to give you an unbiased estimated of your model's true performance. Hence, in the next section you will first be introduced to cross-validation, before applying the PCA-based feature selection strategy in the model building pipeline." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
weikang9009/pysal
notebooks/explore/segregation/inference_wrappers_example.ipynb
bsd-3-clause
[ "Inference Wrappers use cases\nThis is an example of the PySAL segregation framework to perform inference on a single value and comparative inference using simulations under the null hypothesis. Once the segregation classes are fitted, the user can perform inference to shed light for statistical significance in regional analysis. Currently, it is possible to make inference for a single measure or for two values of the same measure.\nThe summary of the inference wrappers is presented in the following Table:\n| Inference Type | Class/Function | Function main Inputs | Function Outputs |\n| :----------------- | :------------------- | :------------------------------------------------------: | :----------------------------------: |\n| Single Value | SingleValueTest | seg_class, iterations_under_null, null_approach, two_tailed | p_value, est_sim, statistic |\n| Two Value | TwoValueTest | seg_class_1, seg_class_2, iterations_under_null, null_approach | p_value, est_sim, est_point_diff |\nFirstly let's import the module/functions for the use case:", "%matplotlib inline\n\nimport geopandas as gpd\nfrom pysal.explore import segregation\nimport pysal.lib\nimport pandas as pd\nimport numpy as np\n\nfrom pysal.explore.segregation.inference import SingleValueTest, TwoValueTest", "Then it's time to load some data to estimate segregation. We use the data of 2000 Census Tract Data for the metropolitan area of Sacramento, CA, USA. 
\nWe use a geopandas dataframe available in PySAL examples repository.\nFor more information about the data: https://github.com/pysal/pysal.lib/tree/master/pysal.lib/examples/sacramento2", "s_map = gpd.read_file(pysal.lib.examples.get_path(\"sacramentot2.shp\"))\ns_map.columns\n\ngdf = s_map[['geometry', 'HISP_', 'TOT_POP']]", "We also can plot the spatial distribution of the composition of the Hispanic population over the tracts of Sacramento:", "gdf['composition'] = gdf['HISP_'] / gdf['TOT_POP']\n\ngdf.plot(column = 'composition',\n cmap = 'OrRd', \n figsize=(20,10),\n legend = True)", "Single Value\nDissimilarity\nThe SingleValueTest function expect to receive a pre-fitted segregation class and then it uses the underlying data to iterate over the null hypothesis and comparing the results with point estimation of the index. Thus, we need to firstly estimate some measure. We can fit the classic Dissimilarity index:", "from pysal.explore.segregation.aspatial import Dissim\nD = Dissim(gdf, 'HISP_', 'TOT_POP')\nD.statistic", "The question that may rise is \"Is this value of 0.32 statistically significant under some pre-specified circumstance?\". To answer this, it is possible to rely on the Infer_Segregation function to generate several values of the same index (in this case the Dissimilarity Index) under the hypothesis and compare them with the one estimated by the dataset of Sacramento. To generate 1000 values assuming evenness, you can run:", "infer_D_eve = SingleValueTest(D, iterations_under_null = 1000, null_approach = \"evenness\", two_tailed = True)", "This class has a quick plotting method to inspect the generated distribution with the estimated value from the sample (vertical red line):", "infer_D_eve.plot()", "It is possible to see that clearly the value of 0.3218 is far-right in the distribution indicating that the hispanic group is, indeed, significantly segregated in terms of the Dissimilarity index under evenness. 
You can also check the mean value of the distribution using the est_sim attribute which represents all the D draw from the simulations:", "infer_D_eve.est_sim.mean()", "The two-tailed p-value of the following hypothesis test:\n$$H_0: under \\ evenness, \\ Sacramento \\ IS \\ NOT \\ segregated \\ in \\ terms \\ of \\ the \\ Dissimilarity \\ index \\ (D)$$\n$$H_1: under \\ evenness, \\ Sacramento \\ IS \\ segregated \\ in \\ terms \\ of \\ the \\ Dissimilarity \\ index \\ (D)$$\ncan be accessed with the p_value attribute:", "infer_D_eve.p_value", "Therefore, we can conclude that Sacramento is statistically segregated at 5% of significance level (p.value < 5%) in terms of D.\nYou can also test under different approaches for the null hypothesis:", "infer_D_sys = SingleValueTest(D, iterations_under_null = 5000, null_approach = \"systematic\", two_tailed = True)\n\ninfer_D_sys.plot()", "The conclusions are analogous as the evenness approach.\nRelative Concentration\nThe Infer_Segregation wrapper can handle any class of the PySAL segregation module. It is possible to use it in the Relative Concentration (RCO) segregation index:", "from pysal.explore.segregation.spatial import RelativeConcentration\nRCO = RelativeConcentration(gdf, 'HISP_', 'TOT_POP')", "Since RCO is an spatial index (i.e. depends on the spatial context), it makes sense to use the permutation null approach. 
This approach relies on randomly allocating the sample values over the spatial units and recalculating the chosen index to all iterations.", "infer_RCO_per = SingleValueTest(RCO, iterations_under_null = 1000, null_approach = \"permutation\", two_tailed = True)\n\ninfer_RCO_per.plot()\n\ninfer_RCO_per.p_value", "Analogously, the conclusion for the Relative Concentration index is that Sacramento is not significantly (under 5% of significance, because p-value > 5%) concentrated for the hispanic people.\nAdditionaly, it is possible to combine the null approaches establishing, for example, a permutation along with evenness of the frequency of the Sacramento hispanic group. With this, the conclusion of the Relative Concentration changes.", "infer_RCO_eve_per = SingleValueTest(RCO, iterations_under_null = 1000, null_approach = \"even_permutation\", two_tailed = True)\ninfer_RCO_eve_per.plot()", "Relative Centralization\nUsing the same permutation approach for the Relative Centralization (RCE) segregation index:", "from pysal.explore.segregation.spatial import RelativeCentralization\nRCE = RelativeCentralization(gdf, 'HISP_', 'TOT_POP')\ninfer_RCE_per = SingleValueTest(RCE, iterations_under_null = 1000, null_approach = \"permutation\", two_tailed = True)\n\ninfer_RCE_per.plot()", "The conclusion is that the hispanic group is negatively significantly (as the point estimation is in the left side of the distribution) in terms of centralization. This behavior can be, somehow, inspected in the map as the composition tends to be more concentraded outside of the center of the overall region.\n\nComparative Inference\nTo compare two different values, the user can rely on the TwoValueTest function. 
Similar to the previous function, the user needs to pass two segregation SM classes to be compared, establish the number of iterations under null hypothesis with iterations_under_null, specify which type of null hypothesis the inference will iterate with null_approach argument and, also, can pass additional parameters for each segregation estimation.\nObs.: in this case, each measure has to be the same class as it would not make much sense to compare, for example, a Gini index with a Delta index\nThis example uses all census data that the user must provide your own copy of the external database.\nA step-by-step procedure for downloading the data can be found here: https://github.com/spatialucr/geosnap/tree/master/geosnap/data.\nAfter the user download the zip files, you must provide the path to these files.", "import os\n#os.chdir('path_to_zipfiles')\n\nimport geosnap\nfrom geosnap.data.data import read_ltdb\n\nsample = \"LTDB_Std_All_Sample.zip\"\nfull = \"LTDB_Std_All_fullcount.zip\"\n\nread_ltdb(sample = sample, fullcount = full)\n\ndf_pre = geosnap.data.db.ltdb\n\ndf_pre.head()", "In this example, we are interested to assess the comparative segregation of the non-hispanic black people in the census tracts of the Riverside, CA, county between 2000 and 2010. 
Therefore, we extract the desired columns and add some auxiliary variables:", "df = df_pre[['n_nonhisp_black_persons', 'n_total_pop', 'year']]\n\ndf['geoid'] = df.index\ndf['state'] = df['geoid'].str[0:2]\ndf['county'] = df['geoid'].str[2:5]\ndf.head()", "Filtering Riverside County and desired years of the analysis:", "df_riv = df[(df['state'] == '06') & (df['county'] == '065') & (df['year'].isin(['2000', '2010']))]\ndf_riv.head()", "Merging it with desired map.", "map_url = 'https://raw.githubusercontent.com/renanxcortes/inequality-segregation-supplementary-files/master/Tracts_grouped_by_County/06065.json'\nmap_gpd = gpd.read_file(map_url)\ngdf = map_gpd.merge(df_riv, \n left_on = 'GEOID10', \n right_on = 'geoid')[['geometry', 'n_nonhisp_black_persons', 'n_total_pop', 'year']]\n\ngdf['composition'] = np.where(gdf['n_total_pop'] == 0, 0, gdf['n_nonhisp_black_persons'] / gdf['n_total_pop'])\n\ngdf.head()\n\ngdf_2000 = gdf[gdf.year == 2000]\ngdf_2010 = gdf[gdf.year == 2010]", "Map of 2000:", "gdf_2000.plot(column = 'composition',\n cmap = 'OrRd',\n figsize = (30,5),\n legend = True)", "Map of 2010:", "gdf_2010.plot(column = 'composition',\n cmap = 'OrRd',\n figsize = (30,5),\n legend = True)", "A question that may rise is \"Was it more or less segregated than 2000?\". To answer this, we rely on simulations to test the following hypothesis:\n$$H_0: Segregation\\ Measure_{2000} - Segregation\\ Measure_{2010} = 0$$\nComparative Dissimilarity", "D_2000 = Dissim(gdf_2000, 'n_nonhisp_black_persons', 'n_total_pop')\nD_2010 = Dissim(gdf_2010, 'n_nonhisp_black_persons', 'n_total_pop')\nD_2000.statistic - D_2010.statistic", "We can see that Riverside was more segregated in 2000 than in 2010. But, was this point difference statistically significant? 
We use the random_label approach which consists in random labelling the data between the two periods and recalculating the Dissimilarity statistic (D) in each iteration and comparing it to the original value.", "compare_D_fit = TwoValueTest(D_2000, D_2010, iterations_under_null = 1000, null_approach = \"random_label\")", "The TwoValueTest class also has a plotting method:", "compare_D_fit.plot()", "To access the two-tailed p-value of the test:", "compare_D_fit.p_value", "The conclusion is that, for the Dissimilarity index and 5% of significance, segregation in Riverside was not different between 2000 and 2010 (since p-value > 5%).\nComparative Gini\nAnalogously, the same steps can be made for the Gini segregation index.", "from pysal.explore.segregation.aspatial import GiniSeg\nG_2000 = GiniSeg(gdf_2000, 'n_nonhisp_black_persons', 'n_total_pop')\nG_2010 = GiniSeg(gdf_2010, 'n_nonhisp_black_persons', 'n_total_pop')\ncompare_G_fit = TwoValueTest(G_2000, G_2010, iterations_under_null = 1000, null_approach = \"random_label\")\ncompare_G_fit.plot()", "The absence of significance is also present as the point estimation of the difference (vertical red line) is located in the middle of the distribution of the null hypothesis simulated.\nComparative Spatial Dissimilarity\nAs an example of a spatial index, comparative inference can be performed for the Spatial Dissimilarity Index (SD). For this, we use the counterfactual_composition approach as an example. \nIn this framework, the population of the group of interest in each unit is randomized with a constraint that depends on both cumulative density functions (cdf) of the group of interest composition to the group of interest frequency of each unit. 
In each unit of each iteration, there is a probability of 50\\% of keeping its original value or swapping to its corresponding value according of the other composition distribution cdf that it is been compared against.", "from pysal.explore.segregation.spatial import SpatialDissim\nSD_2000 = SpatialDissim(gdf_2000, 'n_nonhisp_black_persons', 'n_total_pop')\nSD_2010 = SpatialDissim(gdf_2010, 'n_nonhisp_black_persons', 'n_total_pop')\ncompare_SD_fit = TwoValueTest(SD_2000, SD_2010, iterations_under_null = 500, null_approach = \"counterfactual_composition\")\ncompare_SD_fit.plot()", "The conclusion is that for the Spatial Dissimilarity index under this null approach, the year of 2000 was more segregated than 2010 for the non-hispanic black people in the region under study." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
spennihana/h2o-3
h2o-py/demos/EEG_eyestate_sklearn_NOPASS.ipynb
apache-2.0
[ "Scikit-Learn singalong: EEG Eye State Classification\nAuthor: Kevin Yang\nContact: kyang@h2o.ai\nThis tutorial replicates Erin LeDell's oncology demo using Scikit Learn and Pandas, and is intended to provide a comparison of the syntactical and performance differences between sklearn and H2O implementations of Gradient Boosting Machines. \nWe'll be using Pandas, Numpy and the collections package for most of the data exploration.", "import pandas as pd\nimport numpy as np\nfrom collections import Counter", "Download EEG Data\nThe following code downloads a copy of the EEG Eye State dataset. All data is from one continuous EEG measurement with the Emotiv EEG Neuroheadset. The duration of the measurement was 117 seconds. The eye state was detected via a camera during the EEG measurement and added later manually to the file after analysing the video frames. '1' indicates the eye-closed and '0' the eye-open state. All values are in chronological order with the first measured value at the top of the data.\n\nLet's import the same dataset directly with pandas", "csv_url = \"http://www.stat.berkeley.edu/~ledell/data/eeg_eyestate_splits.csv\"\ndata = pd.read_csv(csv_url)", "Explore Data\nOnce we have loaded the data, let's take a quick look. First the dimension of the frame:", "data.shape\n", "Now let's take a look at the top of the frame:", "data.head()", "The first two columns contain an ID and the response. The \"diagnosis\" column is the response. Let's take a look at the column names. 
The data contains derived features from the medical images of the tumors.", "data.columns.tolist()", "To select a subset of the columns to look at, typical Pandas indexing applies:", "columns = ['AF3', 'eyeDetection', 'split']\ndata[columns].head(10)", "Now let's select a single column, for example -- the response column, and look at the data more closely:", "data['eyeDetection'].head()", "It looks like a binary response, but let's validate that assumption:", "data['eyeDetection'].unique()", "We can query the categorical \"levels\" as well ('B' and 'M' stand for \"Benign\" and \"Malignant\" diagnosis):", "data['eyeDetection'].nunique()", "Since \"diagnosis\" column is the response we would like to predict, we may want to check if there are any missing values, so let's look for NAs. To figure out which, if any, values are missing, we can use the isna method on the diagnosis column. The columns in an H2O Frame are also H2O Frames themselves, so all the methods that apply to a Frame also apply to a single column.", "data.isnull()\n\ndata['eyeDetection'].isnull()", "The isna method doesn't directly answer the question, \"Does the diagnosis column contain any NAs?\", rather it returns a 0 if that cell is not missing (Is NA? FALSE == 0) and a 1 if it is missing (Is NA? TRUE == 1). So if there are no missing values, then summing over the whole column should produce a summand equal to 0.0. Let's take a look:", "data['eyeDetection'].isnull().sum()", "Great, no missing labels. \nOut of curiosity, let's see if there is any missing data in this frame:", "data.isnull().sum()", "The next thing I may wonder about in a binary classification problem is the distribution of the response in the training data. Is one of the two outcomes under-represented in the training set? Many real datasets have what's called an \"imbalanace\" problem, where one of the classes has far fewer training examples than the other class. 
Let's take a look at the distribution, both visually and numerically.", "Counter(data['eyeDetection'])", "Ok, the data is not exactly evenly distributed between the two classes -- there are more 0's than 1's in the dataset. However, this level of imbalance shouldn't be much of an issue for the machine learning algos. (We will revisit this later in the modeling section below).\nLet's calculate the percentage that each class represents:", "n = data.shape[0] # Total number of training samples\nnp.array(Counter(data['eyeDetection']).values())/float(n)", "Split H2O Frame into a train and test set\nSo far we have explored the original dataset (all rows). For the machine learning portion of this tutorial, we will break the dataset into three parts: a training set, validation set and a test set.\nIf you want H2O to do the splitting for you, you can use the split_frame method. However, we have explicit splits that we want (for reproducibility reasons), so we can just subset the Frame to get the partitions we want.", "train = data[data['split']==\"train\"]\ntrain.shape\n\nvalid = data[data['split']==\"valid\"]\nvalid.shape\n\ntest = data[data['split']==\"test\"]\ntest.shape", "Machine Learning in H2O\nWe will do a quick demo of the H2O software -- trying to predict eye state (open/closed) from EEG data.\nSpecify the predictor set and response\nThe response, y, is the 'diagnosis' column, and the predictors, x, are all the columns aside from the first two columns ('id' and 'diagnosis').", "y = 'eyeDetection'\nx = data.columns.drop(['eyeDetection','split'])\n", "Split H2O Frame into a train and test set", "from sklearn.ensemble import GradientBoostingClassifier\n\nimport sklearn\n\n\ntest.shape", "Train and Test a GBM model", "model = GradientBoostingClassifier(n_estimators=100,\n max_depth=4,\n learning_rate=0.1)\n\n\nX=train[x].reset_index(drop=True)\ny=train[y].reset_index(drop=True)\n\nmodel.fit(X, y)\n\nprint(model)", "Inspect Model", "model.get_params()", "Model 
Performance on a Test Set", "from sklearn.metrics import r2_score, roc_auc_score, mean_squared_error\ny_pred = model.predict(X)\n\nr2_score(y_pred, y)\n\nroc_auc_score(y_pred, y)\n\nmean_squared_error(y_pred, y)", "Cross-validated Performance", "from sklearn import cross_validation\n\ncross_validation.cross_val_score(model, X, y, scoring='roc_auc', cv=5)\n\n\ncross_validation.cross_val_score(model, valid[x].reset_index(drop=True), valid['eyeDetection'].reset_index(drop=True), scoring='roc_auc', cv=5)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
nproctor/phys202-2015-work
assignments/assignment04/MatplotlibEx02.ipynb
mit
[ "Matplotlib Exercise 2\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "Exoplanet properties\nOver the past few decades, astronomers have discovered thousands of extrasolar planets. The following paper describes the properties of some of these planets.\nhttp://iopscience.iop.org/1402-4896/2008/T130/014001\nYour job is to reproduce Figures 2 and 4 from this paper using an up-to-date dataset of extrasolar planets found on this GitHub repo:\nhttps://github.com/OpenExoplanetCatalogue/open_exoplanet_catalogue\nA text version of the dataset has already been put into this directory. The top of the file has documentation about each column of data:", "!head -n 30 open_exoplanet_catalogue.txt", "Use np.genfromtxt with a delimiter of ',' to read the data into a NumPy array called data:", "data = np.genfromtxt(\"open_exoplanet_catalogue.txt\", dtype=float, delimiter= ',')\n\nassert data.shape==(1993,24)", "Make a histogram of the distribution of planetary masses. This will reproduce Figure 2 in the original paper.\n\nCustomize your plot to follow Tufte's principles of visualizations.\nCustomize the box, grid, spines and ticks to match the requirements of this data.\nPick the number of bins for the histogram appropriately.", "x = data[:,2]\ny = x[~np.isnan(x)]\nplt.hist(y, 200)\nplt.xlim(0,30)\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.spines['bottom'].set_color('#a2a7ff')\nax.spines['left'].set_color('#a2a7ff')\nplt.xlabel(\"Distibution of Planetary Masses (MJ)\", fontsize = 14, color=\"#383838\")\nplt.ylabel(\"Number of Planets\", fontsize = 14, color=\"#383838\")\nax.tick_params(axis='x', colors='#666666')\nax.tick_params(axis='y', colors='#666666')\nax.get_xaxis().tick_bottom()\nax.get_yaxis().tick_left()\n\n\n\nassert True # leave for grading", "Make a scatter plot of the orbital eccentricity (y) versus the semimajor axis. This will reproduce Figure 4 of the original paper. 
Use a log scale on the x axis.\n\nCustomize your plot to follow Tufte's principles of visualizations.\nCustomize the box, grid, spines and ticks to match the requirements of this data.", "a = data[:,5]\nb = data[:,6]\nc = np.vstack((a,b))\nd = np.transpose(c)\nnew = np.transpose(d[~np.isnan(d).any(axis=1)])\nex = new[0]\nwhy = new[1]\n\nplt.figure(figsize=(12,5))\nplt.scatter(ex, why)\nplt.xlim(0, 2)\nplt.ylim(-.05, 1.0)\nplt.xlabel(\"Semimajor Axis (AU)\", fontsize = 14, color=\"#383838\")\nplt.ylabel(\"Eccentricity\", fontsize = 14, color=\"#383838\")\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.spines['bottom'].set_color('#a2a7ff')\nax.spines['left'].set_color('#a2a7ff')\nax.tick_params(axis='x', colors='#666666')\nax.tick_params(axis='y', colors='#666666')\nax.get_xaxis().tick_bottom()\nax.get_yaxis().tick_left()\n\n\nassert True # leave for grading\n\n# used \"Joe Kington\"'s (from stackoverflow) method for setting axis tick and label colors\n# used \"timday\"'s (from stackoverflow) method for hiding the top and right axis and ticks" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
xiaoxiaoyao/MyApp
PythonApplication1/deeplearning/examples/.ipynb_checkpoints/gan_pytorch-checkpoint.ipynb
unlicense
[ "用不到 50 行代码训练 GAN(基于 PyTorch\n本文作者为前谷歌高级工程师、AI 初创公司 Wavefront 创始人兼 CTO Dev Nag,介绍了他是如何用不到五十行代码,在 PyTorch 平台上完成对 GAN 的训练。\n什么是 GAN?\n在进入技术层面之前,为照顾新入门的开发者,先来介绍下什么是 GAN。\n2014 年,Ian Goodfellow 和他在蒙特利尔大学的同事发表了一篇震撼学界的论文。没错,我说的就是《Generative Adversarial Nets》,这标志着生成对抗网络(GAN)的诞生,而这是通过对计算图和博弈论的创新性结合。他们的研究展示,给定充分的建模能力,两个博弈模型能够通过简单的反向传播(backpropagation)来协同训练。\n这两个模型的角色定位十分鲜明。给定真实数据集 R,G 是生成器(generator),它的任务是生成能以假乱真的假数据;而 D 是判别器 (discriminator),它从真实数据集或者 G 那里获取数据, 然后做出判别真假的标记。Ian Goodfellow 的比喻是,G 就像一个赝品作坊,想要让做出来的东西尽可能接近真品,蒙混过关。而 D 就是文物鉴定专家,要能区分出真品和高仿(但在这个例子中,造假者 G 看不到原始数据,而只有 D 的鉴定结果——前者是在盲干)。\n理想情况下,D 和 G 都会随着不断训练,做得越来越好——直到 G 基本上成为了一个“赝品制造大师”,而 D 因无法正确区分两种数据分布输给 G。\n实践中,Ian Goodfellow 展示的这项技术在本质上是:G 能够对原始数据集进行一种无监督学习,找到以更低维度的方式(lower-dimensional manner)来表示数据的某种方法。而无监督学习之所以重要,就好像 Yann LeCun 的那句话:“无监督学习是蛋糕的糕体”。这句话中的蛋糕,指的是无数学者、开发者苦苦追寻的“真正的 AI”。\n开始之前,我们需要导入各种包,并且初始化变量", "# Generative Adversarial Networks (GAN) example in PyTorch.\n# See related blog post at https://medium.com/@devnag/generative-adversarial-networks-gans-in-50-lines-of-code-pytorch-e81b79659e3f#.sch4xgsa9\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n# Data params\ndata_mean = 4\ndata_stddev = 1.25\n\n# Model params\ng_input_size = 1 # Random noise dimension coming into generator, per output vector\ng_hidden_size = 50 # Generator complexity\ng_output_size = 1 # size of generated output vector\nd_input_size = 100 # Minibatch size - cardinality of distributions\nd_hidden_size = 50 # Discriminator complexity\nd_output_size = 1 # Single dimension for 'real' vs. 'fake'\nminibatch_size = d_input_size\n\nd_learning_rate = 2e-4 # 2e-4\ng_learning_rate = 2e-4\noptim_betas = (0.9, 0.999)\nnum_epochs = 33300\nprint_interval = 333\nd_steps = 1 # 'k' steps in the original GAN paper. 
Can put the discriminator on higher training freq than generator\ng_steps = 1\n\n# ### Uncomment only one of these\n#(name, preprocess, d_input_func) = (\"Raw data\", lambda data: data, lambda x: x)\n(name, preprocess, d_input_func) = (\"Data and variances\", lambda data: decorate_with_diffs(data, 2.0), lambda x: x * 2)\n\nprint(\"Using data [%s]\" % (name))\n", "用 PyTorch 训练 GAN\nDev Nag:在表面上,GAN 这门如此强大、复杂的技术,看起来需要编写天量的代码来执行,但事实未必如此。我们使用 PyTorch,能够在 50 行代码以内创建出简单的 GAN 模型。这之中,其实只有五个部分需要考虑:\n\n\nR:原始、真实数据集\n\n\nI:作为熵的一项来源,进入生成器的随机噪音\n\n\nG:生成器,试图模仿原始数据\n\n\nD:判别器,试图区别 G 的生成数据和 R\n\n\n我们教 G 糊弄 D、教 D 当心 G 的“训练”环。\n1.) R:在我们的例子里,从最简单的 R 着手——贝尔曲线(bell curve)。它把平均数(mean)和标准差(standard deviation)作为输入,然后输出能提供样本数据正确图形(从 Gaussian 用这些参数获得 )的函数。在我们的代码例子中,我们使用 4 的平均数和 1.25 的标准差。", "# ##### DATA: Target data and generator input data\n\ndef get_distribution_sampler(mu, sigma):\n return lambda n: torch.Tensor(np.random.normal(mu, sigma, (1, n))) # Gaussian", "2.) I:生成器的输入是随机的,为提高点难度,我们使用均匀分布(uniform distribution )而非标准分布。这意味着,我们的 Model G 不能简单地改变输入(放大/缩小、平移)来复制 R,而需要用非线性的方式来改造数据。", "\ndef get_generator_input_sampler():\n return lambda m, n: torch.rand(m, n) # Uniform-dist data into generator, _NOT_ Gaussian", "3.) G: 该生成器是个标准的前馈图(feedforward graph)——两层隐层,三个线性映射(linear maps)。我们使用了 ELU (exponential linear unit)。G 将从 I 获得平均分布的数据样本,然后找到某种方式来模仿 R 中标准分布的样本。", "# ##### MODELS: Generator model and discriminator model\n\nclass Generator(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Generator, self).__init__()\n self.map1 = nn.Linear(input_size, hidden_size)\n self.map2 = nn.Linear(hidden_size, hidden_size)\n self.map3 = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n x = F.elu(self.map1(x))\n x = F.sigmoid(self.map2(x))\n return self.map3(x)\n", "4.) 
D: 判别器的代码和 G 的生成器代码很接近。一个有两层隐层和三个线性映射的前馈图。它会从 R 或 G 那里获得样本,然后输出 0 或 1 的判别值,对应反例和正例。这几乎是神经网络的最弱版本了。", "class Discriminator(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Discriminator, self).__init__()\n self.map1 = nn.Linear(input_size, hidden_size)\n self.map2 = nn.Linear(hidden_size, hidden_size)\n self.map3 = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n x = F.elu(self.map1(x))\n x = F.elu(self.map2(x))\n return F.sigmoid(self.map3(x))\n\n\n# 还有一些其他的样板代码\ndef extract(v):\n return v.data.storage().tolist()\n\ndef stats(d):\n return [np.mean(d), np.std(d)]\n\ndef decorate_with_diffs(data, exponent):\n mean = torch.mean(data.data, 1, keepdim=True)\n mean_broadcast = torch.mul(torch.ones(data.size()), mean.tolist()[0][0])\n diffs = torch.pow(data - Variable(mean_broadcast), exponent)\n return torch.cat([data, diffs], 1)\n\nd_sampler = get_distribution_sampler(data_mean, data_stddev)\ngi_sampler = get_generator_input_sampler()\nG = Generator(input_size=g_input_size, hidden_size=g_hidden_size, output_size=g_output_size)\nD = Discriminator(input_size=d_input_func(d_input_size), hidden_size=d_hidden_size, output_size=d_output_size)\ncriterion = nn.BCELoss() # Binary cross entropy: http://pytorch.org/docs/nn.html#bceloss\nd_optimizer = optim.Adam(D.parameters(), lr=d_learning_rate, betas=optim_betas)\ng_optimizer = optim.Adam(G.parameters(), lr=g_learning_rate, betas=optim_betas)\n", "5.) 最后,训练环在两个模式中变幻:第一步,用被准确标记的真实数据 vs. 假数据训练 D;随后,训练 G 来骗过 D,这里是用的不准确标记。道友们,这是正邪之间的较量。\n即便你从没接触过 PyTorch,大概也能明白发生了什么。在第一部分(for d_index in range(d_steps)循环里),我们让两种类型的数据经过 D,并对 D 的猜测 vs. 真实标记执行不同的评判标准。这是 “forward” 那一步;随后我们需要 “backward()” 来计算梯度,然后把这用来在 d_optimizer step() 中更新 D 的参数。这里,G 被使用但尚未被训练。\n在最后的部分(for g_index in range(g_steps)循环里),我们对 G 执行同样的操作——注意我们要让 G 的输出穿过 D (这其实是送给造假者一个鉴定专家来练手)。但在这一步,我们并不优化、或者改变 D。我们不想让鉴定者 D 学习到错误的标记。因此,我们只执行 g_optimizer.step()。", "for epoch in range(num_epochs):\n for d_index in range(d_steps):\n # 1. 
Train D on real+fake\n D.zero_grad()\n\n # 1A: Train D on real\n d_real_data = Variable(d_sampler(d_input_size))\n d_real_decision = D(preprocess(d_real_data))\n d_real_error = criterion(d_real_decision, Variable(torch.ones(1))) # ones = true\n d_real_error.backward() # compute/store gradients, but don't change params\n\n # 1B: Train D on fake\n d_gen_input = Variable(gi_sampler(minibatch_size, g_input_size))\n d_fake_data = G(d_gen_input).detach() # detach to avoid training G on these labels\n d_fake_decision = D(preprocess(d_fake_data.t()))\n d_fake_error = criterion(d_fake_decision, Variable(torch.zeros(1))) # zeros = fake\n d_fake_error.backward()\n d_optimizer.step() # Only optimizes D's parameters; changes based on stored gradients from backward()\n\n for g_index in range(g_steps):\n # 2. Train G on D's response (but DO NOT train D on these labels)\n G.zero_grad()\n\n gen_input = Variable(gi_sampler(minibatch_size, g_input_size))\n g_fake_data = G(gen_input)\n dg_fake_decision = D(preprocess(g_fake_data.t()))\n g_error = criterion(dg_fake_decision, Variable(torch.ones(1))) # we want to fool, so pretend it's all genuine\n\n g_error.backward()\n g_optimizer.step() # Only optimizes G's parameters\n\n if epoch % print_interval == 0:\n print(\"epoch: %s : D: %s/%s G: %s (Real: %s, Fake: %s) \" % (epoch,\n extract(d_real_error)[0],\n extract(d_fake_error)[0],\n extract(g_error)[0],\n stats(extract(d_real_data)),\n stats(extract(d_fake_data))))", "在 D 和 G 之间几千轮交手之后,我们会得到什么?判别器 D 会快速改进,而 G 的进展要缓慢许多。但当模型达到一定性能之后,G 才有了个配得上的对手,并开始提升,巨幅提升。" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rawrgulmuffins/presentation_notes
pycon2016/tutorials/computation_statistics/effect_size.ipynb
mit
[ "Effect Size\nExamples and exercises for a tutorial on statistical inference.\nCopyright 2016 Allen Downey\nLicense: Creative Commons Attribution 4.0 International", "%matplotlib inline\nfrom __future__ import print_function, division\n\nimport numpy\nimport scipy.stats\n\nimport matplotlib.pyplot as pyplot\n\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\n\n# seed the random number generator so we all get the same results\nnumpy.random.seed(17)\n\n# some nice colors from http://colorbrewer2.org/\nCOLOR1 = '#7fc97f'\nCOLOR2 = '#beaed4'\nCOLOR3 = '#fdc086'\nCOLOR4 = '#ffff99'\nCOLOR5 = '#386cb0'\n\n", "Part One\nTo explore statistics that quantify effect size, we'll look at the difference in height between men and women. I used data from the Behavioral Risk Factor Surveillance System (BRFSS) to estimate the mean and standard deviation of height in cm for adult women and men in the U.S.\nI'll use scipy.stats.norm to represent the distributions. The result is an rv object (which stands for random variable).", "mu1, sig1 = 178, 7.7\nmale_height = scipy.stats.norm(mu1, sig1)\n\nmu2, sig2 = 163, 7.3\nfemale_height = scipy.stats.norm(mu2, sig2)", "The following function evaluates the normal (Gaussian) probability density function (PDF) within 4 standard deviations of the mean. It takes and rv object and returns a pair of NumPy arrays.", "def eval_pdf(rv, num=4):\n mean, std = rv.mean(), rv.std()\n xs = numpy.linspace(mean - num*std, mean + num*std, 100)\n ys = rv.pdf(xs)\n return xs, ys", "Here's what the two distributions look like.", "xs, ys = eval_pdf(male_height)\npyplot.plot(xs, ys, label='male', linewidth=4, color=COLOR2)\n\nxs, ys = eval_pdf(female_height)\npyplot.plot(xs, ys, label='female', linewidth=4, color=COLOR3)\npyplot.xlabel('height (cm)')\nNone", "Let's assume for now that those are the true distributions for the population.\nI'll use rvs to generate random samples from the population distributions. 
Note that these are totally random, totally representative samples, with no measurement error!", "male_sample = male_height.rvs(1000)\n\nfemale_sample = female_height.rvs(1000)", "Both samples are NumPy arrays. Now we can compute sample statistics like the mean and standard deviation.", "mean1, std1 = male_sample.mean(), male_sample.std()\nmean1, std1", "The sample mean is close to the population mean, but not exact, as expected.", "mean2, std2 = female_sample.mean(), female_sample.std()\nmean2, std2", "And the results are similar for the female sample.\nNow, there are many ways to describe the magnitude of the difference between these distributions. An obvious one is the difference in the means:", "difference_in_means = male_sample.mean() - female_sample.mean()\ndifference_in_means # in cm", "On average, men are 14--15 centimeters taller. For some applications, that would be a good way to describe the difference, but there are a few problems:\n\n\nWithout knowing more about the distributions (like the standard deviations) it's hard to interpret whether a difference like 15 cm is a lot or not.\n\n\nThe magnitude of the difference depends on the units of measure, making it hard to compare across different studies.\n\n\nThere are a number of ways to quantify the difference between distributions. A simple option is to express the difference as a percentage of the mean.\nExercise 1: what is the relative difference in means, expressed as a percentage?", "# Solution goes here\n(male_sample.mean()/ len(male_sample)) - (female_sample.mean() / len(female_sample))", "STOP HERE: We'll regroup and discuss before you move on.\nPart Two\nAn alternative way to express the difference between distributions is to see how much they overlap. To define overlap, we choose a threshold between the two means. 
The simple threshold is the midpoint between the means:", "simple_thresh = (mean1 + mean2) / 2\nsimple_thresh", "A better, but slightly more complicated threshold is the place where the PDFs cross.", "thresh = (std1 * mean2 + std2 * mean1) / (std1 + std2)\nthresh", "In this example, there's not much difference between the two thresholds.\nNow we can count how many men are below the threshold:", "male_below_thresh = sum(male_sample < thresh)\nmale_below_thresh", "And how many women are above it:", "female_above_thresh = sum(female_sample > thresh)\nfemale_above_thresh", "The \"overlap\" is the total area under the curves that ends up on the wrong side of the threshold.", "overlap = male_below_thresh / len(male_sample) + female_above_thresh / len(female_sample)\noverlap", "Or in more practical terms, you might report the fraction of people who would be misclassified if you tried to use height to guess sex:", "misclassification_rate = overlap / 2\nmisclassification_rate", "Another way to quantify the difference between distributions is what's called \"probability of superiority\", which is a problematic term, but in this context it's the probability that a randomly-chosen man is taller than a randomly-chosen woman.\nExercise 2: Suppose I choose a man and a woman at random. What is the probability that the man is taller?", "# Solution goes here\nsum(male_sample > female_sample) / len(male_sample)", "Overlap (or misclassification rate) and \"probability of superiority\" have two good properties:\n\n\nAs probabilities, they don't depend on units of measure, so they are comparable between studies.\n\n\nThey are expressed in operational terms, so a reader has a sense of what practical effect the difference makes.\n\n\nCohen's d\nThere is one other common way to express the difference between distributions. Cohen's $d$ is the difference in means, standardized by dividing by the standard deviation. 
Here's a function that computes it:", "def CohenEffectSize(group1, group2):\n \"\"\"Compute Cohen's d.\n\n group1: Series or NumPy array\n group2: Series or NumPy array\n\n returns: float\n \"\"\"\n diff = group1.mean() - group2.mean()\n\n n1, n2 = len(group1), len(group2)\n var1 = group1.var()\n var2 = group2.var()\n\n pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)\n d = diff / numpy.sqrt(pooled_var)\n return d", "Computing the denominator is a little complicated; in fact, people have proposed several ways to do it. This implementation uses the \"pooled standard deviation\", which is a weighted average of the standard deviations of the two groups.\nAnd here's the result for the difference in height between men and women.", "CohenEffectSize(male_sample, female_sample)", "Most people don't have a good sense of how big $d=1.9$ is, so let's make a visualization to get calibrated.\nHere's a function that encapsulates the code we already saw for computing overlap and probability of superiority.", "def overlap_superiority(control, treatment, n=1000):\n \"\"\"Estimates overlap and superiority based on a sample.\n \n control: scipy.stats rv object\n treatment: scipy.stats rv object\n n: sample size\n \"\"\"\n control_sample = control.rvs(n)\n treatment_sample = treatment.rvs(n)\n thresh = (control.mean() + treatment.mean()) / 2\n \n control_above = sum(control_sample > thresh)\n treatment_below = sum(treatment_sample < thresh)\n overlap = (control_above + treatment_below) / n\n \n superiority = sum(x > y for x, y in zip(treatment_sample, control_sample)) / n\n return overlap, superiority", "Here's the function that takes Cohen's $d$, plots normal distributions with the given effect size, and prints their overlap and superiority.", "def plot_pdfs(cohen_d=2):\n \"\"\"Plot PDFs for distributions that differ by some number of stds.\n \n cohen_d: number of standard deviations between the means\n \"\"\"\n control = scipy.stats.norm(0, 1)\n treatment = scipy.stats.norm(cohen_d, 
1)\n xs, ys = eval_pdf(control)\n pyplot.fill_between(xs, ys, label='control', color=COLOR3, alpha=0.7)\n\n xs, ys = eval_pdf(treatment)\n pyplot.fill_between(xs, ys, label='treatment', color=COLOR2, alpha=0.7)\n \n o, s = overlap_superiority(control, treatment)\n print('overlap', o)\n print('superiority', s)", "Here's an example that demonstrates the function:", "plot_pdfs(2)", "And an interactive widget you can use to visualize what different values of $d$ mean:", "slider = widgets.FloatSlider(min=0, max=4, value=2)\ninteract(plot_pdfs, cohen_d=slider)\nNone", "Cohen's $d$ has a few nice properties:\n\n\nBecause mean and standard deviation have the same units, their ratio is dimensionless, so we can compare $d$ across different studies.\n\n\nIn fields that commonly use $d$, people are calibrated to know what values should be considered big, surprising, or important.\n\n\nGiven $d$ (and the assumption that the distributions are normal), you can compute overlap, superiority, and related statistics.\n\n\nIn summary, the best way to report effect size often depends on the audience and your goals. There is often a tradeoff between summary statistics that have good technical properties and statistics that are meaningful to a general audience." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jmschrei/pomegranate
benchmarks/pomegranate_vs_sklearn_naive_bayes.ipynb
mit
[ "pomegranate / sklearn Naive Bayes comparison\nauthors: <br>\nNicholas Farn (nicholasfarn@gmail.com) <br>\nJacob Schreiber (jmschreiber91@gmail.com)\n<a href=\"https://github.com/scikit-learn/scikit-learn\">sklearn</a> is a very popular machine learning package for Python which implements a wide variety of classical machine learning algorithms. In this notebook we benchmark the Naive Bayes implementations in pomegranate and compare it to the implementation in sklearn.", "%pylab inline\nimport seaborn, time\nseaborn.set_style('whitegrid')\n\nfrom sklearn.naive_bayes import GaussianNB\nfrom pomegranate import *", "Lets first define a function which will create a dataset to train on. We want to be able to test a range of datasets, from very small to very large, to see which implementation is faster. We also want a function which will take in the models and evaluate them. Lets define both of those now.", "def create_dataset(n_samples, n_dim, n_classes):\n \"\"\"Create a random dataset with n_samples in each class.\"\"\"\n \n X = numpy.concatenate([numpy.random.randn(n_samples, n_dim) + i for i in range(n_classes)])\n y = numpy.concatenate([numpy.zeros(n_samples) + i for i in range(n_classes)])\n return X, y\n\ndef plot(fit, predict, skl_error, pom_error, sizes, xlabel):\n \"\"\"Plot the results.\"\"\"\n \n idx = numpy.arange(fit.shape[1])\n \n plt.figure(figsize=(14, 4))\n plt.plot(fit.mean(axis=0), c='c', label=\"Fitting\")\n plt.plot(predict.mean(axis=0), c='m', label=\"Prediction\")\n plt.plot([0, fit.shape[1]], [1, 1], c='k', label=\"Baseline\")\n \n plt.fill_between(idx, fit.min(axis=0), fit.max(axis=0), color='c', alpha=0.3)\n plt.fill_between(idx, predict.min(axis=0), predict.max(axis=0), color='m', alpha=0.3)\n \n plt.xticks(idx, sizes, rotation=65, fontsize=14)\n plt.xlabel('{}'.format(xlabel), fontsize=14)\n plt.ylabel('pomegranate is x times faster', fontsize=14)\n plt.legend(fontsize=12, loc=4)\n plt.show()\n \n \n plt.figure(figsize=(14, 4))\n plt.plot(1 
- skl_error.mean(axis=0), alpha=0.5, c='c', label=\"sklearn accuracy\")\n plt.plot(1 - pom_error.mean(axis=0), alpha=0.5, c='m', label=\"pomegranate accuracy\")\n \n plt.fill_between(idx, 1-skl_error.min(axis=0), 1-skl_error.max(axis=0), color='c', alpha=0.3)\n plt.fill_between(idx, 1-pom_error.min(axis=0), 1-pom_error.max(axis=0), color='m', alpha=0.3)\n \n plt.xticks(idx, sizes, rotation=65, fontsize=14)\n plt.xlabel('{}'.format(xlabel), fontsize=14)\n plt.ylabel('Accuracy', fontsize=14)\n plt.legend(fontsize=14) \n plt.show()", "Lets look first at single dimension Gaussian datasets. We'll look at how many times faster pomegranate is, which means that values > 1 show pomegranate is faster and < 1 show pomegranate is slower. Lets also look at the accuracy of both algorithms. They should have the same accuracy since they implement the same algorithm.", "sizes = numpy.around(numpy.exp(numpy.arange(8, 16))).astype('int')\nn, m = sizes.shape[0], 20\n\nskl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))\n\nfor i in range(m):\n for j, size in enumerate(sizes):\n X, y = create_dataset(size, 1, 2)\n\n # bench fit times\n tic = time.time()\n skl = GaussianNB()\n skl.fit(X, y)\n skl_fit[i, j] = time.time() - tic\n\n tic = time.time()\n pom = NaiveBayes.from_samples(NormalDistribution, X, y)\n pom_fit[i, j] = time.time() - tic\n\n # bench predict times\n tic = time.time()\n skl_predictions = skl.predict(X)\n skl_predict[i, j] = time.time() - tic\n\n tic = time.time()\n pom_predictions = pom.predict(X)\n pom_predict[i, j] = time.time() - tic\n\n # check number wrong\n skl_e = (y != skl_predictions).mean()\n pom_e = (y != pom_predictions).mean()\n\n skl_error[i, j] = min(skl_e, 1-skl_e)\n pom_error[i, j] = min(pom_e, 1-pom_e)\n\nfit = skl_fit / pom_fit\npredict = skl_predict / pom_predict\n\nplot(fit, predict, skl_error, pom_error, 
sizes, \"samples per component\")", "It looks as if pomegranate is approximately the same speed for training small models but that the prediction time can be a lot faster in pomegranate than in sklearn.\nNow let's take a look at how speeds change as we increase the number of classes that need to be predicted rather than phrasing all of the comparisons on binary classification.", "sizes = numpy.arange(2, 21).astype('int')\nn, m = sizes.shape[0], 20\n\nskl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))\n\nfor i in range(m):\n for j, size in enumerate(sizes):\n X, y = create_dataset(50000 // size, 1, size)\n\n # bench fit times\n tic = time.time()\n skl = GaussianNB()\n skl.fit(X, y)\n skl_fit[i, j] = time.time() - tic\n\n tic = time.time()\n pom = NaiveBayes.from_samples(NormalDistribution, X, y)\n pom_fit[i, j] = time.time() - tic\n\n # bench predict times\n tic = time.time()\n skl_predictions = skl.predict(X)\n skl_predict[i, j] = time.time() - tic\n\n tic = time.time()\n pom_predictions = pom.predict(X)\n pom_predict[i, j] = time.time() - tic\n\n # check number wrong\n skl_e = (y != skl_predictions).mean()\n pom_e = (y != pom_predictions).mean()\n\n skl_error[i, j] = min(skl_e, 1-skl_e)\n pom_error[i, j] = min(pom_e, 1-pom_e)\n\nfit = skl_fit / pom_fit\npredict = skl_predict / pom_predict\n\nplot(fit, predict, skl_error, pom_error, sizes, \"number of classes\")", "It looks like, again, pomegranate is around the same speed as sklearn for fitting models, but that it is consistently much faster to make predictions.", "X, y = create_dataset(50000, 1, 2)\nskl = GaussianNB()\nskl.fit(X, y)\n\npom = NaiveBayes.from_samples(NormalDistribution, X, y)\n\n%timeit skl.predict(X)\n%timeit pom.predict(X)", "This does show that pomegranate is faster at making predictions but that both are so fast that potentially it doesn't really 
matter.\nWhile it's good to start off by looking at naive Bayes' models defined on single features, the more common setting is one where you have many features. Let's look take a look at the relative speeds on larger number of examples when there are 5 features rather than a single one.", "sizes = numpy.around(numpy.exp(numpy.arange(8, 16))).astype('int')\nn, m = sizes.shape[0], 20\n\nskl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))\n\nfor i in range(m):\n for j, size in enumerate(sizes):\n X, y = create_dataset(size, 5, 2)\n\n # bench fit times\n tic = time.time()\n skl = GaussianNB()\n skl.fit(X, y)\n skl_fit[i, j] = time.time() - tic\n\n tic = time.time()\n pom = NaiveBayes.from_samples(NormalDistribution, X, y)\n pom_fit[i, j] = time.time() - tic\n\n # bench predict times\n tic = time.time()\n skl_predictions = skl.predict(X)\n skl_predict[i, j] = time.time() - tic\n\n tic = time.time()\n pom_predictions = pom.predict(X)\n pom_predict[i, j] = time.time() - tic\n\n # check number wrong\n skl_e = (y != skl_predictions).mean()\n pom_e = (y != pom_predictions).mean()\n\n skl_error[i, j] = min(skl_e, 1-skl_e)\n pom_error[i, j] = min(pom_e, 1-pom_e)\n\nfit = skl_fit / pom_fit\npredict = skl_predict / pom_predict\n\nplot(fit, predict, skl_error, pom_error, sizes, \"samples per component\")", "It looks like pomegranate can be around twice as fast at fitting multivariate Gaussian Naive Bayes models than sklearn when there is more than one feature.\nFinally lets show an increasing number of dimensions with a fixed set of 10 classes and 50,000 samples per class.", "sizes = numpy.arange(5, 101, 5).astype('int')\nn, m = sizes.shape[0], 20\n\nskl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, 
n))\n\nfor i in range(m):\n for j, size in enumerate(sizes):\n X, y = create_dataset(50000, size, 2)\n\n # bench fit times\n tic = time.time()\n skl = GaussianNB()\n skl.fit(X, y)\n skl_fit[i, j] = time.time() - tic\n\n tic = time.time()\n pom = NaiveBayes.from_samples(NormalDistribution, X, y)\n pom_fit[i, j] = time.time() - tic\n\n # bench predict times\n tic = time.time()\n skl_predictions = skl.predict(X)\n skl_predict[i, j] = time.time() - tic\n\n tic = time.time()\n pom_predictions = pom.predict(X)\n pom_predict[i, j] = time.time() - tic\n\n # check number wrong\n skl_e = (y != skl_predictions).mean()\n pom_e = (y != pom_predictions).mean()\n\n skl_error[i, j] = min(skl_e, 1-skl_e)\n pom_error[i, j] = min(pom_e, 1-pom_e)\n\nfit = skl_fit / pom_fit\npredict = skl_predict / pom_predict\n\nplot(fit, predict, skl_error, pom_error, sizes, \"dimensions\")", "Looks like pomegranate is consistently faster than sklearn at fitting the model but conveges to be approximately the same speed at making predictions in the high dimensional setting. Their accuracies remain identical indicating that the two are learning the same model.\nOut of Core Training\nLastly, both pomegranate and sklearn allow for out of core training by fitting on chunks of a dataset. pomegranate does this by calculating summary statistics on the dataset which are enough to allow for exact parameter updates to be done. sklearn implements this using the model.partial_fit(X, y) API call, whereas pomegranate uses model.summarize(X, y) followed by model.from_summaries() to update the internal parameters. 
\nLets compare how long each method takes to train on 25 batches of increasing sizes and the accuracy of both methods.", "sizes = numpy.around( numpy.exp( numpy.arange(8, 16) ) ).astype('int')\nn, m = sizes.shape[0], 20\n\nskl_time, pom_time = numpy.zeros((m, n)), numpy.zeros((m, n))\nskl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))\n\nfor i in range(m):\n for j, size in enumerate(sizes):\n skl = GaussianNB()\n pom = NaiveBayes([IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)]),\n IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)])])\n \n for l in range(5):\n X, y = create_dataset(size, 5, 2)\n\n tic = time.time()\n skl.partial_fit(X, y, classes=[0, 1])\n skl_time[i, j] += time.time() - tic\n\n tic = time.time()\n pom.summarize( X, y )\n pom_time[i, j] += time.time() - tic\n\n tic = time.time()\n pom.from_summaries()\n pom_time[i, j] += time.time() - tic\n\n skl_predictions = skl.predict( X )\n pom_predictions = pom.predict( X )\n\n skl_error[i, j] = ( y != skl_predictions ).mean()\n pom_error[i, j] = ( y != pom_predictions ).mean()\n\nfit = skl_time / pom_time\nidx = numpy.arange(fit.shape[1])\n\nplt.figure( figsize=(14, 4))\nplt.plot( fit.mean(axis=0), c='c', label=\"Fitting\")\nplt.plot( [0, fit.shape[1]], [1, 1], c='k', label=\"Baseline\" )\nplt.fill_between( idx, fit.min(axis=0), fit.max(axis=0), color='c', alpha=0.3 )\n\nplt.xticks(idx, sizes, rotation=65, fontsize=14)\nplt.xlabel('{}'.format(xlabel), fontsize=14)\nplt.ylabel('pomegranate is x times faster', fontsize=14)\nplt.legend(fontsize=12, loc=4)\nplt.show()\n\nplt.figure( figsize=(14, 4))\nplt.plot( 1 - skl_error.mean(axis=0), alpha=0.5, c='c', label=\"sklearn accuracy\" )\nplt.plot( 1 - pom_error.mean(axis=0), alpha=0.5, c='m', label=\"pomegranate accuracy\" )\n\nplt.fill_between( idx, 1-skl_error.min(axis=0), 1-skl_error.max(axis=0), color='c', alpha=0.3 )\nplt.fill_between( idx, 1-pom_error.min(axis=0), 1-pom_error.max(axis=0), 
color='m', alpha=0.3 )\n\nplt.xticks( idx, sizes, rotation=65, fontsize=14)\nplt.xlabel('Batch Size', fontsize=14)\nplt.ylabel('Accuracy', fontsize=14)\nplt.legend(fontsize=14) \nplt.show()", "pomegranate seems to be much faster at doing out-of-core training. The out of core API of calculating sufficient statistics using summarize and then updating the model parameters using from_summaries extends to all models in pomegranate. \nIn this notebook we compared an intersection of the features that pomegranate and sklearn offer. pomegranate allows you to use Naive Bayes with any distribution or model object which has an exposed log_probability and fit method. This allows you to do things such as compare hidden Markov models to each other, or compare a hidden Markov model to a Markov Chain to see which one models the data better. \nWe hope this has been useful to you! If you're interested in using pomegranate, you can get it using pip install pomegranate or by checking out the <a href=\"https://github.com/jmschrei/pomegranate\">github repo.</a>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kbennion/foundations-hw
07-notebook-and-data/Homework7.ipynb
mit
[ "import pandas as pd\n\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\ndf = pd.read_csv(\"07-hw-animals.csv\")\nprint(df)\n\nprint(df.columns.values)\n\nprint(df['animal'])\n\nprint(df[:3])\n\nprint(df)\n\nprint(df.sort_values(by='length', ascending=0)[:3])\n\nprint(df['animal'])\n\nprint(df['animal'].value_counts())\n\ndogs = df[df['animal'] == 'dog']\ndogs\n\ndf[df['length'] > 40]\n\ndf['inches'] = df['length'] * .394\ndf\n\ncats = df[df['animal'] == 'cat']\ncats\n\ndogs = df[df['animal'] == 'dog']\ndogs\n\ncats[cats['inches'] > 12]\n\ndf[df['inches'] > 12]\ndf[df['animal'] == 'cat']\n#another way: df[(df['animal'] == 'cat') & (df['inches'] > 12)]\n\ncats['length'].mean()\n#see also: cats['length'].describe()\n\ndogs['length'].mean()\n\ndf.groupby('animal')['length'].mean()\n\ndogs['length'].hist()\n\ndogs.plot(kind='scatter', x='length', y='inches')\n\ndf.plot(kind='barh', x='name', y='length', legend=False)\n\nsortcats = (cats.sort_values(by='length', ascending=0))\nsortcats.plot(kind='barh', x='name', y='length', legend=False, sort_columns=False)\n#alternately! df[df['animal'] == 'cat'].sort_values(by='length').plot(kind='barh', x='name', y='length')\n\ncats\n\nimport pandas as pd\ndf = pd.read_excel(\"richpeople.xlsx\")", "What country are most billionaires from? For the top ones, how many billionaires per billion people?\nWho are the top 10 richest billionaires?\nWhat's the average wealth of a billionaire? Male? Female?\nWho is the poorest billionaire? Who are the top 10 poorest billionaires?\n'What is relationship to company'? And what are the most common relationships?\nMost common source of wealth? Male vs. female?\nGiven the richest person in a country, what % of the GDP is their wealth?\nAdd up the wealth of all of the billionaires in a given country (or a few countries) and then compare it to the GDP of the country, or other billionaires, so like pit the US vs India\nWhat are the most common industries for billionaires to come from? 
What's the total amount of billionaire money from each industry?\nHow many self made billionaires vs. others?\nHow old are billionaires? How old are billionaires self made vs. non self made? or different industries?\nWho are the youngest billionaires? The oldest? Age distribution - maybe make a graph about it?\nMaybe just made a graph about how wealthy they are in general?\nMaybe plot their net worth vs age (scatterplot)\nMake a bar graph of the top 10 or 20 richest\nHow many female billionaires are there compared to male? What industries are they from? What is their average wealth?", "import matplotlib.pyplot as plt\n%matplotlib inline\n\nprint(df['gender'].value_counts())\n\ndf.groupby('gender')['networthusbillion'].mean()\n\ndf.groupby('gender')['sourceofwealth'].value_counts()", "Some examples from the review", "df.columns.values\ndf['countrycode'].value_counts()\n\ndf.sort_values(by='networthusbillion', ascending=False).head(10)\n\n#Who is the poorest billionaire? Top ten poorest?\ndf.sort_values(by='rank', ascending=False).head(2)\n\ndf[df['networthusbillion'] == 1]\n\ndf['networthusbillion'].describe()\n\ndf.groupby(\"gender\")[\"networthusbillion\"].describe()\n\n#Adding the wealt of all the billionaires in a given country.\ndf.groupby('countrycode')['networthusbillion'].sum().sort_values(ascending=False)\n\n#What are the most common industries for billionaires to come from?\ndf['industry'].value_counts()\n\ndf.groupby('industry')['networthusbillion'].sum()\n\nyoung_bills = df[df['age'] < 40]\nyoung_bills.plot(kind='barh', x='name', y='networthusbillion').sort_values()\n\nimport pandas as pd\nimport matplotlib.pplot as plt\nplt.style.use(\"ggplot\")\n%matplot" ]
[ "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/miroc/cmip6/models/nicam16-7s/seaice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: MIROC\nSource ID: NICAM16-7S\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-20 15:02:40\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'miroc', 'nicam16-7s', 'seaice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Model\n2. Key Properties --&gt; Variables\n3. Key Properties --&gt; Seawater Properties\n4. Key Properties --&gt; Resolution\n5. Key Properties --&gt; Tuning Applied\n6. Key Properties --&gt; Key Parameter Values\n7. Key Properties --&gt; Assumptions\n8. Key Properties --&gt; Conservation\n9. Grid --&gt; Discretisation --&gt; Horizontal\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Seaice Categories\n12. Grid --&gt; Snow On Seaice\n13. Dynamics\n14. Thermodynamics --&gt; Energy\n15. Thermodynamics --&gt; Mass\n16. Thermodynamics --&gt; Salt\n17. Thermodynamics --&gt; Salt --&gt; Mass Transport\n18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\n19. Thermodynamics --&gt; Ice Thickness Distribution\n20. Thermodynamics --&gt; Ice Floe Size Distribution\n21. Thermodynamics --&gt; Melt Ponds\n22. Thermodynamics --&gt; Snow Processes\n23. Radiative Processes \n1. 
Key Properties --&gt; Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of sea ice model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the sea ice component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Ocean Freezing Point Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Target\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Simulations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. 
Metrics Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any observed metrics used in tuning model/parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.5. Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhich variables were changed during the tuning process?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nWhat values were specificed for the following parameters if used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Additional Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. 
Key Properties --&gt; Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. On Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Missing Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nProvide a general description of conservation methodology.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. 
Properties\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Was Flux Correction Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes conservation involved flux correction?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Grid --&gt; Discretisation --&gt; Horizontal\nSea ice discretisation in the horizontal\n9.1. 
Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGrid on which sea ice is horizontal discretised?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the type of sea ice grid?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the advection scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.4. Thermodynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.5. 
Dynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.6. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional horizontal discretisation details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Number Of Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using multi-layers specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "10.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional vertical grid details.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Grid --&gt; Seaice Categories\nWhat method is used to represent sea ice categories ?\n11.1. Has Mulitple Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11.2. Number Of Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Category Limits\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. 
Other\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Grid --&gt; Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow on ice represented in this model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Number Of Snow Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels of snow on ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.3. Snow Fraction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.4. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional details related to snow on ice.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Transport In Thickness Space\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Ice Strength Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich method of sea ice strength formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Rheology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRheology, what is the ice deformation formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Thermodynamics --&gt; Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. Enthalpy Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the energy formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Thermal Conductivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of thermal conductivity is used?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of heat diffusion?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.4. Basal Heat Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.5. Fixed Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.6. 
Heat Content Of Precipitation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.7. Precipitation Effects On Salinity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. Thermodynamics --&gt; Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Ice Vertical Growth And Melt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Ice Lateral Melting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice lateral melting?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Ice Surface Sublimation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.5. Frazil Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of frazil ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Thermodynamics --&gt; Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17. Thermodynamics --&gt; Salt --&gt; Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Thermodynamics --&gt; Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice thickness distribution represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Thermodynamics --&gt; Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. 
Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice floe-size represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Thermodynamics --&gt; Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre melt ponds included in the sea ice model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21.2. Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat method of melt pond formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.3. 
Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat do melt ponds have an impact on?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Thermodynamics --&gt; Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.2. Snow Aging Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Has Snow Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.4. Snow Ice Formation Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow ice formation scheme.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.5. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the impact of ridging on snow cover?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.6. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used to handle surface albedo.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Ice Radiation Transmission\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
setiQuest/ML4SETI
results/effsubsee_seti_code_challenge_1stPlace.ipynb
apache-2.0
[ "ML4SETI Code Challenge Winning Model\nThis notebook shows you how to run the winning model from the ML4SETI code challenge; a public code challenge issued by IBM and the SETI Insititute in the summer of 2017. The challenge was to build the best signal classification model from a set of simulated (and labeled) radio-telescope data files. These time-series simulated measurements, much like the real data acquired by the SETI Institute during observations at the Allen Telescope Array, were converted to spectrograms, represented as 2D images, and used to train various machine-learning models. \nThe 1st place team, Effsubsee, achieved a classification accuracy of 94.9% used \"an averaged ensemble of 5 Wide Residual Networks, trained on different sets of 4(/5) folds, each with a depth of 34 (convolutional layers) and a widening factor of 2.\" (NB: Effsubsee, is $F_{c}$, from the Drake Equation, which represents \"The fraction of civilizations that develop a technology that releases detectable signs of their existence into space.\")\nThe code below will install the necessary Python packages, Effsubsee's model, and demonstrate how to use that model to classify a simulated data file from one of the test sets. \n<br>\nInstall Packages", "# Uncomment and run this one time only\n\n# !pip install http://download.pytorch.org/whl/cu75/torch-0.1.12.post2-cp27-none-linux_x86_64.whl\n# !pip install torchvision==0.1.8\n# !pip install tabulate\n# !pip install --upgrade scikit-learn\n# !pip install --upgrade numpy\n# !pip install h5py\n# !pip install ibmseti\n# !pip install tqdm\n# !pip install --upgrade pandas", "<br>\nDownload Effsubsee's model\nModel stored in IBM Object Storage\nThe parameters for our models have been placed in an IBM Cloud Object Storage service instance. 
The Access Control Lists for the containers in Object Storage have been set such that the objects in those containers are publicly available.", "# Uncomment and run this one time only!\n\n# from __future__ import print_function\n# import requests\n# import shutil\n\n# base_url = 'https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/code_challenge_models/effsubsee'\n# for i in range(1,6):\n# r = requests.get('{0}/fold{1}/FOLD{1}_BEST_wresnet34x2_batchsize96_checkpoint.pth.tar'.format(base_url, i), stream=True)\n# filename = 'effsubsee_FOLD{}_BEST_wresnet34x2_batchsize96_checkpoint.pth.tar'.format(i)\n# with open(filename, 'wb') as fout:\n# shutil.copyfileobj(r.raw, fout)\n# print('saved {}'.format(filename))\n\n# Uncomment and run this once\n\n# !wget -O mean_stddev_primary_full_v3__384t__512f__logmod2-ph.hdf5 https://github.com/sgrvinod/ml4seti-Effsubsee/blob/master/folds/mean_stddev_primary_full_v3__384t__512f__logmod2-ph.hdf5?raw=true", "<br>\nDownload the Preview Test Set", "# Uncomment and run this one time only\n\n# !wget https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v3_zipped/primary_testset_preview_v3.zip\n# !unzip -q primary_testset_preview_v3.zip\n# !ls", "<br>\nRestart Your Kernel\nAfter you've pip installed the packages above, you'll need to restart your kernel. 
\n\nComment out the code in the cells above (within a cell you drag and select the lines of code then press Command+'/', or Ctrl+'/', to comment and uncomment entire blocks of code)\nIn the menu above select Kernel -> Restart.\nRun the cells below\n\nAdapted from https://github.com/sgrvinod/ml4seti-Effsubsee\nThis code, for now, is found in https://github.com/gadamc/ml4seti-Effsubsee/", "import math\nfrom torch import nn\n\n\nclass BasicBlock(nn.Module):\n \"\"\"\n Graph of the Basic Block, as defined in the paper.\n This block contains two 3x3 convolutional layers, each with prior Batch Norm and ReLU.\n There is an additive residual connection across the block.\n If the number of dimensions change across the block, this residual is a convolutional projection of the input.\n Args:\n inplanes (int): number of dimensions in the input tensor.\n outplanes (int): number of dimensions in the output tensor.\n stride (int): stride length for the filter.\n dropout (float, fraction): the fraction of neurons to randomly drop/set to zero in-between conv. 
layers.\n \"\"\"\n\n def __init__(self, inplanes, outplanes, stride, dropout=0.0):\n super(BasicBlock, self).__init__()\n\n self.inplanes = inplanes\n self.outplanes = outplanes\n\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(outplanes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.dropout = dropout\n if self.inplanes != self.outplanes:\n self.projection = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride, padding=0, bias=False)\n else:\n self.projection = None\n\n def forward(self, x):\n out = self.bn1(x)\n out = self.relu1(out)\n if self.inplanes != self.outplanes:\n residual = self.projection(out)\n else:\n residual = x\n out = self.conv1(out)\n out = self.bn2(out)\n out = self.relu2(out)\n if self.dropout > 0.:\n out = nn.functional.dropout(out, p=self.dropout, training=self.training)\n out = self.conv2(out)\n out += residual\n return out\n\n\n\n\nclass WideResNet(nn.Module):\n \"\"\"\n Graph of the Wide Residual Network, as defined in the paper.\n This network contains 4 convolutional blocks, each increasing dimensions by a factor of 'k':\n The first is a single 3x3 Convolution, increasing dimensions from 2 (log(amplitude^2), phase) to 16.\n The second is a sequence of Basic Blocks, 16 dimensions -> 16*k\n The third is a sequence of Basic Blocks, 16*k dimensions -> 16*k^2\n The fourth is a sequence of Basic Blocks, 16*k dimensions -> 16*k^3\n These convolutional layers are followed by Batch Norm, ReLU, Average Pool, and finally a Fully Connected Layer\n to perform the classification.\n Args:\n n (int): number of single convolutional layers in the entire network, 'n' in the paper.\n k (int): widening factor for each succeeding convolutional layer, 'k' in the paper.\n block (nn.module): BasicBlock.\n dropout 
(float, fraction): the fraction of neurons to randomly drop/set to zero inside the blocks.\n \"\"\"\n\n def __init__(self, n, k, block=BasicBlock, dropout=0.0):\n super(WideResNet, self).__init__()\n\n if (n - 4) % 6 != 0:\n raise ValueError(\"Invalid depth! Depth must be (6 * n_blocks + 4).\")\n n_blocks = (n - 4) / 6\n\n self.conv_block1 = nn.Conv2d(2, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.conv_block2 = self._make_layer(block, n_blocks, 16, 16 * k, 2, dropout)\n self.conv_block3 = self._make_layer(block, n_blocks, 16 * k, 32 * k, 2, dropout)\n self.conv_block4 = self._make_layer(block, n_blocks, 32 * k, 64 * k, 2, dropout)\n self.bn1 = nn.BatchNorm2d(64 * k)\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(64 * k * 6 * 8, 7)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n_weights = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n_weights))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def _make_layer(self, block, n_blocks, inplanes, outplanes, stride, dropout):\n \"\"\"\n Graph of a Convolutional block layer (conv_block2/conv_block3/conv_block4), as defined in the paper.\n This graph assembles a number of blocks (BasicBlock) in sequence.\n Args:\n block (nn.module): BasicBlock or ResidualBlock.\n inplanes (int): number of dimensions in the input tensor.\n outplanes (int): number of dimensions in the output tensor.\n stride (int): stride length for the filter.\n dropout (float, fraction): the fraction of neurons to randomly drop/set to zero in-between conv. 
layers.\n \"\"\"\n layers = []\n for i in range(n_blocks):\n if i == 0:\n layers.append(block(inplanes, outplanes, stride, dropout))\n else:\n layers.append(block(outplanes, outplanes, 1, dropout))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv_block1(x)\n out = self.conv_block2(out)\n out = self.conv_block3(out)\n out = self.conv_block4(out)\n out = self.bn1(out)\n out = self.relu(out)\n out = nn.functional.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n return self.fc(out)\n\n\ndef wresnet34x2():\n model = WideResNet(n=34, k=2, block=BasicBlock, dropout=0.3)\n return model\n\n\nfrom __future__ import print_function\nimport argparse\nimport os\nimport time\nimport torch\nimport torchvision.transforms as transforms\nimport pandas as pd\nimport ibmseti\nimport numpy as np\nimport ibmseti\nimport h5py\n\n \ndef normalizeSimFile(normalizeData, simfile):\n # Load the Normalizer function\n h = h5py.File(normalizeData, 'r')\n mean = torch.FloatTensor(h['mean'][:])\n mean = mean.permute(2, 0, 1)\n std_dev = torch.FloatTensor(h['std_dev'][:])\n std_dev = std_dev.permute(2, 0, 1)\n h.close()\n normalize = transforms.Normalize(mean=mean,\n std=std_dev)\n\n # Load simulation data\n time_freq_resolution=(384, 512)\n aca = ibmseti.compamp.SimCompamp(open(simfile, 'rb').read())\n complex_data = aca.complex_data()\n complex_data = complex_data.reshape(time_freq_resolution[0], time_freq_resolution[1])\n complex_data = complex_data * np.hanning(complex_data.shape[1])\n cpfft = np.fft.fftshift(np.fft.fft(complex_data), 1)\n spectrogram = np.abs(cpfft)\n features = np.stack((np.log(spectrogram ** 2),\n np.arctan(cpfft.imag / cpfft.real)), -1)\n\n\n # create FloatTensor, permute to proper dimensional order, and normalize\n data = torch.FloatTensor(features)\n data = data.permute(2, 0, 1)\n data = normalize(data)\n\n # The model expects a 4D tensor\n s = data.size()\n data = data.contiguous().view(1, s[0], s[1], s[2])\n \n input_var = 
torch.autograd.Variable(data, volatile=True)\n \n return input_var\n\n\ndef singleProbs(model, input_var):\n \"\"\"\n \"\"\"\n\n model.eval()\n\n softmax = torch.nn.Softmax()\n softmax.zero_grad()\n output = model(input_var)\n probs = softmax(output).data.view(7).tolist()\n \n return probs\n", "Select a simulation file to test\nYou can change the simfile to any of the ~2500 files you choose in the primary_testset_preview_v3 folder", "#!ls primary_testset_preview_v3/*\n\nsimfile = 'primary_testset_preview_v3/00b3b8fdb14ce41f341dbe251f476093.dat'", "Load the parameters for the models", "allFolds = []\n\ndef loadFoldParams(modelcheckpoint):\n \n model = wresnet34x2().cpu()\n\n if os.path.isfile(modelcheckpoint):\n print(\"=> Loading checkpoint '{}'\".format(modelcheckpoint))\n checkpoint = torch.load(modelcheckpoint, map_location=lambda storage, loc: storage)\n best_acc = checkpoint['best_acc']\n print(\"This model had an accuracy of %.2f on the validation set.\" % (best_acc,))\n keys = checkpoint['state_dict'].keys()\n for old_key in keys:\n new_key = old_key.replace('module.', '')\n checkpoint['state_dict'][new_key] = checkpoint['state_dict'].pop(old_key)\n model.load_state_dict(checkpoint['state_dict'])\n print(\"=> Loaded checkpoint '{}' (epoch {})\"\n .format(modelcheckpoint, checkpoint['epoch']))\n else:\n print(\"=> No model checkpoint found. 
Exiting\")\n return \n \n allFolds.append(model)\n \ndef lf():\n for i in range(1,6):\n loadFoldParams('effsubsee_FOLD{}_BEST_wresnet34x2_batchsize96_checkpoint.pth.tar'.format(i))\n%time lf()\n\nassert len(allFolds) == 5\n\n# normalize the simulation data file\nnormalizer = 'mean_stddev_primary_full_v3__384t__512f__logmod2-ph.hdf5'\n%time input_var = normalizeSimFile(normalizer, simfile)", "<br>\nCalculate the class probabilities as an average of the probabilities returned by the 5 different networks", "# calculate probabilities\n\ndef runAllModels(aSimFile):\n\n probs = np.zeros(7)\n for mf in allFolds:\n probs += singleProbs(mf, input_var)\n \n probs = probs/float(len(allFolds))\n return probs\n\n%time probs = runAllModels(simfile)", "<br>\nDisplay class probabilities and most-likely signal class", "print('final class probabilities')\nprint(probs)\n\nclass_list = ['brightpixel', 'narrowband', 'narrowbanddrd', 'noise', 'squarepulsednarrowband', 'squiggle', 'squigglesquarepulsednarrowband']\n\nprint('signal classification')\npredicted_signal_class = class_list[probs.argmax()]\nprint(predicted_signal_class)", "<br>\nConfirm prediction\nWe will display the signal as a spectrogram to confirm the predicted class. Addtionally, the signal classes for the preview test set from the code challenge are available in the Github repository, allowing you to explicitely check the prediction against the actual signal class. 
(The classes for the final test set are not published so that teams may submit a scorecard to the final test set scoreboard even though the code challenge has officially ended.)", "%matplotlib inline\nimport matplotlib.pyplot as plt\n\naca = ibmseti.compamp.SimCompamp(open(simfile,'rb').read())\nspectrogram = aca.get_spectrogram()\nfig, ax = plt.subplots(figsize=(20, 10))\nax.imshow(np.log(spectrogram), aspect = 0.5*float(spectrogram.shape[1]) / spectrogram.shape[0], cmap='gray')", "<br>\nCheck the test set class from the published list of signal classes.", "import pandas as pd\npreview_test_set_pd = pd.read_csv('https://github.com/setiQuest/ML4SETI/raw/master/results/private_list_primary_v3_testset_preview_uuid_class_29june_2017.csv', index_col=None)\n\nexpected_signal_class = preview_test_set_pd[preview_test_set_pd.UUID == simfile.split('/')[-1].rstrip('.dat')].SIGNAL_CLASSIFICATION.values[0]\n\nassert predicted_signal_class == expected_signal_class\nprint(expected_signal_class)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.14/_downloads/plot_cluster_methods_tutorial.ipynb
bsd-3-clause
[ "%matplotlib inline", ".. _tut_stats_cluster_methods:\nPermutation t-test on toy data with spatial clustering\nFollowing the illustrative example of Ridgway et al. 2012,\nthis demonstrates some basic ideas behind both the \"hat\"\nvariance adjustment method, as well as threshold-free\ncluster enhancement (TFCE) methods in mne-python.\nThis toy dataset consists of a 40 x 40 square with a \"signal\"\npresent in the center (at pixel [20, 20]) with white noise\nadded and a 5-pixel-SD normal smoothing kernel applied.\nFor more information, see:\nRidgway et al. 2012, \"The problem of low variance voxels in\nstatistical parametric mapping; a new hat avoids a 'haircut'\",\nNeuroImage. 2012 Feb 1;59(3):2131-41.\nSmith and Nichols 2009, \"Threshold-free cluster enhancement:\naddressing problems of smoothing, threshold dependence, and\nlocalisation in cluster inference\", NeuroImage 44 (2009) 83-98.\nIn the top row plot the T statistic over space, peaking toward the\ncenter. Note that it has peaky edges. Second, with the \"hat\" variance\ncorrection/regularization, the peak becomes correctly centered. Third,\nthe TFCE approach also corrects for these edge artifacts. Fourth, the\nthe two methods combined provide a tighter estimate, for better or\nworse.\nNow considering multiple-comparisons corrected statistics on these\nvariables, note that a non-cluster test (e.g., FDR or Bonferroni) would\nmis-localize the peak due to sharpness in the T statistic driven by\nlow-variance pixels toward the edge of the plateau. Standard clustering\n(first plot in the second row) identifies the correct region, but the\nwhole area must be declared significant, so no peak analysis can be done.\nAlso, the peak is broad. In this method, all significances are\nfamily-wise error rate (FWER) corrected, and the method is\nnon-parametric so assumptions of Gaussian data distributions (which do\nactually hold for this example) don't need to be satisfied. 
Adding the\n\"hat\" technique tightens the estimate of significant activity (second\nplot). The TFCE approach (third plot) allows analyzing each significant\npoint independently, but still has a broadened estimate. Note that\nthis is also FWER corrected. Finally, combining the TFCE and \"hat\"\nmethods tightens the area declared significant (again FWER corrected),\nand allows for evaluation of each point independently instead of as\na single, broad cluster.\nNote that this example does quite a bit of processing, so even on a\nfast machine it can take a few minutes to complete.", "# Authors: Eric Larson <larson.eric.d@gmail.com>\n# License: BSD (3-clause)\n\nimport numpy as np\nfrom scipy import stats\nfrom functools import partial\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D # noqa; this changes hidden mpl vars\n\nfrom mne.stats import (spatio_temporal_cluster_1samp_test,\n bonferroni_correction, ttest_1samp_no_p)\n\ntry:\n from sklearn.feature_extraction.image import grid_to_graph\nexcept ImportError:\n from scikits.learn.feature_extraction.image import grid_to_graph\n\nprint(__doc__)", "Set parameters", "width = 40\nn_subjects = 10\nsignal_mean = 100\nsignal_sd = 100\nnoise_sd = 0.01\ngaussian_sd = 5\nsigma = 1e-3 # sigma for the \"hat\" method\nthreshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)\nthreshold_tfce = dict(start=0, step=0.2)\nn_permutations = 1024 # number of clustering permutations (1024 for exact)", "Construct simulated data\n Make the connectivity matrix just next-neighbor spatially", "n_src = width * width\nconnectivity = grid_to_graph(width, width)\n\n# For each \"subject\", make a smoothed noisy signal with a centered peak\nrng = np.random.RandomState(42)\nX = noise_sd * rng.randn(n_subjects, width, width)\n# Add a signal at the dead center\nX[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd\n# Spatially smooth with a 2D Gaussian kernel\nsize = width // 2 - 1\ngaussian = 
np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))\nfor si in range(X.shape[0]):\n for ri in range(X.shape[1]):\n X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')\n for ci in range(X.shape[2]):\n X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')", "Do some statistics", "# Note that X needs to be a multi-dimensional array of shape\n# samples (subjects) x time x space, so we permute dimensions\nX = X.reshape((n_subjects, 1, n_src))\n\n# Now let's do some clustering using the standard method. Note that not\n# specifying a connectivity matrix implies grid-like connectivity, which\n# we want here:\nT_obs, clusters, p_values, H0 = \\\n spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold,\n connectivity=connectivity,\n tail=1, n_permutations=n_permutations)\n\n# Let's put the cluster data in a readable format\nps = np.zeros(width * width)\nfor cl, p in zip(clusters, p_values):\n ps[cl[1]] = -np.log10(p)\nps = ps.reshape((width, width))\nT_obs = T_obs.reshape((width, width))\n\n# To do a Bonferroni correction on these data is simple:\np = stats.distributions.t.sf(T_obs, n_subjects - 1)\np_bon = -np.log10(bonferroni_correction(p)[1])\n\n# Now let's do some clustering using the standard method with \"hat\":\nstat_fun = partial(ttest_1samp_no_p, sigma=sigma)\nT_obs_hat, clusters, p_values, H0 = \\\n spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold,\n connectivity=connectivity,\n tail=1, n_permutations=n_permutations,\n stat_fun=stat_fun)\n\n# Let's put the cluster data in a readable format\nps_hat = np.zeros(width * width)\nfor cl, p in zip(clusters, p_values):\n ps_hat[cl[1]] = -np.log10(p)\nps_hat = ps_hat.reshape((width, width))\nT_obs_hat = T_obs_hat.reshape((width, width))\n\n# Now the threshold-free cluster enhancement method (TFCE):\nT_obs_tfce, clusters, p_values, H0 = \\\n spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold_tfce,\n connectivity=connectivity,\n tail=1, 
n_permutations=n_permutations)\nT_obs_tfce = T_obs_tfce.reshape((width, width))\nps_tfce = -np.log10(p_values.reshape((width, width)))\n\n# Now the TFCE with \"hat\" variance correction:\nT_obs_tfce_hat, clusters, p_values, H0 = \\\n spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold_tfce,\n connectivity=connectivity,\n tail=1, n_permutations=n_permutations,\n stat_fun=stat_fun)\nT_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))\nps_tfce_hat = -np.log10(p_values.reshape((width, width)))", "Visualize results", "plt.ion()\nfig = plt.figure(facecolor='w')\n\nx, y = np.mgrid[0:width, 0:width]\nkwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')\n\nTs = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]\ntitles = ['T statistic', 'T with \"hat\"', 'TFCE statistic', 'TFCE w/\"hat\" stat']\nfor ii, (t, title) in enumerate(zip(Ts, titles)):\n ax = fig.add_subplot(2, 4, ii + 1, projection='3d')\n ax.plot_surface(x, y, t, **kwargs)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title(title)\n\np_lims = [1.3, -np.log10(1.0 / n_permutations)]\npvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]\ntitles = ['Standard clustering', 'Clust. w/\"hat\"',\n 'Clust. w/TFCE', 'Clust. w/TFCE+\"hat\"']\naxs = []\nfor ii, (p, title) in enumerate(zip(pvals, titles)):\n ax = fig.add_subplot(2, 4, 5 + ii)\n plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title(title)\n axs.append(ax)\n\nplt.tight_layout()\nfor ax in axs:\n cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',\n fraction=0.1, pad=0.025)\n cbar.set_label('-log10(p)')\n cbar.set_ticks(p_lims)\n cbar.set_ticklabels(['%0.1f' % p for p in p_lims])" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
talespaiva/folium
examples/plugins_examples.ipynb
mit
[ "Examples of plugins usage in folium\nIn this notebook we show a few illustrations of folium's plugin extensions.\nThis is a development notebook", "# This is to import the repository's version of folium ; not the installed one.\nimport sys, os\nsys.path.insert(0,'..')\nimport folium\nfrom folium import plugins\n\nimport numpy as np\nimport json\n\n%load_ext autoreload\n%autoreload 2", "ScrollZoomToggler\nAdds a button to enable/disable zoom scrolling.", "m = folium.Map([45.,3.], zoom_start=4)\nplugins.ScrollZoomToggler().add_to(m)\nm", "MarkerCluster\nAdds a MarkerCluster layer on the map.", "N = 100\ndata = np.array([\n np.random.uniform(low=35,high=60, size=N), # random latitudes in Europe\n np.random.uniform(low=-12,high=30, size=N), # random longitudes in Europe\n range(N), # popups are simple numbers \n ]).T\nm = folium.Map([45.,3.], zoom_start=4)\nplugins.MarkerCluster(data).add_to(m)\nm", "Terminator", "m = folium.Map([45.,3.], zoom_start=1)\nplugins.Terminator().add_to(m)\nplugins.ScrollZoomToggler().add_to(m)\nm", "Leaflet.boatmarker", "m = folium.Map([30.,0.], zoom_start=3)\nplugins.BoatMarker((34,-43), heading=45, wind_heading=150, wind_speed=45, color=\"#8f8\").add_to(m)\nplugins.BoatMarker((46,-30), heading=-20, wind_heading=46, wind_speed=25, color=\"#88f\").add_to(m)\nm", "Leaflet.TextPath", "m = folium.Map([20.,0.], zoom_start=3)\n\nwind_locations = [[59.3556, -31.99219], [55.17887, -42.89062], [47.7541, -43.94531], [38.27269, -37.96875],\n [27.05913, -41.13281], [16.29905, -36.5625], [8.40717, -30.23437], [1.05463, -22.5],\n [-8.75479, -18.28125], [-21.61658, -20.03906], [-31.35364, -24.25781], [-39.90974, -30.9375],\n [-43.83453, -41.13281], [-47.7541, -49.92187], [-50.95843, -54.14062], [-55.9738, -56.60156]]\n\nwind_line = folium.PolyLine(wind_locations, weight=15, color='#8EE9FF').add_to(m)\nattr = {'fill': '#007DEF', 'font-weight': 'bold', 'font-size': '24'}\nplugins.PolyLineTextPath(wind_line, \") \", repeat=True, offset=7, 
attributes=attr).add_to(m)\n\ndanger_line = folium.PolyLine([[-40.311, -31.952], [-12.086, -18.727]], weight=10, color='orange', opacity=0.8).add_to(m)\nattr = {'fill': 'red'}\nplugins.PolyLineTextPath(danger_line, \"\\u25BA\", repeat=True, offset=6, attributes=attr).add_to(m)\n\nplane_line = folium.PolyLine([[-49.38237, -37.26562], [-1.75754, -14.41406], [51.61802, -23.20312]], weight=1, color='black').add_to(m)\nattr = {'font-weight':'bold', 'font-size':'24'}\nplugins.PolyLineTextPath(plane_line, \"\\u2708 \", repeat=True, offset=8, attributes=attr).add_to(m)\n\nline_to_new_dehli = folium.PolyLine([[46.67959447, 3.33984375],\n [46.5588603, 29.53125],\n [42.29356419, 51.328125],\n [35.74651226, 68.5546875],\n [28.65203063, 76.81640625]]).add_to(m)\n\nline_to_hanoi = folium.PolyLine([[28.76765911, 77.60742188],\n [27.83907609, 88.72558594],\n [25.68113734, 97.3828125],\n [21.24842224, 105.77636719]]).add_to(m)\n\nplugins.PolyLineTextPath(line_to_new_dehli, \"To New Delhi\", offset=-5).add_to(m)\nplugins.PolyLineTextPath(line_to_hanoi, \"To Hanoi\", offset=-5).add_to(m)\n\nm" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
google-research/google-research
aav/model_and_dataset_analysis/200609_figure3_and_tables_shared.ipynb
apache-2.0
[ "Copyright 2020 Google LLC.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttps://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\nFigure 3 and Tables\nSetup", "import os\nimport zipfile\n\nfrom IPython.display import display\nfrom matplotlib import pyplot\nimport numpy\nimport pandas\nimport scipy.spatial.distance as distance\nimport scipy.stats\nimport seaborn\n\n\n# The canonical single-letter code residue alphabet.\nRESIDUES = tuple('ACDEFGHIKLMNPQRSTVWY')\n\n# Residues sorted by physicochemical properties.\n#\n# This ordering is useful when generating visualizations to highlight common\n# behaviors among similar residues.\nRESIDUES_PHYSCHEM_ORDER = tuple('ILAVGMFYWEDQNHCRKSTP')\n\n# The full VP1 sequence for AAV serotype 2.\nAAV2_VP1_SEQ = 'MAADGYLPDWLEDTLSEGIRQWWKLKPGPPPPKPAERHKDDSRGLVLPGYKYLGPFNGLDKGEPVNEADAAALEHDKAYDRQLDSGDNPYLKYNHADAEFQERLKEDTSFGGNLGRAVFQAKKRVLEPLGLVEEPVKTAPGKKRPVEHSPVEPDSSSGTGKAGQQPARKRLNFGQTGDADSVPDPQPLGQPPAAPSGLGTNTMATGSGAPMADNNEGADGVGNSSGNWHCDSTWMGDRVITTSTRTWALPTYNNHLYKQISSQSGASNDNHYFGYSTPWGYFDFNRFHCHFSPRDWQRLINNNWGFRPKRLNFKLFNIQVKEVTQNDGTTTIANNLTSTVQVFTDSEYQLPYVLGSAHQGCLPPFPADVFMVPQYGYLTLNNGSQAVGRSSFYCLEYFPSQMLRTGNNFTFSYTFEDVPFHSSYAHSQSLDRLMNPLIDQYLYYLSRTNTPSGTTTQSRLQFSQAGASDIRDQSRNWLPGPCYRQQRVSKTSADNNNSEYSWTGATKYHLNGRDSLVNPGPAMASHKDDEEKFFPQSGVLIFGKQGSEKTNVDIEKVMITDEEEIRTTNPVATEQYGSVSTNLQRGNRQAATADVNTQGVLPGMVWQDRDVYLQGPIWAKIPHTDGHFHPSPLMGGFGLKHPPPQILIKNTPVPANPSTTFSAAKFASFITQYSTGQVSVEIEWELQKENSKRWNPEIQYTSNYNKSVNVDFTVDTNGVYSEPRPIGTRYLTRNL'\n\n# The AAV serotype 2 wild type subsequence corresponding to tile #21 
in round #1\n# of experimental results.\nR1_TILE21_WT_SEQ = 'DEEEIRTTNPVATEQYGSVSTNLQRGNR'\n\n# The start and end residue numbers (inclusive) for the tile 21 wild type seq.\n#\n# Residue numbering scheme corresponds to the 1-based index of the AAV2 VP1\n# sequence.\nR1_TILE21_WT_START_RESNUM = 561\nR1_TILE21_WT_END_RESNUM = 588\n\n\nseed_display_name = 'model-selected'\nwalked_display_name = 'model-designed'\n\npartition_pretty_names = {\n 'cnn_designed_plus_rand_train_seed': 'CNN-C ' + seed_display_name,\n 'cnn_designed_plus_rand_train_walked': 'CNN-C ' + walked_display_name,\n 'cnn_rand_doubles_plus_single_seed': 'CNN-A ' + seed_display_name,\n 'cnn_rand_doubles_plus_single_walked': 'CNN-A ' + walked_display_name,\n 'cnn_standard_seed': 'CNN-B ' + seed_display_name,\n 'cnn_standard_walked': 'CNN-B ' + walked_display_name,\n 'designed': 'Additive',\n 'lr_designed_plus_rand_train_seed': 'LR-C ' + seed_display_name,\n 'lr_designed_plus_rand_train_walked': 'LR-C ' + walked_display_name,\n 'lr_rand_doubles_plus_single_seed': 'LR-A ' + seed_display_name,\n 'lr_rand_doubles_plus_single_walked': 'LR-A ' + walked_display_name,\n 'lr_standard_seed': 'LR-B ' + seed_display_name,\n 'lr_standard_walked': 'LR-B ' + walked_display_name,\n 'rand': 'Random',\n 'rnn_designed_plus_rand_train_seed': 'RNN-C ' + seed_display_name,\n 'rnn_designed_plus_rand_train_walked': 'RNN-C ' + walked_display_name,\n 'rnn_rand_doubles_plus_singles_seed': 'RNN-A ' + seed_display_name,\n 'rnn_rand_doubles_plus_singles_walked': 'RNN-A ' + walked_display_name,\n 'rnn_standard_seed': 'RNN-B ' + seed_display_name,\n 'rnn_standard_walked': 'RNN-B ' + walked_display_name,\n}\n\nml_generated_partitions = [\n 'rnn_designed_plus_rand_train_walked',\n 'rnn_designed_plus_rand_train_seed',\n 'rnn_rand_doubles_plus_singles_walked',\n 'rnn_rand_doubles_plus_singles_seed',\n 'rnn_standard_walked',\n 'rnn_standard_seed',\n 'cnn_designed_plus_rand_train_walked',\n 'cnn_designed_plus_rand_train_seed',\n 
'cnn_rand_doubles_plus_single_walked',\n 'cnn_rand_doubles_plus_single_seed',\n 'cnn_standard_walked',\n 'cnn_standard_seed',\n 'lr_designed_plus_rand_train_walked',\n 'lr_designed_plus_rand_train_seed',\n 'lr_rand_doubles_plus_single_walked',\n 'lr_rand_doubles_plus_single_seed',\n 'lr_standard_walked',\n 'lr_standard_seed',\n]\n\nml_designed_partitions = [\n 'lr_rand_doubles_plus_single_walked',\n 'lr_standard_walked',\n 'lr_designed_plus_rand_train_walked',\n\n 'cnn_rand_doubles_plus_single_walked',\n 'cnn_standard_walked',\n 'cnn_designed_plus_rand_train_walked',\n\n 'rnn_rand_doubles_plus_singles_walked',\n 'rnn_standard_walked',\n 'rnn_designed_plus_rand_train_walked',\n]\n\nnn_designed_partitions = [\n 'cnn_rand_doubles_plus_single_walked',\n 'cnn_standard_walked',\n 'cnn_designed_plus_rand_train_walked',\n\n 'rnn_rand_doubles_plus_singles_walked',\n 'rnn_standard_walked',\n 'rnn_designed_plus_rand_train_walked',\n]\n\nml_selected_partitions = [\n 'lr_rand_doubles_plus_single_seed',\n 'lr_standard_seed',\n 'lr_designed_plus_rand_train_seed',\n \n 'cnn_rand_doubles_plus_single_seed',\n 'cnn_standard_seed',\n 'cnn_designed_plus_rand_train_seed',\n\n 'rnn_rand_doubles_plus_singles_seed',\n 'rnn_standard_seed',\n 'rnn_designed_plus_rand_train_seed', \n]\n\nml_designed_partitions_doubles = [\n 'lr_rand_doubles_plus_single_walked',\n 'cnn_rand_doubles_plus_single_walked',\n 'rnn_rand_doubles_plus_singles_walked',\n]\n\nml_designed_partitions_standard = [\n 'lr_standard_walked',\n 'cnn_standard_walked',\n 'rnn_standard_walked',\n]\n\nml_designed_partitions_designed = [\n 'lr_designed_plus_rand_train_walked',\n 'cnn_designed_plus_rand_train_walked',\n 'rnn_designed_plus_rand_train_walked',\n]\n\nbaseline_random_partitions = ['rand']\n\nbaseline_additive_partitions = ['designed']\n\n# Sentinel token value for denoting \"no mutation here\".\n_PLACEHOLDER_TOKEN = '_'\n# Number of different mutation slots to use for each wildtype sequence position.\n_NUM_MUTATION_SLOTS 
= 2 # 1 substitution + 1 insert possible per wt position.\n# Slot index for substitution mutations.\n_SUB_INDEX = 0\n# Slot index for insertion mutations.\n_INS_INDEX = 1\n\n\ndef tokenize_mutation_seq(seq, placeholder_token='_'):\n \"\"\"Converts a variable-length mutation sequence to a fixed-length sequence.\n\n For an N-residue reference sequence, the encoding is shape (N+1, M, A), where\n A is the alphabet size (e.g., A=20 for the canonical peptide alphabet) and M\n is the number of distinct mutation types at each position; here, M=2\n (1x sub + 1x ins at each reference sequence position).\n\n Args:\n seq: (str) A mutation sequence to tokenize; e.g., \"__A_\" or \"aTEST\".\n placeholder_token: (str) Sentinel value used to encode non-mutated positions\n in the mutation sequence.\n Returns:\n A length-N+1 sequence of (\"<substitution_token>\", \"<insertion token>\")\n 2-tuples.\n \"\"\"\n tokens = []\n i = 0\n # Consume the prefix insertion mutation if there is one.\n #\n # A prefix insertion is denoted by a leading lower case letter on the seq.\n if seq[i].islower():\n tokens.append((placeholder_token, seq[i].upper()))\n i += 1\n else:\n tokens.append((placeholder_token, placeholder_token))\n\n while i < len(seq):\n if i < len(seq) - 1 and seq[i + 1].islower():\n tokens.append((seq[i], seq[i+1].upper()))\n i += 2\n else:\n tokens.append((seq[i], placeholder_token))\n i += 1\n return tokens\n\n\nclass MutationSequenceEncoder(object):\n \"\"\"Mutation sequence encoder for generating fixed-length representations.\n\n The encoding has two slots for each residue position in the ref sequence:\n 1. A slot that encodes a residue substitution mutation\n 2. 
A slot that encodes a single-residue insertion mutation\n\n There is also a pair of slots for any single-position prefix mutation.\n\n Attributes:\n encoding_size: (int) The encoding length for a single residue.\n \"\"\"\n\n def __init__(self, residue_encoder, ref_seq):\n \"\"\"Constructor.\n\n Args:\n residue_encoder: (object) A single residue encoder\n ref_seq: (str) The reference (non-mutated) sequence.\n \"\"\"\n self._residue_encoder = residue_encoder\n self._ref_seq = ref_seq\n self.encoding_size = self._residue_encoder.encoding_size\n\n def encode(self, seq):\n \"\"\"Encodes a mutation sequence as a fixed-length multi-dimensional array.\n\n Args:\n seq: (str) A mutation sequence to encode; e.g., \"__A_\".\n Returns:\n A numpy.ndarray(shape=(len(ref_seq)+1, 2, encoding_size), dtype=float).\n Raises:\n ValueError: if the mutation sequence references a different number of\n sequence positions than the specified ref_seq.\n \"\"\"\n seq_encoding = numpy.zeros((\n len(self._ref_seq) + 1,\n _NUM_MUTATION_SLOTS,\n self.encoding_size))\n\n sub_ins_tokens = tokenize_mutation_seq(seq, _PLACEHOLDER_TOKEN)\n if len(sub_ins_tokens) != len(self._ref_seq) + 1:\n raise ValueError('Mutation sequence dimension mismatch: '\n '%d mutation positions vs %d in reference sequence'\n % (len(sub_ins_tokens), len(self._ref_seq) + 1))\n\n for position_i, (sub_token, ins_token) in enumerate(sub_ins_tokens):\n if sub_token != _PLACEHOLDER_TOKEN:\n seq_encoding[position_i, _SUB_INDEX, :] = self._residue_encoder.encode(\n sub_token)\n if ins_token != _PLACEHOLDER_TOKEN:\n seq_encoding[position_i, _INS_INDEX, :] = self._residue_encoder.encode(\n ins_token)\n return seq_encoding\n\n\nclass ResidueIdentityEncoder(object):\n \"\"\"Residue identity encoder, either one-hot or residue index.\n\n Attributes:\n encoding_size: (int) The number of encoding dimensions for a single residue.\n \"\"\"\n\n def __init__(self, alphabet, one_hot=True):\n \"\"\"Constructor.\n\n Args:\n alphabet: (seq<char>) 
The alphabet of valid tokens for the sequence;\n e.g., the 20x 1-letter residue codes for standard peptides.\n one_hot: if true, performs one-hot encoding (dim = alphabet length);\n if false, encodes as the index of the residue in the alphabet (dim = 1).\n \"\"\"\n self._alphabet = [l.upper() for l in alphabet]\n self._letter_to_id = dict((letter, id) for (id, letter)\n in enumerate(self._alphabet))\n self._one_hot = one_hot\n self.encoding_size = len(self._alphabet) if one_hot else 1\n\n def encode(self, residue):\n \"\"\"Encodes a single residue as a one hot identity vector.\n\n Args:\n residue: (str) A single-character string representing one residue; e.g.,\n 'A' for Alanine.\n Returns:\n If one-hot=True, a numpy.ndarray(shape=(A,), dtype=float) with a single\n non-zero (1) value; the identity index for each residue in the alphabet is\n given by the residue's index in the alphabet ordered sequence; i.e., for\n the alphabet 'ACDE', a 'C' would be encoded as [0, 1, 0, 0].\n If one-hot=False, a numpy.ndarray(shape=(1,), dtype=float) with the\n residue's index in the alphabet ordered sequence.\n \"\"\"\n if self._one_hot:\n onehot = numpy.zeros(self.encoding_size, dtype=float)\n onehot[self._letter_to_id[residue]] = 1\n return onehot\n else:\n return numpy.array((self._letter_to_id[residue],), dtype=float)\n\n\nONEHOT_FIXEDLEN_MUTATION_ENCODER = MutationSequenceEncoder(\n ResidueIdentityEncoder(RESIDUES), R1_TILE21_WT_SEQ)", "Load data", "my_zip = zipfile.ZipFile('allseqs_20191230.csv.zip')\nmy_zip.extractall() # extract csv file to the current working directory\n\ndf = pandas.read_csv('allseqs_20191230.csv', index_col=None)\ndel df['num_mutations'] # Prefer 'num_edits' column which is Levenshtein distance to WT\n\nprint df.shape\ndf.head()", "SI Tables: generated and viable capsid statistics\nLib", "def _get_percent_viable(num_viable, num_total):\n if num_total == 0:\n return 0\n else: \n return float(num_viable) / num_total * 100\n\n\ndef 
_format_stats_table(stats_table):\n percent_formatter = lambda pct: '{:4.1f}%'.format(pct)\n count_formatter = lambda n: '{:7,}'.format(n)\n\n stats_table['percent_viable'] = stats_table.percent_viable.apply(percent_formatter)\n stats_table['num_total'] = stats_table.num_total.apply(count_formatter)\n stats_table['num_viable'] = stats_table.num_viable.apply(count_formatter)\n\n return stats_table.rename({\n 'num_total': '# generated', \n 'num_viable': '# viable',\n 'percent_viable': '% viable',\n }, axis=1)\n\n\ndef performance_by_wt_distance(df, partitions, distances=range(2, 30)):\n rows = []\n for t in distances:\n num_viable = len(df[\n df.partition.isin(partitions) \n & (df.num_edits >= t)\n & df.is_viable\n ])\n num_total = len(df[\n df.partition.isin(partitions) \n & (df.num_edits >= t)\n ])\n\n rows.append({\n 'min_mutations': t,\n 'num_total': num_total, \n 'num_viable': num_viable,\n 'percent_viable': _get_percent_viable(num_viable, num_total),\n })\n col_order = [\n 'min_mutations',\n 'num_total',\n 'num_viable',\n 'percent_viable',\n ] \n return _format_stats_table(pandas.DataFrame(rows, columns=col_order))\n\n\ndef performance_by_model(df, partitions):\n rows = []\n for partition in partitions:\n num_viable = len(df[(df.partition == partition) & df.is_viable])\n num_total = len(df[df.partition == partition])\n\n rows.append({\n 'partition': partition,\n 'num_total': num_total, \n 'num_viable': num_viable, \n 'percent_viable': _get_percent_viable(num_viable, num_total),\n })\n\n col_order = [\n 'partition',\n 'num_total',\n 'num_viable',\n 'percent_viable',\n ]\n return _format_stats_table(pandas.DataFrame(rows, columns=col_order))\n\n\n# display(performance_by_wt_distance(df, ml_designed_partitions))\n# display(performance_by_model(df, ml_designed_partitions))", "SI Table 1", "performance_by_wt_distance(df, ml_generated_partitions)", "SI Table 2", "performance_by_wt_distance(df, ml_designed_partitions)", "SI Table 3", "performance_by_wt_distance(df, 
nn_designed_partitions)", "SI Table 4", "display(performance_by_model(df, ml_selected_partitions))", "SI Table 5", "display(performance_by_model(df, ml_designed_partitions))", "SI Table 6", "performance_by_wt_distance(df, baseline_additive_partitions, distances=range(2, 40))", "SI Table 7", "performance_by_wt_distance(df, baseline_random_partitions, distances=range(2, 11))", "Additional: performance vs wt distance per model", "sub_tables = []\nfor partition in ml_designed_partitions:\n partition_perf = performance_by_wt_distance(\n df, \n [partition],\n ) \n partition_perf['partition'] = partition\n sub_tables.append(partition_perf)\nstats_table = pandas.concat(sub_tables)\nstats_table", "Figure 3\nPerplexity of residues by position per model", "def get_mutation_count_matrix(\n sequences, encoder=ONEHOT_FIXEDLEN_MUTATION_ENCODER):\n mutations = None\n for seq in sequences:\n seq_mutations = encoder.encode(seq)\n if mutations is None:\n mutations = seq_mutations\n else:\n mutations += seq_mutations\n subs = mutations[1:, 0, :]\n inserts = mutations[1:, 1, :]\n return subs, inserts\n\n\ndef get_perplexity(mutation_count_matrix, replace_nan=True):\n \"\"\"\n Args:\n mutation_count_matrix: (n_positions, 20) array containing #mutations of each\n residue type for the set of positions\n Returns:\n perplexity per position (n_positions,) array with max value of 20\n (a uniform distribution for a given position would have perplexity of 20\n b/c complete confusion across 20 options).\n \"\"\"\n counts_matrix = mutation_count_matrix.T\n perplexity = 2**scipy.stats.entropy(counts_matrix, base=2)\n if replace_nan:\n perplexity[numpy.isnan(perplexity)] = 0 # For plotting purposes\n return perplexity\n\n\ndef plot_mutation_perplexity(\n mutation_count_matrix, \n start_resnum=R1_TILE21_WT_START_RESNUM,\n end_resnum=R1_TILE21_WT_END_RESNUM, # inclusive \n label=None, \n linewidth=1,\n ):\n \n perplexity = get_perplexity(mutation_count_matrix)\n resnums = range(start_resnum, 
end_resnum+1)\n # Trick to make the final step in plot be full-width: add extra point\n perplexity = list(perplexity) + [0]\n resnums.append(end_resnum+2) # TODO: simplify\n pyplot.step(\n resnums, \n perplexity,\n label=label,\n where='post', \n linewidth=linewidth)\n\ndef plot_mutation_perplexity_multi(\n df,\n partitions, \n start_resnum=R1_TILE21_WT_START_RESNUM,\n end_resnum=R1_TILE21_WT_END_RESNUM, # inclusive \n subs=True,\n figsize=(12, 3),\n tick_size=10,\n anno_fontsize=10,\n axis_label_size=10,\n ):\n fig, ax=pyplot.subplots(figsize=figsize)\n \n for p in partitions:\n sub_counts, insert_counts = get_mutation_count_matrix(\n df[\n (df.partition == p) \n & (df.is_viable)\n & (df.num_edits >= 12)\n ].sequence)\n\n linewidth = 1\n if subs:\n plot_mutation_perplexity(\n sub_counts, \n start_resnum=start_resnum,\n end_resnum=end_resnum,\n label=p, \n # color=color,\n linewidth=linewidth)\n else:\n plot_mutation_perplexity(\n insert_counts, \n start_resnum=start_resnum,\n end_resnum=end_resnum,\n label=p, \n # color=color,\n linewidth=linewidth)\n\n pyplot.ylim(0, 15)\n pyplot.yticks([0, 5, 10, 15])\n for y_thresh in [5, 10, 15, 20]:\n pyplot.axhline(\n y=y_thresh, color='black', linestyle='--', alpha=.7, linewidth=.25) \n\n ax.spines['right'].set_visible(True)\n ax.spines['right'].set_linewidth(0.5) \n ax.tick_params(axis='both', labelsize=tick_size)\n pyplot.legend(loc='upper left')\n\n\nseaborn.set_style('white')\nfor p in [\n ml_designed_partitions_doubles, \n ml_designed_partitions_standard,\n ml_designed_partitions_designed,\n ]:\n\n plot_mutation_perplexity_multi(df, p, subs=True)\n pyplot.show()", "Mutation distribution heatmaps by model", "def show_mutation_heatmap_side_by_side_horizontal(\n df,\n encoder=ONEHOT_FIXEDLEN_MUTATION_ENCODER,\n log=True,\n normalize=False,\n colorbar_num_quantiles=None,\n wt_seq=R1_TILE21_WT_SEQ,\n figsize=(2.5, 1),\n cmap_name='viridis',\n color_rgb=None,\n scale_color_rgb=False, # should the rgb values be divided by 
255\n wt_point_size=15,\n cbar=True,\n vmax=None,\n subs_only=False,\n dpi=300,\n linewidth=1,\n threshold_linewidth=.25,\n scale=4):\n\n tick_size = 6\n axis_label_size = 8\n anno_fontsize = tick_size\n figsize = tuple(x*scale for x in figsize)\n pyplot.figure(figsize=figsize, dpi=dpi) \n ax = pyplot.gca()\n\n assert all(numpy.array(encoder._residue_encoder._alphabet) == numpy.array(RESIDUES))\n \n mutations = None\n for seq in df['mutation_sequence']:\n seq_mutations = encoder.encode(seq)\n if mutations is None:\n mutations = seq_mutations\n else:\n mutations += seq_mutations\n mutations = mutations[1:, :, :] # Remove the prefix position\n print 'mutation heatmap range <%d, %d>' % (mutations.min(), mutations.max())\n\n residue_to_index = {\n v: k for k,v in enumerate(encoder._residue_encoder._alphabet)\n }\n physchem_residue_order = [\n residue_to_index[aa] for aa in RESIDUES_PHYSCHEM_ORDER\n ] \n residue_order = physchem_residue_order\n physchem_residue_labels = RESIDUES_PHYSCHEM_ORDER\n tile21_resnums = [\n R1_TILE21_WT_START_RESNUM + i \n for i in range(len(wt_seq))\n ]\n\n pyplot.ylabel('AAV2 residue number')\n \n subs = mutations[:, 0, :]\n inserts = mutations[:, 1, :]\n mutations = numpy.concatenate([\n subs, # subs only\n inserts, # ins only \n ], axis=1)\n\n if normalize:\n mutations /= len(df) # normalize by number of sequences\n if log:\n mutations = numpy.log10(1 + mutations)\n\n # Rotate the heatmap horizontally via transpose\n mutations = mutations.T\n subs_and_ins_residue_order = (\n residue_order \n + list(len(residue_order) + numpy.array(residue_order)) # residues but offset by 20\n )\n\n wt_mutations = encoder.encode(wt_seq)\n wt_mutations = wt_mutations[1:, : :] # drop prefix slot\n wt_subs = wt_mutations[:, 0, :]\n wt_ins = wt_mutations[:, 1, :]\n wt_mutations = numpy.concatenate([wt_subs, wt_ins], axis=1)\n wt_mutations = wt_mutations.T\n wt_mutations = wt_mutations[subs_and_ins_residue_order, :]\n wt_residue_indices, wt_position_indices = 
numpy.where(wt_mutations > 0)\n marker_offset_epsilon = 0.1\n marker_offset_residue_indices = 0.5 - marker_offset_epsilon\n marker_offset_position_indices = 0.5\n wt_position_indices = wt_position_indices + marker_offset_position_indices\n wt_residue_indices = wt_residue_indices + marker_offset_residue_indices\n \n cmap = pyplot.cm.get_cmap(cmap_name, colorbar_num_quantiles)\n if color_rgb is not None:\n if scale_color_rgb:\n color_rgb = [c/255. for c in color_rgb]\n cmap = seaborn.light_palette(color_rgb, n_colors=100, input=\"rgb\")\n ax = seaborn.heatmap(\n mutations[subs_and_ins_residue_order, :], \n cmap=cmap,\n xticklabels=tile21_resnums,\n yticklabels=physchem_residue_labels + physchem_residue_labels,\n robust=True,\n cbar=cbar,\n vmax=vmax if not log else numpy.log10(vmax),\n )\n\n if cbar:\n cbar = ax.collections[0].colorbar\n if log:\n tick_values = [1, 10, 100, 1000]\n possible_log_ticks = [numpy.log10(t) for t in tick_values]\n possible_log_tick_labels = [str(t) for t in tick_values]\n log_ticks = []\n log_tick_labels = []\n for t, l in zip(possible_log_ticks, possible_log_tick_labels):\n if t <= numpy.log10(vmax):\n log_ticks.append(t)\n log_tick_labels.append(l)\n cbar.set_ticks(log_ticks)\n log_tick_labels[-1] = '>' + log_tick_labels[-1]\n cbar.set_ticklabels(log_tick_labels)\n\n ax.scatter(\n wt_position_indices - .1, # shift the point more to the center of the square\n wt_residue_indices, \n color='white', \n s=wt_point_size,\n )\n pyplot.axhline(y=20, color='white', linewidth=1) # horizontal separator between subs and inserts\n\n\n\n################################################################################\nfor name in ml_designed_partitions:\n data = df[\n (df.partition == name) \n & (df.is_viable) \n & (df.num_edits >= 12)]\n show_mutation_heatmap_side_by_side_horizontal(\n data, \n normalize=False, \n log=True,\n colorbar_num_quantiles=30,\n scale=8,\n cmap_name='viridis',\n cbar=True,\n vmax=1000,\n )\n pyplot.show()\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
amozie/amozie
testzie/keras_logistic_regression.ipynb
apache-2.0
[ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#logistic-regression\" data-toc-modified-id=\"logistic-regression-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>logistic regression</a></span><ul class=\"toc-item\"><li><span><a href=\"#data\" data-toc-modified-id=\"data-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>data</a></span></li><li><span><a href=\"#model\" data-toc-modified-id=\"model-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>model</a></span></li><li><span><a href=\"#predict\" data-toc-modified-id=\"predict-1.3\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>predict</a></span></li><li><span><a href=\"#contour\" data-toc-modified-id=\"contour-1.4\"><span class=\"toc-item-num\">1.4&nbsp;&nbsp;</span>contour</a></span></li></ul></li><li><span><a href=\"#polynomial-logistic-regression\" data-toc-modified-id=\"polynomial-logistic-regression-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>polynomial logistic regression</a></span><ul class=\"toc-item\"><li><span><a href=\"#data\" data-toc-modified-id=\"data-2.1\"><span class=\"toc-item-num\">2.1&nbsp;&nbsp;</span>data</a></span></li><li><span><a href=\"#model\" data-toc-modified-id=\"model-2.2\"><span class=\"toc-item-num\">2.2&nbsp;&nbsp;</span>model</a></span></li><li><span><a href=\"#predict\" data-toc-modified-id=\"predict-2.3\"><span class=\"toc-item-num\">2.3&nbsp;&nbsp;</span>predict</a></span></li><li><span><a href=\"#contour\" data-toc-modified-id=\"contour-2.4\"><span class=\"toc-item-num\">2.4&nbsp;&nbsp;</span>contour</a></span></li></ul></li></ul></div>", "from keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nimport keras\nfrom keras import backend as K\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport itertools\n%matplotlib inline", "logistic regression\ndata", "X = np.random.rand(1000, 
2)\nY = np.where(X[:, 0] * X[:, 1] > 0.16, 1, 0)[:, np.newaxis]\n\nplt.scatter(X[:, 0], X[:, 1], c=Y[:, 0])", "model", "model_x = Input((2, ))\nmodel_y = Dense(1, activation='sigmoid')(model_x)\nmodel = Model(model_x, model_y)\n\nmodel.compile(\n loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])\n\nhist = model.fit(X, Y, batch_size=50, epochs=500, verbose=0)\nprint(model.evaluate(X, Y, verbose=0))", "predict", "pred = model.predict(X) > 0.5\nY_pred = np.where(pred, 1, 0)\n\ncond1 = np.logical_and(Y == 1, Y != Y_pred).flatten()\ncond0 = np.logical_and(Y == 0, Y != Y_pred).flatten()\n\nplt.scatter(X[:, 0], X[:, 1], c=Y[:, 0], marker='.')\nplt.scatter(X[cond1][:, 0], X[cond1][:, 1], c='r', marker='x')\nplt.scatter(X[cond0][:, 0], X[cond0][:, 1], c='g', marker='x')", "contour", "px, py = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))\npxy = np.vstack((px.flatten(), py.flatten())).T\npz = model.predict(pxy).reshape(50, 50)\n# pz = np.where(pz > 0.5, 1, 0)\n\nplt.contourf(px, py, pz, 1, cmap=plt.cm.binary_r)\n# plt.pcolormesh(px, py, pz, cmap=plt.cm.binary_r)\nplt.colorbar()\nplt.contour(px, py, pz, [0.5], colors='k')\nplt.scatter(X[:, 0], X[:, 1], c=Y[:, 0], marker='.')\n\nplt.tricontourf(X[:,0], X[:,1], Y_pred[:,0], 1, cmap=plt.cm.binary_r)\nplt.colorbar()\nplt.tricontour(X[:,0], X[:,1], Y_pred[:,0], [0.5], colors='k')\nplt.scatter(X[:, 0], X[:, 1], c=Y[:, 0], marker='.')", "polynomial logistic regression\ndata", "X = np.random.rand(1000, 2)\nY = np.where((X[:, 0]-0.5)**2/9 + (X[:, 1]-0.5)**2/6 < 0.01 + np.random.randn(1000)/300, 1, 0)[:, np.newaxis]\n\nplt.scatter(X[:, 0], X[:, 1], c=Y[:, 0])", "model", "def to_polynomial(x, y, n):\n l = []\n for i in range(n+1):\n for j in range(i+1):\n if i==0:\n continue\n l.append(x**(i-j) * y**j)\n return l\n\nmodel_x = Input((2, ))\nmodel_y = Lambda(lambda x: K.map_fn(lambda y: K.stack(to_polynomial(y[0], y[1], 6)), x))(model_x)\nmodel_y = Dense(1, activation='sigmoid')(model_y)\nmodel = Model(model_x, 
model_y)\n\nmodel.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])\n\nhist = model.fit(X, Y, batch_size=50, epochs=500, verbose=0)", "predict", "pred = model.predict(X)\nY_pred = np.where(pred>0.5, 1, 0)\n\ncond0 = np.logical_and(Y==0, Y!=Y_pred).flatten()\ncond1 = np.logical_and(Y==1, Y!=Y_pred).flatten()\n\nplt.scatter(X[:,0], X[:,1], c=Y[:,0])\nplt.scatter(X[cond0][:,0], X[cond0][:,1], c='g', marker='x')\nplt.scatter(X[cond1][:,0], X[cond1][:,1], c='r', marker='x')", "contour", "px, py = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))\npxy = np.vstack([px.flatten(), py.flatten()]).T\npz = model.predict(pxy).reshape(50, 50)\n\nplt.contourf(px, py, pz, cmap=plt.cm.binary_r)\nplt.colorbar()\nplt.contour(px, py, pz, [0.5], colors='k')\nplt.scatter(X[:,0], X[:,1], c=Y[:,0], marker='.')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]