Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
6,900
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# Initialise the ES-DOC notebook output document describing the
# CMIP6 / AWI / AWI-CM-1-0-MR sea-ice model component.
DOC = NotebookOutput('cmip6', 'awi', 'awi-cm-1-0-mr', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
6,901
|
<ASSISTANT_TASK:>
Python Code:
import os
from urllib.request import urlopen
# Fetch the MovieLens 100k zip archive from the GroupLens site and save
# it locally. The download takes a little while, so it only runs when
# the file has not been downloaded yet.
file_name = "data/ml-100k.zip"
if not os.path.exists(os.path.dirname(file_name)):
    os.makedirs(os.path.dirname(file_name))
if not os.path.exists("data/ml-100k.zip"):
    url = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
    with urlopen(url) as res:
        with open("data/ml-100k.zip", "wb") as f:
            f.write(res.read())
# Unpack the zip archive into data/.
from shutil import unpack_archive
unpack_archive("data/ml-100k.zip", "data/", "zip")
import numpy as np
import pandas as pd
# Load the u1 training split: one (user, movie, rating, timestamp) row per rating.
udata = pd.read_csv("data/ml-100k/u1.base", delimiter="\t", names=("user", "movie", "rating", "timestamp"))
udata.tail()
# Keep only ratings of 3 or higher ("liked" items).
high_rate = udata.loc[udata["rating"] >= 3]
# Pivot to a movie-by-user matrix, then fill the missing cells (NaN) with 0.
raw = high_rate.pivot(index="movie", columns="user", values="rating")
df = raw.fillna(0)
# where() is a little confusing: it keeps values where df < 3 (i.e. the
# zero fill) and replaces everything else (ratings >= 3) with 1,
# yielding a binary liked/not-liked indicator matrix.
df = df.where(df < 3, 1)
df.head()
# Number of ratings captured in the indicator matrix.
df.astype(bool).sum(axis=1).sum()
# Toy example: two items described by 3-element feature vectors.
item1 = np.array([1,1,0])
item2 = np.array([1,0,1])
from scipy.spatial.distance import cosine
# scipy's cosine() returns a *distance*; 1 - distance is the similarity.
sim = 1 - cosine(item1, item2)
print(sim)
# Same computation for the first two movies of the real matrix.
sim = 1 - cosine(df.iloc[0], df.iloc[1])
print(sim)
# In the same spirit, compute the distance between all item pairs.
from scipy.spatial.distance import pdist
# Similarity = 1 - cosine distance.
d = pdist(df, "cosine")
d = 1 - d
# Convert the condensed vector to a square matrix (the vector form is hard to read).
from scipy.spatial.distance import squareform
d = squareform(d)
# NaNs appear (all-zero rows), so replace them with 0.
d[np.isnan(d)] = 0
# Small trick: set self-similarity to -1 so each movie ranks itself last.
d = d - np.eye(d.shape[0])
# Show the result.
print(d)
# Movie ID = 1 (note the index is 0-based).
movie_id = 0
# Sort by similarity, best first; store the sorted *positions*.
# NOTE(review): `id` shadows the builtin of the same name.
id = d[movie_id].argsort()[::-1]
# Show the first 5 entries.
for i in id[:5]:
    print("{i:0>3d}: {v: .3f}".format(i=i, v=d[movie_id, i]))
# Output five movies to recommend to the given user.
def get_recommend_items(user_id):
    """Return up to 5 recommended movies for *user_id*.

    Returns 0-based row positions into the similarity matrix ``d``
    (i.e. positions in the training pivot ``raw``), most similar first,
    excluding movies the user already rated highly. Relies on the
    module-level globals ``df``, ``d`` and ``raw``.
    """
    # Positions of movies the user rated highly.
    # (Series.nonzero() was removed in pandas 1.0 — use the ndarray.)
    favorite = df.loc[:, user_id].to_numpy().nonzero()[0]
    # Similarity rows of the user's favourite movies.
    table = d[favorite]
    # Clamp the -1 self-similarity trick (and any negatives) to zero,
    # then accumulate similarity per candidate movie.
    table[np.where(table < 0)] = 0
    indicator = table.sum(axis=0)
    # Candidate positions, highest accumulated similarity first.
    sorted_id = indicator.argsort()[::-1]
    # BUG FIX: previously this held movie *labels* from the pivot index
    # (1-based movie IDs) while sorted_id holds 0-based positions, so
    # already-rated movies were filtered off by one. Convert the rated
    # movies to row positions before filtering.
    reviewed = set(np.flatnonzero(raw.loc[:, user_id].notnull().to_numpy()))
    # Drop already-rated movies.
    recommend_id = [i for i in sorted_id if i not in reviewed]
    # Return only 5 entries.
    return recommend_id[:5]
# 試しにUser_ID=100の人
recommends = get_recommend_items(100)
print(recommends)
# Load the u1 test split in the same layout as the training data.
utest = pd.read_csv("data/ml-100k/u1.test", delimiter="\t", names=("user", "movie", "rating", "timestamp"))
utest.head()
# Build the binary movie-by-user indicator matrix for the *test* set,
# mirroring the training-set preprocessing.
# BUG FIX: the rating mask was previously computed from ``udata`` (the
# training frame) instead of ``utest``, selecting the wrong rows.
high_rate_test = utest.loc[utest["rating"] >= 3]
raw_test = high_rate_test.pivot(index="movie", columns="user", values="rating")
df_test = raw_test.fillna(0)
df_test = df_test.where(df_test < 3, 1)
### Quick sanity check with user_id = 1.
user_id = 1
# (1) recommended movie positions for this user
recommends = set(get_recommend_items(user_id))
# (2) positions of movies the user actually liked in the test data
# (Series.nonzero() was removed in pandas 1.0 — use the ndarray).
# NOTE(review): these are positions in df_test, whose movie index may
# differ from the training matrix backing get_recommend_items — the
# comparison assumes the two position spaces line up; verify.
real = set(df_test.loc[:, user_id].to_numpy().nonzero()[0])
# Intersection of (1) and (2): hits among the recommendations.
real & recommends
# Fetch the list of users present in the test data.
users = df_test.columns
# Total user count (renamed from `all`, which shadowed the builtin).
total = len(users)
# Number of users with at least one recommendation hit.
good = 0
# For each user, judge success (any overlap between recommendations and
# the movies they liked in the test set) or not.
for user_id in users:
    # Series.nonzero() was removed in pandas 1.0 — use the ndarray.
    real = set(df_test.loc[:, user_id].to_numpy().nonzero()[0])
    recommends = set(get_recommend_items(user_id))
    matches = real & recommends
    good += 1 if matches else 0
# Show the result (total, successes, success rate).
print("全件={0}, 成功数={1}, 成功率={2}%".format(total, good, good * 100 // total))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. データの前処理
Step2: 上記の形式だとモデルの学習に用いづらいため、 行が映画、列がユーザーの行列(マトリックス) に変換します。
Step3: (参考までに)
Step4: 全70,000件中、66,103件は好評価のようです(94%)。今回は、評価>=3の考慮はあまり意味がないかもしれません(笑)。
Step5: 上記は、それぞれ3つの特徴を持つベクトルで、コサイン距離(=類似度)は以下のように計算します。
Step6: 上記の要領で、実際に映画ID=1と映画ID=2の類似度を計算してみると、以下のようになります。
Step7: 上記の 0.32は相対的な数値でありそれ自体に意味はありませんが、他の類似度と比較することで、より類似しているアイテムを見つけることができます。
Step8: 上記で、各映画ごとの類似度を総当たりで計算することができました。
Step9: 上記の処理では、指定した映画に類似する映画を知ることができます。
Step10: これで、レコメンド処理の実装が完了しました!!!
Step11: 無事にレコメンドができたようです(ホッとしますw)。
|
6,902
|
<ASSISTANT_TASK:>
Python Code:
# IPython magic: render matplotlib figures inline in the notebook.
%matplotlib inline
from matplotlib import pylab
import matplotlib.patches as patches
import matplotlib.path as path
from ipywidgets import interact
import numpy
import sys
import random
import itertools
import time
import scipy.special
import math
# Default figure size and colour map for all plots below.
pylab.rcParams['figure.figsize'] = 16, 10
pylab.rcParams['image.cmap'] = 'viridis'
# Prefer the crocodile library's implementations when the package is
# available (running inside the crocodile repo); otherwise fall back to
# the stand-alone helper definitions in the next cells.
try:
    sys.path.append('../..')
    from crocodile.synthesis import *
    from util.visualize import *
    print("Crocodile mode")
except ImportError:
    print("Stand-alone mode")
def conv(a, b):
    """Circular convolution of *a* and *b* via the convolution theorem."""
    fa = fft(a)
    fb = fft(b)
    return ifft(fa * fb)
def coordinates(N):
    """Return N centred grid coordinates covering [-0.5, 0.5)."""
    freqs = numpy.fft.fftfreq(N)
    return numpy.fft.fftshift(freqs)
def fft(a):
    """Centred FFT of *a*: shifts so the origin is mid-array on both sides.

    Supports 1D and 2D arrays; raises ValueError otherwise (previously
    fell through and silently returned None).
    """
    if a.ndim == 1:
        return numpy.fft.fftshift(numpy.fft.fft(numpy.fft.ifftshift(a)))
    if a.ndim == 2:
        return numpy.fft.fftshift(numpy.fft.fft2(numpy.fft.ifftshift(a)))
    raise ValueError("fft supports only 1D or 2D arrays, got %dD" % a.ndim)
def ifft(a):
    """Centred inverse FFT of *a* (inverse of fft above).

    Supports 1D and 2D arrays; raises ValueError otherwise (previously
    fell through and silently returned None).
    """
    if a.ndim == 1:
        return numpy.fft.fftshift(numpy.fft.ifft(numpy.fft.ifftshift(a)))
    if a.ndim == 2:
        return numpy.fft.fftshift(numpy.fft.ifft2(numpy.fft.ifftshift(a)))
    raise ValueError("ifft supports only 1D or 2D arrays, got %dD" % a.ndim)
def pad_mid(a, N):
    """Zero-pad *a* to size N per axis, keeping the original centred.

    Assumes all axes of *a* have the same length (only a.shape[0] is
    inspected — TODO confirm square inputs at call sites).
    """
    N0 = a.shape[0]
    assert N >= N0
    before = N // 2 - N0 // 2
    after = (N + 1) // 2 - (N0 + 1) // 2
    widths = [(before, after)] * a.ndim
    return numpy.pad(a, widths, mode='constant', constant_values=0.0)
def extract_mid(a, N):
    """Extract the centred N-wide part of *a* along every axis.

    Inverse of pad_mid for matching sizes. FIX: the slices are now
    passed as a tuple — indexing with a *list* of slices is deprecated
    and removed in modern NumPy.
    """
    assert N <= a.shape[0]
    cx = a.shape[0] // 2
    s = N // 2
    if N % 2 == 0:
        sl = slice(cx - s, cx + s)
    else:
        sl = slice(cx - s, cx + s + 1)
    return a[(sl,) * a.ndim]
def anti_aliasing_function(shape, m, c):
    """Prolate spheroidal anti-aliasing function.

    A scalar *shape* yields a 1D taper sampled on the centred grid; a
    2-tuple yields the 2D separable (outer-product) version.
    """
    if numpy.ndim(shape) == 0:
        # Sample the angular prolate spheroidal function of order (m, m)
        # with spheroidal parameter c on the scaled centred grid.
        mult = 2 - 1/shape/4
        return scipy.special.pro_ang1(m, m, c, mult*coordinates(shape))[0]
    horizontal = anti_aliasing_function(shape[0], m, c)
    vertical = anti_aliasing_function(shape[1], m, c)
    return numpy.outer(horizontal, vertical)
def coordinates2(N):
    """Return the pair (x, y) of 2D centred grid coordinates in [-0.5, 0.5)."""
    half = N // 2
    if N % 2 == 0:
        grid = numpy.mgrid[-half:half, -half:half]
    else:
        grid = numpy.mgrid[-half:half + 1, -half:half + 1]
    # mgrid yields (row, col); reverse to (x, y) order, then normalise.
    return grid[::-1] / N
def _show(a, name, scale, axes):
    """Display 2D array *a* on *axes* with a title and colour bar.

    The extent maps array pixels onto the physical coordinate range
    implied by *scale*, with the half-pixel offset of the centred
    (fftshift-style) grid convention.
    """
    size = a.shape[0]
    # Coordinate range of the centred grid, before scaling.
    if size % 2 == 0:
        low,high = -0.5, 0.5 * (size - 2) / size
    else:
        low,high = -0.5 * (size - 1) / size, 0.5 * (size - 1) / size
    # Shift by half a pixel and scale to physical units.
    low = (low - 1/size/2) * scale
    high = (high - 1/size/2) * scale
    cax=axes.imshow(a, extent=(low,high,low,high)); axes.set_title(name);
    axes.figure.colorbar(cax,shrink=.4,pad=0.025)
def show_grid(grid, name, theta, axes):
    """Display a uv-grid: axes span the uv extent (image size / theta)."""
    return _show(grid, name, theta, axes)
def show_image(img, name, theta, axes):
    """Display an image: axes span the field of view of width *theta*."""
    return _show(img, name, img.shape[0] / theta, axes)
def extract_oversampled(a, Qpx, N):
    """Split an oversampled grid *a* into Qpx interleaved kernels of size N.

    Row f of the result holds every Qpx-th sample of *a* starting at
    sub-pixel offset f, centred on the middle of *a*.
    """
    kernels = numpy.empty((Qpx, N), dtype=complex)
    centre = a.shape[0] // 2
    for frac in range(Qpx):
        # Start offset for this sub-pixel fraction.
        start = centre - Qpx * (N // 2) + frac
        # Extract every Qpx-th pixel.
        kernels[frac] = a[start : start + Qpx * N : Qpx]
    return kernels
def kernel_oversample(ff, Qpx, s=None):
    """Oversample far-field *ff* by factor Qpx and extract gridding kernels.

    Pads the far field, transforms to the uv-grid, and returns Qpx
    sub-pixel-offset kernels of size *s* (defaults to the input size).
    """
    N = ff.shape[0]
    if s is None:
        s = N
    # Pad the far field, then transform to the oversampled uv-grid.
    af = fft(pad_mid(ff, N * Qpx))
    # Extract the per-fraction kernels.
    return extract_oversampled(af, Qpx, s)
# Helper for marking ranges in a graph
def mark_range(lbl, x0, x1, y0=None, y1=None, ax=None):
    """Draw two dashed vertical lines at x0 and x1 and label the range.

    y0/y1 default to the axes' current y-limits; ax defaults to the
    current pylab axes.
    """
    if ax is None: ax = pylab.gca()
    if y0 is None: y0 = ax.get_ylim()[1]
    if y1 is None: y1 = ax.get_ylim()[0]
    wdt = ax.get_xlim()[1] - ax.get_xlim()[0]
    ax.add_patch(patches.PathPatch(patches.Path([(x0,y0), (x0,y1)]), linestyle="dashed"))
    ax.add_patch(patches.PathPatch(patches.Path([(x1,y0), (x1,y1)]), linestyle="dashed"))
    # Place the label near the bottom of the range; use the geometric
    # mean weighting on log-scaled axes so it lands in the same visual spot.
    if pylab.gca().get_yscale() == 'linear':
        lbl_y = (y0*7+y1) / 8
    else: # Some type of log scale
        lbl_y = (y0**7*y1)**(1/8)
    ax.annotate(lbl, (x1+wdt/200, lbl_y))
def error_approx(yB, yN, xN, alpha=0, dim=1, hexagon=False):
    """Approximate the facet/subgrid algorithm error for the given sizes.

    yB: facet half-size; yN: padded facet half-size (must exceed yB);
    xN: subgrid margin; alpha: PSWF order; dim: dimensionality;
    hexagon: use hexagonal facet shape for dim >= 2.
    NOTE(review): the return value divides by the module-level global
    ``xM`` (set by the parameter-search cell below), so this function
    only works after that cell has run — consider passing xM explicitly.
    """
    # gridding error
    assert yB < yN
    pswf = anti_aliasing_function(int(yN)*2, alpha, 2*numpy.pi*yN*xN)
    pswf /= numpy.prod(numpy.arange(2*alpha-1,0,-2, dtype=float)) # double factorial
    # Alternating sum estimates the aliasing energy folded into the grid.
    grid_error = numpy.abs(numpy.sum(pswf[::2] - pswf[1::2]))
    # correction error: PSWF magnitude at the facet edge
    b_error = numpy.abs(pswf[int(yN) + int(yB)])
    if dim >= 2 and hexagon:
        b_error *= numpy.abs(pswf[int(yN) + int(yB/2)])**(dim-1)
    else:
        b_error **= dim
    return numpy.abs(grid_error) / (2*xM) / b_error
import math
# Base image size and target facet / subgrid sizes.
N = 512
yB = N / 8          # facet half-size (image space)
xA = 1 / 8          # subgrid half-size (grid space, as a fraction)
xA_size = int(math.ceil(xA*2*N))
yB_size = int(math.ceil(yB*2))
print("image_size=%d, xA_size=%d, yB_size=%d" % (N, xA_size, yB_size))
# Search for the subgrid padding xM (and derived xN, yN) that minimises
# the approximation error while keeping the PSWF parameter manageable.
overhead = 2.2      # allowed padding overhead factor
alpha = 0           # PSWF order
max_par = 100       # cap on the spheroidal parameter 2*pi*xN*yN
err_best = 10000
xM_step_best = xM_best = xN_best = yN_best = None
for xM_step in numpy.arange(int(numpy.ceil(1 / 2 / xA)), 1, -1):
    # Only consider steps that divide the image size evenly.
    if N % xM_step != 0:
        continue
    xM = 1 / 2 / xM_step
    xN = (xM - xA) / 2
    yN = numpy.floor(overhead * xA * yB / xM)
    par = 2 * numpy.pi * xN * yN
    # Margin must be at least one grid cell.
    if xN < 1 / N:
        continue
    print("xM = 1/%d, xN = %.4f, yN = %d, par = %.1f" % (xM_step*2, xN, yN, par), flush=True, end="")
    if yN <= yB or par > max_par:
        print(", par too high")
        break
    err = error_approx(yB, yN, xN, alpha=alpha)
    print(", err = %g" % (err))
    if err < err_best:
        err_best = err; xM_step_best = xM_step; xM_best = xM; xN_best = xN; yN_best = yN
# Adopt the best candidate found.
xM_step = xM_step_best; xM = xM_best; xN = xN_best; yN = yN_best
print("Chose xM=1/%d" % (xM_step*2))
# Derived pixel sizes for the chosen parameters.
xM_size = N // xM_step
xM_yN_size = int(numpy.ceil(xM*2*yN*2))
yN_size = xM_yN_size * xM_step
print("xM_size=%d, xM_yN_size=%d, yN_size=%d" % (xM_size, xM_yN_size, yN_size))
print("xM_step=%d" % xM_step)
print(yB, yN)
print("(Parameters for Sze-Tan: x0=%f, R=%d)" % (yB / yN / 2, int(numpy.floor(xN*2*yN))))
# Search for subgrid / facet counts and offset step sizes (Nx, Ny) such
# that every offset stays within the allowed deviation from its ideal
# centre. Counts are increased until a feasible configuration exists.
nsubgrid = int(math.ceil(N / xA_size))
nfacet = int(math.ceil(N / yB_size))
best_maxdxdy = N; best_subgrid_off = None; best_facet_off = None; best_ny = N
while best_subgrid_off is None:
    print("Trying %d sub-grids, %d facets (%.1f%% overhead):"
          % (nsubgrid, nfacet, 100 * (1 / (N / nsubgrid / xA_size * N / nfacet / yB_size) - 1)))
    # Ideal (unrounded) facet and subgrid centres.
    facet_y0s = numpy.arange(nfacet) * N / nfacet
    subgrid_x0s = numpy.arange(nsubgrid) / nsubgrid
    warnx_count = 0; warny_count = 0
    for Ny in xM_step * numpy.arange(1, N // xM_step):
        if N % Ny != 0:
            continue
        Nx = N // Ny
        # Round centres to multiples of the step sizes.
        subgrid_off = Nx * numpy.round(N * subgrid_x0s / Nx)
        facet_off = Ny * numpy.round(facet_y0s / Ny)
        # Worst-case deviation from the ideal centres.
        maxdx = numpy.max(numpy.abs(subgrid_off - N * subgrid_x0s))
        maxdy = numpy.max(numpy.abs(facet_off - facet_y0s))
        warnx = warny = ""
        if maxdx > (xA_size - N / nsubgrid) / 2:
            warnx = " (> %.1f!)" % ((xA_size - N / nsubgrid) / 2)
            warnx_count+=1
        if maxdy > (yB_size - N / nfacet) / 2:
            warny = " (> %.1f!)" % ((yB_size - N / nfacet) / 2)
            warny_count+=1
        print("Nx=%d, Ny=%d, maxdx=%.1f%s, maxdy=%.1f%s" % (Nx, Ny, maxdx, warnx, maxdy, warny))
        # Select: feasible configuration with the smallest Ny seen so far.
        if warnx == "" and warny == "" and best_ny > Ny:
            # best_maxdxdy > max(maxdx, maxdy):
            best_maxdxdy = max(maxdx, maxdy)
            best_subgrid_off = subgrid_off.astype(int)
            best_facet_off = facet_off.astype(int)
            best_ny = Ny
    # No solution found? Crudely use number of warnings as indicator what we need more of
    if best_subgrid_off is None:
        if warnx_count >= warny_count:
            nsubgrid += 1
        else:
            nfacet += 1
assert best_maxdxdy != N
subgrid_off = best_subgrid_off
facet_off = best_facet_off
Nx = N // best_ny; Ny = best_ny;
print ("Chose Nx=%d, Ny=%d, N*x0s=%s, y0s=%s" % (N // Ny, Ny, subgrid_off, facet_off))
def whole(xs):
    """Return True when every element of *xs* is numerically a whole number.

    Compares each value against its nearest integer with an absolute
    tolerance of 1e-13 (accounts for floating-point rounding noise).
    """
    deviation = numpy.abs(xs - numpy.around(xs))
    return numpy.all(deviation < 1e-13)
assert whole(numpy.outer(subgrid_off, facet_off) / N)
assert whole(facet_off*xM_size/N)
pylab.figure(figsize=(16, 1)); pylab.title("Subgrid Centre Offsets")
pylab.plot(((subgrid_off+N//2)%N-N//2)/N,numpy.zeros_like(subgrid_off), "b|", markersize=30);
pylab.plot(coordinates(N//Nx), numpy.zeros(N//Nx), "g|")
pylab.xlim(-.5,.5); pylab.yticks([])
for i, x in enumerate(subgrid_x0s):
mark_range("$x_{0,%d}$"%i, 0, (x + .5) % 1 - .5)
pylab.figure(figsize=(16, 1)); pylab.title("Facet Centre Offsets")
pylab.plot((facet_off+N//2)%N-N//2,numpy.zeros_like(facet_off), "b|", markersize=30);
pylab.plot(coordinates(N//Ny)*N, numpy.zeros(N//Ny), "g|")
pylab.xlim(-N/2,N/2); pylab.yticks([])
for i, y in enumerate(facet_y0s):
mark_range("$y_{0,%d}$"%i, 0, (y + N//2) % N - N//2)
pylab.show();
def greatest_prime_factor(x):
    """Return the largest prime factor of *x* (returns *x* itself for x < 2)."""
    divisor = 2
    while divisor * divisor <= x:
        # Strip every occurrence of this divisor, but always keep the final
        # remaining factor so the loop terminates with the answer in x.
        while x % divisor == 0 and divisor < x:
            x //= divisor
        divisor += 1
    return x
yP_size_options = (int(numpy.ceil( int(yB+yN*2) / Ny )) + numpy.arange(0,12)) * Ny
yP_size_primes = numpy.vectorize(greatest_prime_factor)(yP_size_options)
print(", ".join(["%d: %d" % yp for yp in zip(yP_size_options, yP_size_primes)]))
yP_size = yP_size_options[numpy.argmin(yP_size_primes)]
print("Chose yP_size = %d (%.1f %% overhead)" % (yP_size, 100 * yP_size / (yB_size/2+yN_size) - 100))
assert whole(subgrid_off*yP_size/N)
xM_yP_size = int(xM*2*yP_size)
xMxN_yP_size = xM_yP_size + 2*int(numpy.ceil(xN*yP_size)) # same margin both sides
print("yP_size=%d, xM_yP_size=%d, xMxN_yP_size=%d" % (yP_size, xM_yP_size, xMxN_yP_size))
G = numpy.random.rand(N)-0.5
FG = fft(G)
subgrid = numpy.empty((nsubgrid, xA_size), dtype=complex)
subgrid_A = numpy.zeros_like(subgrid, dtype=int)
subgrid_border = (subgrid_off + numpy.hstack([subgrid_off[1:],[N]])) // 2
print(subgrid_border)
for i in range(nsubgrid):
left = (subgrid_border[i-1] - subgrid_off[i] + xA_size//2) % N
right = subgrid_border[i] - subgrid_off[i] + xA_size//2
assert left >= 0 and right <= xA_size, "xA not large enough to cover subgrids!"
subgrid_A[i,left:right] = 1
subgrid[i] = subgrid_A[i] * extract_mid(numpy.roll(G, -subgrid_off[i]), xA_size)
facet = numpy.empty((nfacet, yB_size), dtype=complex)
facet_B = numpy.zeros_like(facet, dtype=bool)
facet_split = numpy.array_split(range(N), nfacet)
facet_border = (facet_off + numpy.hstack([facet_off[1:],[N]])) // 2
print(facet_border)
for j in range(nfacet):
left = (facet_border[j-1] - facet_off[j] + yB_size//2) % N
right = facet_border[j] - facet_off[j] + yB_size//2
assert left >= 0 and right <= yB_size, "yB not large enough to cover facets!"
facet_B[j,left:right] = 1
facet[j] = facet_B[j] * extract_mid(numpy.roll(FG, -facet_off[j]), yB_size)
pswf = anti_aliasing_function(yN_size, alpha, 2*numpy.pi*yN*xN).real
pylab.semilogy(extract_mid(numpy.abs(fft(pswf)), int(numpy.floor(xN*2*yN*2))), "s")
Fb = 1/extract_mid(pswf, yB_size)
Fn = pswf[(yN_size//2)%int(1/2/xM)::int(1/2/xM)]
facet_m0_trunc = pswf * numpy.sinc(coordinates(yN_size)*xM_size/N*yN_size)
facet_m0_trunc = xM_size*yP_size/N * extract_mid(ifft(pad_mid(facet_m0_trunc, yP_size)), xMxN_yP_size).real
print(N / 5, yP_size / 5, yN_size/ 5)
xN_yP_size = xMxN_yP_size - xM_yP_size
RNjMiBjFj = numpy.empty((nsubgrid, nfacet, xM_yN_size), dtype=complex)
for j in range(nfacet):
BjFj = ifft(pad_mid(facet[j] * Fb, yP_size))
for i in range(nsubgrid):
MiBjFj = facet_m0_trunc * extract_mid(numpy.roll(BjFj, -subgrid_off[i]*yP_size//N), xMxN_yP_size)
MiBjFj_sum = numpy.array(extract_mid(MiBjFj, xM_yP_size))
MiBjFj_sum[:xN_yP_size//2] += MiBjFj[-xN_yP_size//2:]
MiBjFj_sum[-xN_yP_size//2:] += MiBjFj[:xN_yP_size//2:]
RNjMiBjFj[i,j] = Fn * extract_mid(fft(MiBjFj_sum), xM_yN_size)
# - redistribution of RNjMiBjFj here -
fig = pylab.figure(figsize=(16, 8))
ax1, ax2 = fig.add_subplot(211), fig.add_subplot(212)
err_sum = err_sum_img = 0
for i in range(nsubgrid):
approx = numpy.zeros(xM_size, dtype=complex)
for j in range(nfacet):
approx += numpy.roll(pad_mid(RNjMiBjFj[i,j], xM_size), facet_off[j]*xM_size//N)
approx = subgrid_A[i] * extract_mid(ifft(approx), xA_size)
ax1.semilogy(xA*2*coordinates(xA_size), numpy.abs( approx - subgrid[i] ))
ax2.semilogy(N*coordinates(xA_size), numpy.abs( fft(approx - subgrid[i]) ))
err_sum += numpy.abs(approx - subgrid[i])**2
err_sum_img += numpy.abs(fft(approx - subgrid[i]))**2
mark_range("$x_A$", -xA, xA, ax=ax1); mark_range("$N/2$", -N/2, N/2, ax=ax2)
print("RMSE:", numpy.sqrt(numpy.mean(err_sum)), "(image:", numpy.sqrt(numpy.mean(err_sum_img)), ")")
FNjSi = numpy.empty((nsubgrid, nfacet, xM_yN_size), dtype=complex)
for i in range(nsubgrid):
FSi = fft(pad_mid(subgrid[i], xM_size))
for j in range(nfacet):
FNjSi[i,j] = extract_mid(numpy.roll(FSi, -facet_off[j]*xM_size//N), xM_yN_size)
# - redistribution of FNjSi here -
fig = pylab.figure(figsize=(16, 8))
ax1, ax2 = fig.add_subplot(211), fig.add_subplot(212)
err_sum = err_sum_img = 0
for j in range(nfacet):
approx = numpy.zeros(yB_size, dtype=complex)
for i in range(nsubgrid):
NjSi = numpy.zeros(xMxN_yP_size, dtype=complex)
NjSi_mid = extract_mid(NjSi, xM_yP_size)
NjSi_mid[:] = ifft(pad_mid(Fn * FNjSi[i,j], xM_yP_size)) # updates NjSi_tile via reference!
NjSi[-xN_yP_size//2:] = NjSi_mid[:xN_yP_size//2]
NjSi[:xN_yP_size//2:] = NjSi_mid[-xN_yP_size//2:]
FMiNjSi = fft(numpy.roll(pad_mid(facet_m0_trunc * NjSi, yP_size), subgrid_off[i]*yP_size//N))
approx += extract_mid(FMiNjSi, yB_size)
approx *= Fb * facet_B[j]
err_sum += numpy.abs(ifft(approx - facet[j]))**2
err_sum_img += numpy.abs(approx - facet[j])**2
ax1.semilogy(coordinates(yB_size), numpy.abs(ifft(facet[j] - approx)))
ax2.semilogy(yB_size*coordinates(yB_size), numpy.abs(facet[j] - approx))
print("RMSE:", numpy.sqrt(numpy.mean(err_sum)), "(image:", numpy.sqrt(numpy.mean(err_sum_img)), ")")
mark_range("$x_A$", -xA, xA, ax=ax1)
mark_range("$x_M$", -xM, xM, ax=ax1)
mark_range("$y_B$", -yB, yB, ax=ax2)
mark_range("$0.5$", -.5, .5, ax=ax1)
pylab.show(fig)
print(nsubgrid,"x",nsubgrid,"subgrids,",nfacet,"x", nfacet,"facets")
subgrid_2 = numpy.empty((nsubgrid, nsubgrid, xA_size, xA_size), dtype=complex)
facet_2 = numpy.empty((nfacet, nfacet, yB_size, yB_size), dtype=complex)
G_2 = numpy.exp(2j*numpy.pi*numpy.random.rand(N,N))*numpy.random.rand(N,N)/2
for i0,i1 in itertools.product(range(nsubgrid), range(nsubgrid)):
subgrid_2[i0,i1] = extract_mid(numpy.roll(G_2, (-subgrid_off[i0], -subgrid_off[i1]), (0,1)), xA_size)
subgrid_2[i0,i1] *= numpy.outer(subgrid_A[i0], subgrid_A[i1])
FG_2 = fft(G_2)
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
facet_2[j0,j1] = extract_mid(numpy.roll(FG_2, (-facet_off[j0], -facet_off[j1]), (0,1)), yB_size)
facet_2[j0,j1] *= numpy.outer(facet_B[j0], facet_B[j1])
def slice_a(fill_val, axis_val, dims, axis):
    """Build a *dims*-tuple containing *axis_val* at position *axis* and *fill_val* elsewhere.

    Used to construct per-axis index/pad specifications for numpy operations.
    """
    spec = [fill_val] * dims
    spec[axis] = axis_val
    return tuple(spec)
def pad_mid_a(a, N, axis):
    """Zero-pad *a* along *axis* up to size *N*, keeping the data centred.

    Returns *a* unchanged when it already has the requested size.
    """
    current = a.shape[axis]
    if current == N:
        return a
    before = N // 2 - current // 2
    after = (N + 1) // 2 - (current + 1) // 2
    pad = slice_a((0, 0), (before, after), len(a.shape), axis)
    return numpy.pad(a, pad, mode='constant', constant_values=0.0)
def extract_mid_a(a, N, axis):
    """Return the centred length-*N* window of *a* along *axis* (a view, not a copy)."""
    assert N <= a.shape[axis]
    centre = a.shape[axis] // 2
    half = N // 2
    # An odd window takes one extra element to the right of the centre.
    stop = centre + half + 1 if N % 2 != 0 else centre + half
    window = slice(centre - half, stop)
    return a[slice_a(slice(None), window, len(a.shape), axis)]
def fft_a(a, axis):
    """Centred FFT along *axis*: ifftshift, transform, then fftshift back."""
    shifted = numpy.fft.ifftshift(a, axis)
    transformed = numpy.fft.fft(shifted, axis=axis)
    return numpy.fft.fftshift(transformed, axis)
def ifft_a(a, axis):
    """Centred inverse FFT along *axis*: ifftshift, inverse transform, fftshift back."""
    shifted = numpy.fft.ifftshift(a, axis)
    transformed = numpy.fft.ifft(shifted, axis=axis)
    return numpy.fft.fftshift(transformed, axis)
def broadcast_a(a, dims, axis):
    """View 1-D array *a* as *dims*-dimensional, its data lying along *axis*.

    All other axes are inserted as length-1 (numpy.newaxis) so the result
    broadcasts against a *dims*-dimensional array.
    """
    index = slice_a(numpy.newaxis, slice(None), dims, axis)
    return a[index]
def prepare_facet(facet, axis):
    """Grid-correct and pad a facet along one axis, returning its image-space form.

    Multiplies *facet* by the correction function ``Fb`` along *axis*, pads the
    result to the intermediate grid size ``yP_size``, and applies the centred
    inverse FFT along that axis.
    NOTE(review): depends on module-level globals ``Fb`` and ``yP_size``.
    """
    BF = pad_mid_a(facet * broadcast_a(Fb, len(facet.shape), axis), yP_size, axis)
    return ifft_a(BF, axis)
def extract_subgrid(BF, i, axis):
    """Extract the contribution of prepared facet data *BF* to sub-grid *i* along *axis*.

    Rolls *BF* to centre it on sub-grid *i*, applies the truncated mask
    ``facet_m0_trunc``, folds the two overhanging margins back into the
    central window, and returns the Fourier-space result weighted by ``Fn``.
    NOTE(review): relies on module-level globals (subgrid_off, yP_size, N,
    xMxN_yP_size, xM_yP_size, xM_yN_size, facet_m0_trunc, Fn).
    """
    dims = len(BF.shape)
    # Centre BF on sub-grid i, then cut out the slightly oversized window.
    BF_mid = extract_mid_a(numpy.roll(BF, -subgrid_off[i]*yP_size//N, axis), xMxN_yP_size, axis)
    MBF = broadcast_a(facet_m0_trunc,dims,axis) * BF_mid
    # numpy.array(...) makes a copy so the in-place += below cannot alias MBF.
    MBF_sum = numpy.array(extract_mid_a(MBF, xM_yP_size, axis))
    xN_yP_size = xMxN_yP_size - xM_yP_size
    # [:xN_yP_size//2] / [-xN_yP_size//2:] for axis, [:] otherwise
    slc1 = slice_a(slice(None), slice(xN_yP_size//2), dims, axis)
    slc2 = slice_a(slice(None), slice(-xN_yP_size//2,None), dims, axis)
    # Fold each overhanging margin back onto the opposite edge of the window.
    MBF_sum[slc1] += MBF[slc2]
    MBF_sum[slc2] += MBF[slc1]
    return broadcast_a(Fn,len(BF.shape),axis) * \
        extract_mid_a(fft_a(MBF_sum, axis), xM_yN_size, axis)
t = time.time()
NMBF_NMBF = numpy.empty((nsubgrid, nsubgrid, nfacet, nfacet, xM_yN_size, xM_yN_size), dtype=complex)
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
BF_F = prepare_facet(facet_2[j0,j1], 0)
BF_BF = prepare_facet(BF_F, 1)
for i0 in range(nsubgrid):
NMBF_BF = extract_subgrid(BF_BF, i0, 0)
for i1 in range(nsubgrid):
NMBF_NMBF[i0,i1,j0,j1] = extract_subgrid(NMBF_BF, i1, 1)
print(time.time() - t, "s")
t = time.time()
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
BF_F = prepare_facet(facet_2[j0,j1], 0)
for i0 in range(nsubgrid):
NMBF_F = extract_subgrid(BF_F, i0, 0)
NMBF_BF = prepare_facet(NMBF_F, 1)
for i1 in range(nsubgrid):
NMBF_NMBF[i0,i1,j0,j1] = extract_subgrid(NMBF_BF, i1, 1)
print(time.time() - t, "s")
t = time.time()
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
F_BF = prepare_facet(facet_2[j0,j1], 1)
for i1 in range(nsubgrid):
F_NMBF = extract_subgrid(F_BF, i1, 1)
BF_NMBF = prepare_facet(F_NMBF, 0)
for i0 in range(nsubgrid):
NMBF_NMBF[i0,i1,j0,j1] = extract_subgrid(BF_NMBF, i0, 0)
print(time.time() - t, "s")
pylab.rcParams['figure.figsize'] = 16, 8
err_sum = err_sum_img = 0
for i0,i1 in itertools.product(range(nsubgrid), range(nsubgrid)):
approx = numpy.zeros((xM_size, xM_size), dtype=complex)
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
approx += numpy.roll(pad_mid(NMBF_NMBF[i0,i1,j0,j1], xM_size),
(facet_off[j0]*xM_size//N, facet_off[j1]*xM_size//N), (0,1))
approx = extract_mid(ifft(approx), xA_size)
approx *= numpy.outer(subgrid_A[i0], subgrid_A[i1])
err_sum += numpy.abs(approx - subgrid_2[i0,i1])**2 / nsubgrid**2
err_sum_img += numpy.abs(fft(approx - subgrid_2[i0,i1]))**2 / nsubgrid**2
pylab.imshow(numpy.log(numpy.sqrt(err_sum)) / numpy.log(10)); pylab.colorbar(); pylab.show()
pylab.imshow(numpy.log(numpy.sqrt(err_sum_img)) / numpy.log(10)); pylab.colorbar(); pylab.show()
print("RMSE:", numpy.sqrt(numpy.mean(err_sum)), "(image:", numpy.sqrt(numpy.mean(err_sum_img)), ")")
pylab.rcParams['figure.figsize'] = 16, 4
gc_alpha = 0; xGp = 5/N; gc_x0 = 0.35
gc_support = int(2*xGp*N)
print("parameter:", numpy.pi*gc_support/2, "x0:", gc_x0)
x0_size = int(N*gc_x0*2)
gc_pswf = anti_aliasing_function(N, gc_alpha, numpy.pi*gc_support/2)
gc = pad_mid(extract_mid(1 / gc_pswf, x0_size), N)
pylab.semilogy(x0_size*coordinates(x0_size), numpy.abs(extract_mid(gc, x0_size))); pylab.legend(["F[n]"]);
pylab.xlim((-N/1.8, N/1.8))
mark_range("$x_0N$", -gc_x0*N,gc_x0*N);
mark_range("$N/2$", -N/2,N/2); pylab.title("Grid correction"); pylab.show();
FG_2_gc = FG_2 * numpy.outer(gc, gc)
show_image(numpy.log(numpy.maximum(1e-15, numpy.abs(FG_2_gc))) / numpy.log(10), "FG_2_cropped", N)
G_2_gc = ifft(FG_2_gc)
crop = pad_mid(numpy.ones(x0_size), N)
G_2_cropped = ifft(FG_2 * numpy.outer(crop,crop))
subgrid_2 = numpy.empty((nsubgrid, nsubgrid, xA_size, xA_size), dtype=complex)
facet_2 = numpy.empty((nfacet, nfacet, yB_size, yB_size), dtype=complex)
for i0,i1 in itertools.product(range(nsubgrid), range(nsubgrid)):
subgrid_2[i0,i1] = extract_mid(numpy.roll(G_2_gc, (-subgrid_off[i0], -subgrid_off[i1]), (0,1)), xA_size)
subgrid_2[i0,i1] *= numpy.outer(subgrid_A[i0], subgrid_A[i1])
fig = pylab.figure(figsize=(32,32))
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
facet_2[j0,j1] = extract_mid(numpy.roll(FG_2_gc, (-facet_off[j0], -facet_off[j1]), (0,1)), yB_size)
facet_2[j0,j1] *= numpy.outer(facet_B[j0], facet_B[j1])
show_image(numpy.log(numpy.maximum(1e-15, numpy.abs(facet_2[j0,j1]))) / numpy.log(10),
"facet_%d%d" % (j0,j1), N/nfacet, axes=fig.add_subplot(nfacet,nfacet,j1+(nfacet-j0-1)*nfacet+1),
norm=(-15,8))
pylab.show(fig)
NMBF_NMBF = numpy.empty((nsubgrid, nsubgrid, nfacet, nfacet, xM_yN_size, xM_yN_size), dtype=complex)
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
F_BF = prepare_facet(facet_2[j0,j1], 1)
for i1 in range(nsubgrid):
F_NMBF = extract_subgrid(F_BF, i1, 1)
BF_NMBF = prepare_facet(F_NMBF, 0)
for i0 in range(nsubgrid):
NMBF_NMBF[i0,i1,j0,j1] = extract_subgrid(BF_NMBF, i0, 0)
from pylru import lrudecorator
@lrudecorator(100)
def make_approx_subgrid(i0,i1):
    """Reassemble the approximate sub-grid (i0, i1) from all facet contributions.

    Returns ``(full approximate sub-grid image, relative RMSE)`` against the
    reference sub-grid. Results are LRU-cached (100 entries) because the
    degridding demo below requests the same sub-grids repeatedly.
    NOTE(review): depends on module-level globals (NMBF_NMBF, facet_off,
    xM_size, N, nfacet, nsubgrid, subgrid_A, subgrid_2, xA_size).
    """
    approx = numpy.zeros((xM_size, xM_size), dtype=complex)
    for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
        approx += numpy.roll(pad_mid(NMBF_NMBF[i0,i1,j0,j1], xM_size),
            (facet_off[j0]*xM_size//N, facet_off[j1]*xM_size//N), (0,1))
    # Extract region that is set in subgrid for comparison
    approx_compare = extract_mid(ifft(approx), xA_size)
    approx_compare *= numpy.outer(subgrid_A[i0], subgrid_A[i1])
    rmse = numpy.sqrt(numpy.mean(numpy.abs(approx_compare - subgrid_2[i0,i1])**2 / nsubgrid**2))
    # Return full approximation. We degrid from it, so bounds don't matter
    return ifft(approx), rmse / numpy.mean(numpy.abs(approx_compare))
oversample = 2**14
print("grid support:", gc_support)
print("oversampling:", oversample)
kernel = kernel_oversample(gc_pswf, oversample, gc_support).real
kernel /= numpy.sum(kernel[0])
r = numpy.arange(-oversample*(gc_support//2), oversample*((gc_support+1)//2)) / oversample
pylab.semilogy(r, numpy.transpose(kernel).flatten().real); mark_range("$Nx_G$", -N*xGp,N*xGp);
pylab.title("Gridding kernel (oversampled x%d)" % oversample); pylab.show();
@interact(iu=(0, N, 0.01),iv=(0, N, 0.01))
def test_degrid_accuracy(iu,iv, show_subgrid=False):
    """Interactively compare direct degridding with recombine-then-degrid.

    *iu*, *iv* are (possibly fractional) grid coordinates. Prints the directly
    degridded visibility, the one obtained from the approximate sub-grid, and
    the corresponding errors. Set *show_subgrid* to display the sub-grid used,
    with the kernel footprint marked.
    NOTE(review): depends on many module-level globals and on conv_predict /
    show_grid / patches imported elsewhere in the notebook.
    """
    # Fractional (u, v) coordinates relative to the grid centre.
    u = (iu - N//2) / N; v = (iv - N//2) / N
    # Index of the sub-grid containing this point, per axis.
    su = numpy.sum((iu+N//2)%N >= subgrid_border) % nsubgrid
    sv = numpy.sum((iv+N//2)%N >= subgrid_border) % nsubgrid
    siu = iu + xA_size//2-(subgrid_off[su] + N//2) % N
    siv = iv + xA_size//2-(subgrid_off[sv] + N//2) % N
    dAM = (xM_size - xA_size) // 2
    # Reference: degrid directly from the full grid-corrected image.
    deg = conv_predict(N, 1, numpy.array([(u,v,0)]), None, G_2_gc, kernel)[0]
    if whole(iu) and whole(iv):
        actual = G_2_cropped[int(iv),int(iu)]
        print("actual: ", actual)
        print("degridded: ", deg)
        print("degrid error: ", numpy.abs(deg-actual))
    else:
        print("degridded: ", deg)
    # Now degrid from the recombined approximate sub-grid instead.
    approx_subgrid, rmse = make_approx_subgrid(sv, su)
    print("subgrid: (%d/%d), rmse: %g" % (su, sv, rmse))
    sou = (((subgrid_off[su] + N//2) % N) - N//2) / N
    sov = (((subgrid_off[sv] + N//2) % N) - N//2) / N
    deg_ap = conv_predict(N, 2*xM, numpy.array([(u-sou,v-sov,0)]), None, approx_subgrid, kernel)[0]
    print("recomb+degrid:", deg_ap);
    print("recomb error: ", numpy.abs(deg_ap-deg))
    if whole(iu) and whole(iv):
        print("total error: ", numpy.abs(deg_ap-actual))
    if show_subgrid:
        fig = pylab.figure()
        ax = fig.add_subplot(111)
        show_grid(numpy.abs(numpy.log(approx_subgrid) / numpy.log(10)), "subgrid_%d%d" % (su,sv), N, axes=ax)
        ax.add_patch(patches.Rectangle((u-sou-gc_support//2/N, v-sov-gc_support//2/N),
            gc_support/N, gc_support/N, fill=False))
        pylab.show(fig)
nvis = 15000
nvis_sg = 500
us = (N - gc_support) / N * (numpy.random.rand(nvis) - 0.5)
vs = (N - gc_support) / N * (numpy.random.rand(nvis) - 0.5)
sus = numpy.sum((us[:,None] * N + N) % N >= subgrid_border,axis=1) % nsubgrid
svs = numpy.sum((vs[:,None] * N + N) % N >= subgrid_border,axis=1) % nsubgrid
uvws = numpy.transpose([us, vs, numpy.zeros_like(us)])
deg = conv_predict(N, 1, uvws, None, G_2_gc, kernel)
sel_sg = numpy.empty((nsubgrid, nsubgrid, nvis_sg), dtype=int)
uvw_sg = numpy.empty((nsubgrid, nsubgrid, nvis_sg, 3), dtype=float)
deg_ap_sg = numpy.empty((nsubgrid, nsubgrid, nvis_sg), dtype=complex)
for sv in range(nsubgrid):
for su in range(nsubgrid):
# Sub-grid offsets
sou = (((subgrid_off[su] + N//2) % N) - N//2) / N
sov = (((subgrid_off[sv] + N//2) % N) - N//2) / N
print(sov, sou)
# Determine uvws
sel_sg[sv,su] = numpy.array(numpy.where( (sus == su) & (svs == sv) ))[0,:nvis_sg]
uvw_sg[sv,su] = numpy.transpose([us[sel_sg[sv,su]]-sou, vs[sel_sg[sv,su]]-sov,
numpy.zeros_like(us[sel_sg[sv,su]])])
# Handle sub-grids overlapping the sides of the grid
uvw_sg[sv,su,numpy.abs(uvw_sg[sv,su,:,0] /2/xM) >= 0.5,0] -= 1
uvw_sg[sv,su,numpy.abs(uvw_sg[sv,su,:,1] /2/xM) >= 0.5,1] -= 1
# Degrid from sub-grids
approx_subgrid, rmse = make_approx_subgrid(sv, su)
deg_ap_sg[sv,su] = conv_predict(N, 2*xM, uvw_sg[sv,su], None, approx_subgrid, kernel)
# Check accuracy
diff = deg_ap_sg[sv,su] - deg[sel_sg[sv,su]]
print("Facet %d/%d: %d vis, rmse %g" % (sv,su,len(sel_sg[sv,su]),
numpy.sqrt(numpy.mean(numpy.abs(diff)**2))))
import os.path
import h5py
out_prefix = "../../data/grid/T05b_"
with h5py.File(out_prefix + "in.h5",'w') as f:
f['pswf'] = numpy.fft.ifftshift(pswf)
f['sepkern/kern'] = kernel
for j0,j1 in itertools.product(range(nfacet), range(nfacet)):
f["j0=%d/j1=%d/facet" % (j0,j1)] = numpy.fft.ifftshift(facet_2[j0,j1])
for i0,i1 in itertools.product(range(nsubgrid), range(nsubgrid)):
f['i0=%d/i1=%d/j0=%d/j1=%d/nmbf' % (i0,i1,j0,j1)] = \
numpy.fft.ifftshift(NMBF_NMBF[i0,i1,j0,j1])
for i0,i1 in itertools.product(range(nsubgrid), range(nsubgrid)):
#f["i0=%d/i1=%d/subgrid" % (i0,i1)] = numpy.fft.ifftshift(subgrid_2[i0, i1])
f["i0=%d/i1=%d/approx" % (i0,i1)] = numpy.fft.ifftshift(make_approx_subgrid(i0, i1)[0])
for sv in range(nsubgrid):
for su in range(nsubgrid):
# Write
f['i0=%d/i1=%d/uvw' % (sv,su)] = uvws[sel_sg[sv,su]]
f['i0=%d/i1=%d/uvw_subgrid' % (sv,su)] = uvw_sg[sv,su]
f['i0=%d/i1=%d/vis' % (sv,su)] = deg[sel_sg[sv,su]]
#f['i0=%d/i1=%d/vis_approx' % (sv,su)] = deg_ap
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parametrisation
Step2: Choose "gridding" function extents
Step3: Placing facets and subgrids
Step4: Sizing the intermediate grid
Step5: We need a bunch of array constants derived from the gridding function
Step6: Facet $\rightarrow$ Subgrid
Step7: Subgrid $\rightarrow$ facet
Step8: 2D case
Step9: Given that the amount of data has been squared, performance is a bit more of a concern now. Fortunately, the entire procedure is completely separable, so let us first re-define the operations to work on one array axis exclusively
Step10: This allows us to define the two fundamental operations - going from $F$ to $b\ast F$ and from $b\ast F$ to $n\ast m(b\ast F)$ separately
Step11: Having those operations separately means that we can shuffle things around quite a bit without affecting the result. The obvious first choice might be to do all facet-preparation up-front, as this allows us to share the computation across all subgrids
Step12: However, remember that prepare_facet increases the amount of data involved, which in turn means that we need to shuffle more data through subsequent computations.
Step13: Degridding
Step14: From this we derive the new $\mathcal F G$ that we are going to feed to the recombination algorithm
Step15: Which in turn leads to new facets. Note how the grid correction pattern is clearly larger than any individual facet.
Step16: The recombination algorithm again, using the new data.
Step17: In order to obtain visibilities at non-integer positions we need an oversampled gridding function, as usual
Step18: Test systematically
Step19: Write test data set
|
6,903
|
<ASSISTANT_TASK:>
Python Code:
# Basic QSettings usage: persist and read back application settings.
from PyQt5.QtCore import QSettings
myOrganization = 'MyOrg' # organization name
myApplication = 'MyApp' # application name
settings = QSettings(myOrganization, myApplication)
settings.setValue("editor/wrapMargin", 68)
print (settings.value("editor/wrapMargin"))
print (settings.value("editor/someth"))
# If QSettings is used in many places of a program, the names can be defined
# once on the application object and then used uniformly everywhere.
from PyQt5.QtCore import QCoreApplication
QCoreApplication.setOrganizationName("MyOrg2nd")
QCoreApplication.setOrganizationDomain("MyOrg2nd.com")
QCoreApplication.setApplicationName("MyApp2nd")
settings_2nd = QSettings()
print (settings.applicationName())
print (settings_2nd.applicationName())
print (settings_2nd.organizationName())
# Test whether a key exists
settings.contains("editor")
settings.contains("editor/wrapMargin")
settings.remove("editor") # remove a key; no error is raised if it does not exist
settings.contains("editor/wrapMargin")
settings.clear() # clear all keys (only the ones we added; system-provided ones remain)
settings.allKeys() # list all keys
settings.fileName() # storage location of the settings
settings_ini = QSettings("conf.ini", QSettings.IniFormat) # store settings in an ini file
settings_ini.fileName()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 最简单的用法
|
6,904
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import swap
base_collection_path = '/nfs/slac/g/ki/ki18/cpd/swap/pickles/15.09.02/'
base_directory = '/nfs/slac/g/ki/ki18/cpd/swap_catalog_diagnostics/'
annotated_catalog_path = base_directory + 'annotated_catalog.csv'
cut_empty = True
stages = [1, 2]
categories = ['ID', 'ZooID', 'location', 'mean_probability', 'category', 'kind', 'flavor',
'state', 'status', 'truth', 'stage', 'line']
annotation_categories = ['At_X', 'At_Y', 'PD', 'PL']
catalog = []
for stage in stages:
print(stage)
collection_path = base_collection_path + 'stage{0}'.format(stage) + '/CFHTLS_collection.pickle'
collection = swap.read_pickle(collection_path, 'collection')
for ID in collection.list():
subject = collection.member[ID]
catalog_i = []
# for stage1 we shall skip the tests for now
if (stage == 1) * (subject.category == 'test'):
continue
# flatten out x and y. also cut out empty entries
annotationhistory = subject.annotationhistory
x_unflat = annotationhistory['At_X']
x = np.array([xi for xj in x_unflat for xi in xj])
# cut out catalogs with no clicks
if (len(x) < 1) and (cut_empty):
continue
# oh yeah there's that absolutely nutso entry with 50k clicks
if len(x) > 10000:
continue
for category in categories:
if category == 'stage':
catalog_i.append(stage)
elif category == 'line':
catalog_i.append(line)
else:
catalog_i.append(subject.__dict__[category])
for category in annotation_categories:
catalog_i.append(list(annotationhistory[category]))
catalog.append(catalog_i)
catalog = pd.DataFrame(catalog, columns=categories + annotation_categories)
# save catalog
catalog.to_csv(annotated_catalog_path)
# Build the known-lens catalog: attach a ZooID to every known-lens CFHTID.
knownlens_dir = '/nfs/slac/g/ki/ki18/cpd/code/strongcnn/catalog/knownlens/'
knownlensID = pd.read_csv(knownlens_dir + 'knownlensID', sep=' ')
listfiles_d1_d11 = pd.read_csv(knownlens_dir + 'listfiles_d1_d11.txt', sep=' ')
knownlenspath = knownlens_dir + 'knownlens.csv'
# Restrict the field listing to entries whose CFHTID matches a known lens.
X2 = listfiles_d1_d11[listfiles_d1_d11['CFHTID'].isin(knownlensID['CFHTID'])] # cuts down to like 212 entries.
# Look up the ZooID for each known-lens row. BUG FIX: the original loop
# iterated over `range(len(Y))`, but `Y` is undefined here (NameError); the
# following column assignment requires exactly one ZooID per knownlensID row,
# so iterate over knownlensID instead.
ZooID = []
for i in range(len(knownlensID)):
    ZooID.append(X2['ZooID'][X2['CFHTID'] == knownlensID['CFHTID'][i]].values[0])
knownlensID['ZooID'] = ZooID
knownlensID.to_csv(knownlenspath)
# code to regenerate the catalogs
base_directory = '/nfs/slac/g/ki/ki18/cpd/swap_catalog_diagnostics/'
cluster_directory = base_directory
## uncomment this line when updating the shared catalog!
# base_directory = '/nfs/slac/g/ki/ki18/cpd/swap_catalog/'
# cluster_directory = base_directory + 'clusters/'
field_directory = base_directory
knownlens_path = base_directory + 'knownlens.csv'
collection_path = base_directory + 'annotated_catalog.csv'
catalog_path = cluster_directory + 'catalog.csv'
# if we're rerunning this code, we should remove the old cluster pngs,
# all of which have *_*.png
from glob import glob
files_to_delete = glob(cluster_directory + '*_*.png')
from os import remove
for delete_this_file in files_to_delete:
remove(delete_this_file)
# run create catalog code. This can take a while.
from subprocess import call
command = ['python', '/nfs/slac/g/ki/ki18/cpd/code/strongcnn/code/create_catalogs.py',
'--collection', collection_path,
'--knownlens', knownlens_path,
'--clusters', cluster_directory,
'--fields', field_directory,
#'--augment', augmented_directory,
#'--do_a_few', '100',
]
call(command)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the knownlens catalog
Step2: Convert the annotated catalog and knownlens catalog into cluster catalogs and cutouts
|
6,905
|
<ASSISTANT_TASK:>
Python Code:
# The interpreter can be used as a calculator, and can also echo or concatenate strings.
3 + 3
3 * 3
3 ** 3
3 / 2 # classic division - output is a floating point number
# Use quotes around strings, single or double, but be consistent to the extent possible
'dogs'
"dogs"
"They're going to the beach"
'He said "I like mac and cheese"'
# sometimes you can't escape the escape
'He said "I\'d like mac and cheese"'
# + operator can be used to concatenate strings
'dogs' + "cats"
print('Hello World!')
a = 5
b = 10
a + b
b = 38764289.1097
a + b
a = 5
b = 10
while b > a:
print("b="+str(b))
b = b-1
a = 5
type(a)
a = int(5)
type(a)
c = 'dogs'
print(type(c))
c = str('dogs')
print(type(c))
customer = 'Carol'
pizzas = 2
print(customer + ' ordered ' + pizzas + ' pizzas.')
customer = 'Carol'
pizzas = str(2)
print(customer + ' ordered ' + pizzas + ' pizzas.')
customer = 'Carol'
pizzas = 2
print(customer + ' ordered ' + str(pizzas) + ' pizzas.')
# Read unstructured text
# One way is to open the whole file as a block
file_path = "./beowulf" # We can save the path to the file as a variable
file_in = open(file_path, "r") # Options are 'r', 'w', and 'a' (read, write, append)
beowulf_a = file_in.read()
file_in.close()
print(beowulf_a)
# Another way is to read the file as a list of individual lines
with open(file_path, "r") as b:
beowulf_b = b.readlines()
print(beowulf_b)
# In order to get a similar printout to the first method, we use a for loop
# to print line by line - more on for loops below!
for l in beowulf_b:
print(l)
# We now have two variables with the content of our 'beowulf' file represented using two different data structures.
# Why do you think we get the different outputs from the next two statements?
# Beowulf text stored as one large string
print("As string:", beowulf_a[0])
# Beowulf text stored as a list of lines
print("As list of lines:", beowulf_b[0])
# We can confirm our expectations by checking on the types of our two beowulf variables
print(type(beowulf_a))
print(type(beowulf_b))
# Read CSV files using the Pandas read_csv method.
# Note: Pandas also includes methods for reading Excel.
# First we need to import the pandas library
import pandas as pd
# Create a variable to hold the path to the file
fpath = "aaj1945_DataS1_Egg_shape_by_species_v2.csv"
egg_data = pd.read_csv(fpath)
# We can get all kinds of info about the dataset
# info() provides an overview of the structure
print(egg_data.info())
# Look at the first five rows
egg_data.head()
# Names of columns
print(egg_data.columns.values)
# Dimensions (number of rows and columns)
print(egg_data.shape)
# And much more! But as a final example we can perform operations on the data.
# Descriptive statistics on the "Number of eggs" column
print(egg_data["Number of eggs"].describe())
# Or all of the columns in whole table with numeric data types:
print(egg_data.describe())
# Fun with types
this = 12
that = 15
the_other = "27"
my_stuff = [this,that,the_other,["a","b","c",4]]
more_stuff = {
"item1": this,
"item2": that,
"item3": the_other,
"item4": my_stuff
}
this + that
# this won't work ...
# this + that + the_other
# ... but this will ...
this + that + int(the_other)
# ...and this too
str(this) + str(that) + the_other
# Separate list items with commas!
number_list = [1, 2, 3, 4, 5]
string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']
combined_list = [1, 2, 'oranges', 3.14, 'peaches', 'grapes', 99.19876]
# Nested lists - lists of lists - are allowed.
list_of_lists = [[1, 2, 3],
['oranges', 'grapes', 8],
[['small list'],
['bigger', 'list', 55],
['url_1', 'url_2']
]
]
# Create an empty list
empty_list = []
# As we did above, by using square brackets around a comma-separated sequence of items
new_list = [1, 2, 3]
# Using the type constructor
constructed_list = list('purple')
# Using a list comprehension
result_list = [i for i in range(1, 20)]
empty_list
new_list
result_list
constructed_list
constructed_list_int = list(123)
constructed_list_str = list('123')
constructed_list_str
ordered = [3, 2, 7, 1, 19, 0]
ordered
# There is a 'sort' method for sorting list items as needed:
ordered.sort()
ordered
string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']
string_list[0]
# We can use positions to 'slice' or select sections of a list:
string_list[3:] # start at index '3' and continue to the end
string_list[:3] # start at index '0' and go up to, but don't include index '3'
string_list[1:4] # start at index '1' and go up to and don't include index '4'
# If we don't know the position of a list item, we can use the 'index()' method to find out.
# Note that in the case of duplicate list items, this only returns the position of the first one:
string_list.index('pears')
string_list.append('oranges')
string_list
string_list.index('oranges')
# one more time with lists and dictionaries
list_ex1 = my_stuff[0] + my_stuff[1] + int(my_stuff[2])
print(list_ex1)
# we can use parentheses to split a continuous group of commands over multiple lines
list_ex2 = (
str(my_stuff[0])
+ str(my_stuff[1])
+ my_stuff[2]
+ my_stuff[3][0]
)
print(list_ex2)
dict_ex1 = (
more_stuff['item1']
+ more_stuff['item2']
+ int(more_stuff['item3'])
)
print(dict_ex1)
dict_ex2 = (
str(more_stuff['item1'])
+ str(more_stuff['item2'])
+ more_stuff['item3']
)
print(dict_ex2)
# Now try it yourself ...
# print out the phrase "The answer: 42" using the following
# variables and one or more of your own and the 'print()' function
# (remember spaces are characters as well)
start = "The"
answer = 42
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Try It Yourself
Step2: Variables can be reassigned
Step3: The ability to reassign variable values becomes important when iterating through groups of objects for batch processing or other purposes. In the example below, the value of b is dynamically updated every time the while loop is executed
Step4: Variable data types can be inferred, so Python does not require us to declare the data type of a variable on assignment.
Step5: is equivalent to
Step6: There are cases when we may want to declare the data type, for example to assign a different data type from the default that will be inferred. Concatenating strings provides a good example.
Step7: Above, Python has inferred the type of the variable pizza to be an integer. Since strings can only be concatenated with other strings, our print statement generates an error. There are two ways we can resolve the error
Step8: Given the following variable assignments
Step9: Structure
Step11: Lists
Step12: There are multiple ways to create a list
Step13: We can inspect our lists
Step14: The above output for constructed_list may seem odd. Referring to the documentation, we see that the argument to the type constructor is an iterable, which according to the documentation is "An object capable of returning its members one at a time." In our construtor statement above
Step15: Lists in Python are
Step16: Info on additional list methods is available at https
|
6,906
|
<ASSISTANT_TASK:>
Python Code:
# Import all functions from external file
from download_and_process_DE_functions import *
# Jupyter functions
%matplotlib inline
# Download settings: fetch the source lists either from the original
# providers (BNetzA / UBA) or from the archived copies on the OPSD data
# server (fixed 2020-10-01 release).
download_from = 'original_sources'
# download_from = 'opsd_server'

if download_from == 'original_sources':
    # BNetzA power plant list (CSV)
    url_bnetza = ('http://www.bundesnetzagentur.de/SharedDocs/Downloads/DE/'
                  'Sachgebiete/Energie/Unternehmen_Institutionen/Versorgungssicherheit/'
                  'Erzeugungskapazitaeten/Kraftwerksliste/Kraftwerksliste_CSV.csv'
                  '?__blob=publicationFile&v=10')
    # UBA power plant list (XLS, plants >= 100 MW)
    url_uba = ('https://www.umweltbundesamt.de/sites/default/files/medien/'
               '372/dokumente/kraftwerke_de_ab_100_mw_0.xls')

if download_from == 'opsd_server':
    # Location of the original_data folder on the OPSD data server
    opsd_base = ('http://data.open-power-system-data.org/'
                 'conventional_power_plants/2020-10-01/original_data/')
    # BNetzA power plant list
    url_bnetza = opsd_base + 'Kraftwerksliste_CSV.csv'
    # UBA power plant list
    url_uba = opsd_base + 'kraftwerke-de-ab-100-mw_0.xls'
plantlist = getbnetzalist(url_bnetza)
# clean unnamed columns
plantlist.drop([c for c in plantlist.columns if 'Unnamed:' in c], axis=1, inplace=True)
plantlist.head()
plantlist_uba = getubalist(url_uba)
plantlist_uba.head()
dict_columns = {
'Kraftwerksnummer Bundesnetzagentur':
'id',
'Unternehmen':
'company',
'Kraftwerksname':
'name',
'PLZ\n(Standort Kraftwerk)':
'postcode',
'Ort\n(Standort Kraftwerk)':
'city',
'Straße und Hausnummer (Standort Kraftwerk)':
'street',
'Bundesland':
'state',
'Blockname':
'block',
('Datum der Aufnahme der kommerziellen Stromeinspeisung der Erzeugungseinheit [Datum/jahr]'):
'commissioned',
('Kraftwerksstatus \n(in Betrieb/\nvorläufig stillgelegt/\nsaisonale '
'Konservierung\nReservekraftwerk/\nSonderfall)'):
'status',
('Kraftwerksstatus \n(in Betrieb/\nvorläufig stillgelegt/\nsaisonale '
'Konservierung\nGesetzlich an Stilllegung gehindert/\nSonderfall)'):
'status',
('Kraftwerksstatus \n(in Betrieb/\nvorläufig stillgelegt/\nsaisonale '
'Konservierung\nNetzreserve/ Sicherheitsbereitschaft/\nSonderfall)'):
'status',
'Energieträger':
'fuel_basis',
('Spezifizierung "Mehrere Energieträger" und "Sonstige Energieträger" - '
'Hauptbrennstoff'): 'fuel_multiple1',
'Spezifizierung "Mehrere Energieträger" - Zusatz- / Ersatzbrennstoffe':
'fuel_multiple2',
('Auswertung\nEnergieträger (Zuordnung zu einem Hauptenergieträger bei '
'Mehreren Energieträgern)'):
'fuel',
'Förderberechtigt nach EEG\n(ja/nein)':
'eeg',
'Wärmeauskopplung (KWK)\n(ja/nein)':
'chp',
'Netto-Nennleistung (elektrische Wirkleistung) in MW':
'capacity',
('Bezeichnung Verknüpfungspunkt (Schaltanlage) mit dem Stromnetz der '
'Allgemeinen Versorgung gemäß Netzbetreiber'):
'network_node',
'Netz- oder Umspannebene des Anschlusses':
'voltage',
'Name Stromnetzbetreiber':
'network_operator',
'Kraftwerksname / Standort':
'uba_name',
'Betreiber ':
'uba_company',
'Standort-PLZ':
'uba_postcode',
'Kraftwerksstandort':
'uba_city',
'Elektrische Bruttoleistung (MW)':
'uba_capacity',
'Fernwärme-leistung (MW)':
'uba_chp_capacity',
'Inbetriebnahme (ggf. Ertüchtigung)':
'uba_commissioned',
'Anlagenart':
'uba_technology',
'Primärenergieträger':
'uba_fuel',
}
plantlist.rename(columns=dict_columns, inplace=True)
# Check if all columns have been translated
for columnnames in plantlist.columns:
# if columnnames not in dict_columns.values():
if columnnames not in dict_columns.values():
logger.error("Untranslated column: "+ columnnames)
# first remove line breaks
plantlist['fuel'] = plantlist['fuel'].str.replace('\n', ' ')
# Delete entries without fuel and name
plantlist = plantlist.dropna(subset = ['fuel','name'])
# Translate the German fuel names from the BNetzA list into the English
# labels used throughout the OPSD output.
dict_fuels = {
    'Steinkohle': 'Hard coal',
    'Erdgas': 'Natural gas',
    'Braunkohle': 'Lignite',
    'Kernenergie': 'Nuclear',
    'Pumpspeicher': 'Hydro PSP',
    'Biomasse': 'Biomass and biogas',
    'Mineralölprodukte': 'Oil',
    'Laufwasser': 'Hydro',
    'Sonstige Energieträger (nicht erneuerbar) ': 'Other fuels',
    'Abfall': 'Waste',
    'Speicherwasser (ohne Pumpspeicher)': 'Hydro Reservoir',
    'Unbekannter Energieträger (nicht erneuerbar)': 'Other fuels',
    'Sonstige Energieträger (nicht erneuerbar)': 'Other fuels',
    'Mehrere Energieträger (nicht erneuerbar)': 'Mixed fossil fuels',
    'Deponiegas': 'Sewage and landfill gas',
    'Windenergie (Onshore-Anlage)': 'Onshore',
    'Windenergie (Onshore-Anlage)neu': 'Onshore',
    'Windenergie (Offshore-Anlage)': 'Offshore',
    'Solare Strahlungsenergie': 'Solar',
    'Klärgas': 'Sewage and landfill gas',
    'Geothermie': 'Geothermal',
    'Grubengas': 'Other fossil fuels',
    'Sonstige Speichertechnologien': 'Storage Technologies'
}
plantlist["fuel"].replace(dict_fuels, inplace=True)
# Check if all fuels have been translated; log (rather than print) anything
# left untranslated so the dictionary can be extended.
for fuelnames in plantlist["fuel"].unique():
    if fuelnames not in dict_fuels.values():
        logger.error("Untranslated fuel: " + fuelnames)
dict_plantstatus = {
'in Betrieb': 'operating',
'In Betrieb': 'operating',
'vorläufig stillgelegt': 'shutdown_temporary',
'Vorläufig stillgelegt': 'shutdown_temporary',
'Vorläufig Stillgelegt': 'shutdown_temporary',
'Sonderfall': 'special_case',
'saisonale Konservierung': 'seasonal_conservation',
'Saisonale Konservierung': 'seasonal_conservation',
'Reservekraftwerk':'reserve',
'Endgültig Stillgelegt 2011': 'shutdown_2011',
'Endgültig Stillgelegt 2012': 'shutdown_2012',
'Endgültig Stillgelegt 2013': 'shutdown_2013',
'Endgültig Stillgelegt 2014': 'shutdown_2014',
'Endgültig Stillgelegt 2015': 'shutdown_2015',
'Endgültig stillgelegt 2015': 'shutdown_2015',
'Endgültig Stillgelegt 2016': 'shutdown_2016',
'Gesetzlich an Stilllegung gehindert': 'operating',
'Endgültig Stillgelegt 2011 (ohne StA)': 'shutdown_2011',
'Endgültig Stillgelegt 2012 (ohne StA)': 'shutdown_2012',
'Endgültig Stillgelegt 2013 (mit StA)': 'shutdown_2013',
'Endgültig Stillgelegt 2013 (ohne StA)': 'shutdown_2013',
'Endgültig Stillgelegt 2014 (mit StA)': 'shutdown_2014',
'Endgültig Stillgelegt 2014 (ohne StA)': 'shutdown_2014',
'Endgültig Stillgelegt 2015 (mit StA)': 'shutdown_2015',
'Endgültig Stillgelegt 2015 (ohne StA)': 'shutdown_2015',
'Endgültig Stillgelegt 2016 (mit StA)': 'shutdown_2016',
'Sicherheitsbereitschaft': 'reserve',
'Vorläufig Stillgelegt (mit StA)': 'shutdown_temporary',
'Vorläufig Stillgelegt (ohne StA)': 'shutdown_temporary',
'Endgültig Stillgelegt 2016 (ohne StA)': 'shutdown_2016',
'Endgültig Stillgelegt 2017 (mit StA)' : 'shutdown_2017',
'Endgültig Stillgelegt 2017 (ohne StA)': 'shutdown_2017',
'Endgültig Stillgelegt 2018 (mit StA)' : 'shutdown_2018',
'Endgültig Stillgelegt 2018 (ohne StA)': 'shutdown_2018',
'Endgültig Stillgelegt 2019 (mit StA)': 'shutdown_2019',
'Endgültig Stillgelegt 2019 (ohne StA)': 'shutdown_2019',
'gesetzlich an Stilllegung gehindert' : 'operating',
'Netzreserve' : 'reserve',
'Wegfall IWA nach DE' : 'special_case',
}
plantlist['status'].replace(dict_plantstatus, inplace=True)
# Check if all fuels have been translated
for statusnames in plantlist['status'].unique():
if statusnames not in dict_plantstatus.values():
logger.error('Untranslated plant status: '+ statusnames)
dict_yesno ={
'Nein': 'no',
'nein': 'no',
'Ja': 'yes',
'ja': 'yes',
}
plantlist['chp'].replace(dict_yesno, inplace=True)
# Check if all fuels have been translated
for chpnames in plantlist['chp'].unique():
if (chpnames not in dict_yesno.values()) & (str(chpnames) != 'nan'):
logger.error('Untranslated chp capability: ' + str(chpnames))
plantlist['eeg'].replace(dict_yesno, inplace=True)
# Check if all fuels have been translated
for eegnames in plantlist['eeg'].unique():
if (eegnames not in dict_yesno.values()) & (str(eegnames) != 'nan'):
logger.error('Untranslated EEG type: ' + str(eegnames))
dict_uba_columns = {
'Kraftwerksname / Standort': 'uba_name',
'Betreiber ': 'uba_company',
'Standort-PLZ': 'uba_postcode',
'Kraftwerksstandort': 'uba_city',
'Elektrische Bruttoleistung (MW)': 'uba_capacity',
'Fernwärme-leistung (MW)': 'uba_chp_capacity',
'Inbetriebnahme (ggf. Ertüchtigung)': 'uba_commissioned',
'Anlagenart': 'uba_technology',
'Primärenergieträger': 'uba_fuel',
'Bundesland':'uba_state',
}
plantlist_uba.rename(columns=dict_uba_columns, inplace=True)
# Check if all columns have been translated
for columnnames in plantlist_uba.columns:
if columnnames not in dict_uba_columns.values():
logger.error('Untranslated column: ' + columnnames)
# Prepare for matching
plantlist_uba['uba_id_string'] = (plantlist_uba['uba_name']
+ '_'
+ plantlist_uba['uba_fuel'])
# Set Index of BNetzA power plant list to Kraftwerksnummer_Bundesnetzagentur
plantlist['bnetza_id'] = plantlist['id']
plantlist = plantlist.set_index('id')
# remove line breaks in some columns
plantlist['network_node'] = plantlist['network_node'].str.replace('\n', ' ')
plantlist['company'] = plantlist['company'].str.replace('\n', ' ')
plantlist['name'] = plantlist['name'].str.replace('\n', ' ')
plantlist['fuel'] = plantlist['fuel'].str.replace('\n', ' ')
plantlist['block'] = plantlist['block'].str.replace('\n', ' ')
plantlist['network_operator'] = plantlist['network_operator'].str.replace('\n', ' ')
plantlist['street'] = plantlist['street'].str.replace('\n', ' ')
plantlist['commissioned'] = plantlist['commissioned'].str.replace('\n', ' ')
plantlist.head()
plantlist.loc[plantlist['bnetza_id'] == 'BNA0834', 'fuel'] = 'Natural gas'
plantlist.loc[plantlist['bnetza_id'] == 'BNA0662a', 'fuel'] = 'Hard coal'
plantlist.loc[plantlist['bnetza_id'] == 'BNA0662b', 'fuel'] = 'Hard coal'
# read matching list
matchinglist = getmatchinglist()
matchinglist.head()
match1t1 = matchinglist[
(matchinglist.duplicated(subset=['uba_id_string'], keep=False) == False)
& (matchinglist.duplicated(subset=['ID BNetzA'], keep=False) == False)]
match1t1 = pd.merge(match1t1, plantlist_uba,
left_on='uba_id_string',
right_on='uba_id_string',
how='left')
match1t1 = match1t1.set_index('ID BNetzA')
#Add comment
match1t1['merge_comment'] = ('List matching type: Single UBA power plant '
'assigned to single BNetzA power plant')
match1t1.head()
# Matching structure (example):
# bnetza_id uba_id
# 1 1
# 2 1
# 3 1
# 4 2
# 5 2
# Get relevant entries from the matchinglist and merge the corresponding
# UBA Data to the list.
matchnt1= matchinglist[
(matchinglist.duplicated(subset=['uba_id_string'], keep=False) == True)
& (matchinglist.duplicated(subset=['ID BNetzA'], keep=False)== False)]
matchnt1 = pd.merge(matchnt1, plantlist_uba,
left_on='uba_id_string', right_on='uba_id_string', how='left')
matchnt1 = matchnt1.set_index('ID BNetzA')
# Import BNetzA Capacities and CHP criterion into matchnt1 dataframe
plantlist_capacities = pd.DataFrame(plantlist[['capacity', 'chp']]).rename(
columns={'capacity': 'capacity_bnetza', 'chp': 'chp_bnetza'})
matchnt1 = pd.merge(matchnt1, plantlist_capacities,
left_index=True, right_index=True, how='left')
# Get sum of BNetzA Capacitites for each UBA Index and merge into matchnt1 dataframe
plantlist_uba_capacitysum = pd.DataFrame(
matchnt1.groupby('uba_id_string').sum()['capacity_bnetza']).rename(
columns={'capacity_bnetza': 'capacity_bnetza_aggregate'})
matchnt1 = pd.merge(matchnt1, plantlist_uba_capacitysum,
left_on='uba_id_string', right_index=True, how='left')
# Scale UBA Capacities based BNetzA Data
matchnt1['uba_capacity_scaled'] = (matchnt1['uba_capacity']
* matchnt1['capacity_bnetza']
/ matchnt1['capacity_bnetza_aggregate'])
# determine sum of capacities with chp capability and add to matchnt1
plantlist_uba_chp_capacities = matchnt1[(matchnt1['chp_bnetza'] == 'yes')]
plantlist_uba_chp_capacitysum = pd.DataFrame(
plantlist_uba_chp_capacities.groupby('uba_id_string')
.sum()['capacity_bnetza'])
plantlist_uba_chp_capacitysum = plantlist_uba_chp_capacitysum.rename(
columns={'capacity_bnetza': 'capacity_bnetza_with_chp'})
matchnt1 = pd.merge(matchnt1, plantlist_uba_chp_capacitysum,
left_on='uba_id_string', right_index=True, how='left',)
matchnt1['uba_chp_capacity'] = pd.to_numeric(matchnt1['uba_chp_capacity'], errors='coerce')
matchnt1['uba_chp_capacity_scaled'] = (matchnt1['uba_chp_capacity']
* matchnt1['capacity_bnetza']
/ matchnt1['capacity_bnetza_with_chp'])
# Change column names for merge later on
matchnt1['uba_chp_capacity_original'] = matchnt1['uba_chp_capacity']
matchnt1['uba_chp_capacity'] = matchnt1['uba_chp_capacity_scaled']
matchnt1['uba_capacity_original'] = matchnt1['uba_capacity']
matchnt1['uba_capacity'] = matchnt1['uba_capacity_scaled']
#Add comment
matchnt1['merge_comment'] = ('List matching type: UBA capacity distributed '
'proportionally to multiple BNetzA power plants')
matchnt1.head()
# The resulting DataFrame should be called "match1tn"
# Matching structure:
# bnetza_id uba_id
# 1 1
# 1 2
# 1 3
# 2 4
# 2 5
# Get relevant entries from the matchinglist and merge the corresponding UBA Data to the list.
match1tn= matchinglist[
(matchinglist.duplicated(subset=['ID BNetzA'], keep=False) == True) &
(matchinglist.duplicated(subset=['uba_id_string'], keep=False)== False)]
match1tn = pd.merge(match1tn, plantlist_uba,
left_on='uba_id_string', right_on='uba_id_string', how='left')
match1tn = match1tn.set_index('ID BNetzA')
match1tn.head()
# Import BNetzA Capacities and CHP criterion into match1tn dataframe
plantlist_capacities = pd.DataFrame(plantlist[['capacity','chp']]).rename(
columns = {'capacity': 'capacity_bnetza', 'chp': 'chp_bnetza'})
match1tn = pd.merge(match1tn, plantlist_capacities,
left_index=True, right_index=True, how='left')
match1tn.index.names=['ID BNetzA']
match1tn.head()
# Get sum of UBA Capacitites per BNetzA Index and merge to match1tn dataframe
plantlist_bnetza_capacitysum = pd.DataFrame(
match1tn.groupby(match1tn.index).sum()['uba_capacity'])
plantlist_bnetza_capacitysum = plantlist_bnetza_capacitysum.rename(
columns={'uba_capacity':'uba_capacity_aggregate'})
match1tn = pd.merge(match1tn, plantlist_bnetza_capacitysum,
left_index=True, right_index=True, how='left')
match1tn['uba_chp_capacity'] = pd.to_numeric(match1tn['uba_chp_capacity'], errors='coerce')
match1tn
# Get sum of UBA CHP Capacities per BNetzA Index and merge to match1tn dataframe
plantlist_bnetza_chp_capacitysum = pd.DataFrame(
match1tn.groupby(match1tn.index).sum()['uba_chp_capacity'])
plantlist_bnetza_chp_capacitysum = plantlist_bnetza_chp_capacitysum.rename(
columns={'uba_chp_capacity': 'uba_chp_capacity_aggregate'})
match1tn = pd.merge(match1tn, plantlist_bnetza_chp_capacitysum,
left_index=True, right_index=True, how='left')
# Get UBA Technology for each BNetzA Index and merge into match1tn dataframe
## Option 1: Take all technologies and merge them
#match1tn['uba_technology_aggregate'] = pd.DataFrame(
# match1tn.groupby(match1tn.index)
# .transform(lambda x: ', '.join(x))['uba_technology'])
## Option 2 (currently preferred): Take technology with highest occurence
match1tn['uba_technology_aggregate'] = pd.DataFrame(
match1tn.groupby(match1tn.index)['uba_technology']
.agg(lambda x: x.value_counts().index[0]))
# Get UBA Plant name
match1tn['uba_name_aggregate'] = pd.DataFrame(
match1tn.groupby(match1tn.index).transform(lambda x: ', '.join(x))['uba_name'])
# Get UBA company name
match1tn['uba_company_aggregate'] = pd.DataFrame(
match1tn.groupby(match1tn.index)['uba_company']
.agg(lambda x:x.value_counts().index[0]))
# Change column names for merge later on
match1tn = match1tn.rename(
columns={'uba_chp_capacity': 'uba_chp_capacity_original',
'uba_capacity': 'uba_capacity_original',
'uba_chp_capacity_aggregate': 'uba_chp_capacity',
'uba_capacity_aggregate': 'uba_capacity'})
#Add comment
match1tn['merge_comment'] = ('List matching type: Multiple UBA capacities '
'aggregated to single BNetzA power plant')
# Drop duplicate rows and keep first entry
match1tn = match1tn.reset_index().drop_duplicates(subset='ID BNetzA',keep='first').set_index('ID BNetzA')
match1tn.head()
# Merge the UBA DataFrames
# Merge first two dataframes
plantlist_uba_for_merge = match1t1.append(matchnt1, sort=True)
# Add third dataframe
plantlist_uba_for_merge = plantlist_uba_for_merge.append(match1tn,sort=True)
# Merge plantlist_uba_for_merge into the plantlist
plantlist = pd.merge(plantlist, plantlist_uba_for_merge,
left_index=True, right_index=True, how='left',sort=True)
plantlist.head()
# Delete solar, wind onshore, and wind offshore
plantlist = plantlist[(plantlist['fuel'] != 'Solar')
& (plantlist['fuel'] != 'Onshore')
& (plantlist['fuel'] != 'Offshore')]
# Delete aggregate values
plantlist = plantlist[(plantlist['company'] != 'EEG-Anlagen < 10 MW')
& (plantlist['company'] != 'Nicht-EEG-Anlagen < 10 MW')]
# Add a shutdown-year column: extract the four-digit year from status
# strings such as 'shutdown_2015' (produced by the dict_plantstatus
# translation); entries without a year become NaN.
plantlist['shutdown'] = pd.to_numeric(
    plantlist['status'].str.extract(r'[\w].+(\d\d\d\d)', expand=False),
    errors='coerce')
# Collapse all year-specific shutdown statuses into a single 'shutdown' label
plantlist.loc[plantlist['shutdown'] > 0, 'status'] = 'shutdown'
# Fill retrofit data column.
# Identify retrofit dates in the UBA list: a retrofit year appears in
# parentheses after the original commissioning year, e.g. "1970 (1995)".
plantlist['retrofit'] = pd.to_numeric(
    plantlist['uba_commissioned'].str.extract(r'[(.+](\d\d\d\d)', expand=False),
    errors='coerce')
# Split multiple commissioning dates as listed in UBA:
# _1 = first year, _2 = a middle year, _3 = last year in the string
plantlist['uba_commissioned_1'] = pd.to_numeric(
    plantlist['uba_commissioned'].str.extract(r'(\d\d\d\d)', expand=False),
    errors='coerce')
plantlist.loc[plantlist['uba_commissioned_1'].isnull(), 'uba_commissioned_1'] = pd.to_numeric(
    plantlist['uba_commissioned'].str.extract(r'(\d\d\d\d).+[\w]', expand=False),
    errors='coerce').loc[plantlist['uba_commissioned_1'].isnull()]
plantlist['uba_commissioned_2'] = pd.to_numeric(
    plantlist['uba_commissioned'].str.extract(r'[\w].+(\d\d\d\d).+[\w]', expand=False),
    errors='coerce')
plantlist['uba_commissioned_3'] = pd.to_numeric(
    plantlist['uba_commissioned'].str.extract(r'[\w].+(\d\d\d\d)', expand=False),
    errors='coerce')
# Blank out commissioning entries that merely duplicate the retrofit year
plantlist.loc[plantlist['retrofit'] == plantlist['uba_commissioned_1'], 'uba_commissioned_1'] = ''
plantlist.loc[plantlist['retrofit'] == plantlist['uba_commissioned_2'], 'uba_commissioned_2'] = ''
plantlist.loc[plantlist['retrofit'] == plantlist['uba_commissioned_3'], 'uba_commissioned_3'] = ''
# Split multiple commissioning dates as listed in BNetzA (same scheme)
plantlist['commissioned_1'] = pd.to_numeric(
    plantlist['commissioned'].str.extract(r'(\d\d\d\d)', expand=False),
    errors='coerce')
plantlist.loc[plantlist['commissioned_1'].isnull(), 'commissioned_1'] = pd.to_numeric(
    plantlist['commissioned'].str.extract(r'(\d\d\d\d).+[\w]', expand=False),
    errors='coerce').loc[plantlist['commissioned_1'].isnull()]
plantlist['commissioned_2'] = pd.to_numeric(
    plantlist['commissioned'].str.extract(r'[\w].+(\d\d\d\d).+[\w]', expand=False),
    errors='coerce')
plantlist['commissioned_3'] = pd.to_numeric(
    plantlist['commissioned'].str.extract(r'[\w].+(\d\d\d\d)', expand=False),
    errors='coerce')
# Show plantlist
plantlist[plantlist['status'] == 'shutdown']
plantlist['capacity_float'] = pd.to_numeric(
plantlist['capacity'],
errors='coerce')
plantlist['commissioned_float'] = pd.to_numeric(
plantlist[['commissioned','commissioned_1','commissioned_2','commissioned_3']].max(axis=1),
errors='coerce')
plantlist['retrofit_float'] = pd.to_numeric(
plantlist['retrofit'],
errors='coerce')
plantlist.head()
# Split uba_technology information into technology (GT, CC,...) and type (HKW, IKW, ...)
plantlist['technology'] = plantlist['uba_technology']
plantlist['type'] = plantlist['uba_technology']
dict_technology = {
'GT': 'Gas turbine',
'GT / DT': 'Combined cycle',
'DT': 'Steam turbine',
'GuD': 'Combined cycle',
'DKW': 'Steam turbine',
'LWK': 'Run-of-river',
'PSW': 'Pumped storage',
'DWR': 'Steam turbine', #Pressurized water reactor
'G/AK': 'Gas turbine', #GT with heat recovery
'SWR': 'Steam turbine', #boiling water reactor
'SWK': 'Reservoir', #storage power plant
'SSA': 'Steam turbine', #bus bar
'HKW (DT)': 'Steam turbine',
'HKW / GuD': 'Combined cycle',
'GuD / HKW': 'Combined cycle',
'IKW / GuD': 'Combined cycle',
'IKW /GuD': 'Combined cycle',
'GuD / IKW': 'Combined cycle',
'HKW / SSA': 'Steam turbine',
'IKW / SSA': 'Steam turbine',
'SSA / IKW': 'Steam turbine',
'HKW': '',
'IKW': '',
'IKW / HKW': '',
'HKW / IKW': '',
'IKW / HKW / GuD' : 'Combined cycle',
'HKW / GuD / IKW' : 'Combined cycle',
'GuD / HKW / IKW': 'Combined cycle',
}
plantlist['technology'].replace(dict_technology, inplace=True)
plantlist['technology'].unique()
# Check if all technologies have been translated
for technology in plantlist['technology'].unique():
if (technology not in dict_technology.values()) & (str(technology) != 'nan'):
logger.error('Untranslated technology: ' + str(technology))
# Translate types
dict_type = {
'HKW': 'CHP', #thermal power plant,
'HKW (DT)': 'CHP',
'IKW': 'IPP', #industrial power plant
'HKW / GuD': 'CHP',
'GuD / HKW': 'CHP',
'IKW / GuD': 'IPP',
'IKW /GuD': 'IPP',
'GuD / IKW': 'IPP',
'IKW / SSA': 'IPP',
'HKW / SSA': 'CHP',
'IKW / HKW': 'CHP',
'HKW / IKW': 'CHP',
'SSA / IKW': 'IPP',
'GT': '',
'GT / DT': '',
'DT': '',
'GuD': '',
'DKW': '',
'LWK': '',
'PSW': '',
'DWR': '', #Pressurized water reactor
'G/AK': 'CHP', #GT with heat recovery
'SWR': '', #boiling water reactor
'SWK': '', #storage power plant
'SSA': '',
'WEA': '',
'IKW / HKW / GuD' : 'CHP',
'HKW / GuD / IKW': 'CHP',
'GuD / HKW / IKW': 'CHP',
}
plantlist['type'].replace(dict_type, inplace=True)
plantlist['type'].unique()
# Check if all plant types have been translated; log anything left
# untranslated so dict_type can be extended. (Loop variable renamed so it
# does not shadow the builtin `type`.)
for plant_type in plantlist['type'].unique():
    if (plant_type not in dict_type.values()) and (str(plant_type) != 'nan'):
        logger.error('Untranslated type: ' + str(plant_type))
# Set technology based on fuels
plantlist.loc[(plantlist['fuel'] == 'Nuclear') & ((plantlist['technology'] == '') | (
plantlist['technology'].isnull())), 'technology'] = 'Steam turbine'
plantlist.loc[(plantlist['fuel'] == 'Lignite') & ((plantlist['technology'] == '') | (
plantlist['technology'].isnull())), 'technology'] = 'Steam turbine'
plantlist.loc[(plantlist['fuel'] == 'Hard Coal') & ((plantlist['technology'] == '') | (
plantlist['technology'].isnull())), 'technology'] = 'Steam turbine'
plantlist.loc[(plantlist['fuel'] == 'Hard coal') & ((plantlist['technology'] == '') | (
plantlist['technology'].isnull())), 'technology'] = 'Steam turbine'
plantlist.loc[(plantlist['fuel'] == 'Hydro') & ((plantlist['technology'] == '') | (
plantlist['technology'].isnull())), 'technology'] = 'Run-of-river'
plantlist.loc[(plantlist['fuel'] == 'Hydro PSP') &
((plantlist['technology'] == '') | (plantlist['technology'].isnull())),
'technology'] = 'Pumped storage'
plantlist.loc[(plantlist['fuel'] == 'Hydro PSP'), 'fuel'] = 'Hydro'
plantlist.loc[(plantlist['fuel'] == 'Hydro Reservoir') &
((plantlist['technology'] == '') | (plantlist['technology'].isnull())),
'technology'] = 'RES'
plantlist.loc[(plantlist['fuel'] == 'Hydro Reservoir'), 'fuel'] = 'Hydro'
plantlist.loc[(plantlist['fuel'] == 'reservoir') & ((plantlist['technology'] == '') |
(plantlist['technology'].isnull())),
'technology'] = 'RES'
# Set technology based on name and block information combined with fuels (e.g. combined-cycle, gas turbine)
# Define technology CC as combination of GT and DT
plantlist.loc[((plantlist['name'].str.contains("GT")) | (plantlist['block'].str.contains("GT")))
& ((plantlist['name'].str.contains("DT")) | (plantlist['block'].str.contains("DT")))
& ((plantlist['technology'] == '') | (plantlist['technology'].isnull())), 'technology'] = 'Combined cycle'
# Define technology CC if specified as GuD
plantlist.loc[((plantlist['name'].str.contains("GuD")) | (plantlist['block'].str.contains("GuD"))
| (plantlist['name'].str.contains("GUD")) | (plantlist['name'].str.contains("GUD")))
& ((plantlist['technology'] == '') | (plantlist['technology'].isnull())), 'technology'] = 'Combined cycle'
# Define technology GT
plantlist.loc[((plantlist['name'].str.contains("GT"))
| (plantlist['block'].str.contains("GT"))
| (plantlist['name'].str.contains("Gasturbine"))
| (plantlist['block'].str.contains("Gasturbine")))
& ((plantlist['technology'] == '') | (plantlist['technology'].isnull())), 'technology'] = 'Gas turbine'
# Define technology ST
plantlist.loc[((plantlist['name'].str.contains("DT"))
| (plantlist['block'].str.contains("DT"))
| (plantlist['name'].str.contains("Dampfturbine"))
| (plantlist['block'].str.contains("Dampfturbine"))
| (plantlist['name'].str.contains("Dampfkraftwerk"))
| (plantlist['block'].str.contains("Dampfkraftwerk"))
| (plantlist['name'].str.contains("DKW"))
| (plantlist['block'].str.contains("DKW")))
& ((plantlist['technology'] == '') | (plantlist['technology'].isnull())), 'technology'] = 'Steam turbine'
# Define technology CB
plantlist.loc[((plantlist['name'].str.contains("motor"))
| (plantlist['block'].str.contains("motor"))
| (plantlist['name'].str.contains("Motor"))
| (plantlist['block'].str.contains("Motor")))
& ((plantlist['technology'] == '') | (plantlist['technology'].isnull())), 'technology'] = 'Combustion Engine'
# Identify storage technologies: generic "Other fuels" entries whose BNetzA
# fuel column reads 'Sonstige Speichertechnologien' and that have no
# technology assigned yet.
plantlist.loc[(plantlist['fuel'] == 'Other fuels')
              & (plantlist['fuel_basis'] == 'Sonstige Speichertechnologien')
              & (plantlist['technology'].isnull()), 'technology'] = 'Storage technologies'
# Fall back to steam turbine for all technologies that could not be identified
plantlist.loc[((plantlist['technology'] == '')
               | (plantlist['technology'].isnull())), 'technology'] = 'Steam turbine'
# Add country Code
plantlist['country_code'] = plantlist['state']
# Map each federal state / neighbouring country name to its country code.
# All 16 German states map to 'DE'; the three foreign locations keep their
# own ISO codes.
_german_states = (
    'Brandenburg', 'Baden-Württemberg', 'Niedersachsen', 'Bayern',
    'Mecklenburg-Vorpommern', 'Sachsen-Anhalt', 'Hessen',
    'Nordrhein-Westfalen', 'Berlin', 'Saarland', 'Thüringen', 'Sachsen',
    'Bremen', 'Schleswig-Holstein', 'Hamburg', 'Rheinland-Pfalz',
)
dict_state_country = {state: 'DE' for state in _german_states}
dict_state_country.update({
    'Österreich': 'AT',
    'Luxemburg': 'LU',
    'Schweiz': 'CH',
})
plantlist['country_code'].replace(dict_state_country, inplace=True)
# Check if all state names were mapped to a country code; log anything left
# untranslated so dict_state_country can be extended.
for country in plantlist['country_code'].unique():
    if (country not in dict_state_country.values()) and (str(country) != 'nan'):
        logger.error('Untranslated country code: ' + str(country))
# Efficiencies
data_efficiencies_bnetza = pd.read_csv(os.path.join('input/data/DE', 'input_efficiency_de.csv'),
sep=',', # CSV field separator, default is ','
decimal='.', # Decimal separator, default is '.')
index_col='id',
encoding='utf8')
data_efficiencies_bnetza['efficiency_net'] = pd.to_numeric(
data_efficiencies_bnetza['efficiency_net'],
errors='coerce')
data_efficiencies_bnetza = data_efficiencies_bnetza.dropna(subset=['efficiency_net'])
plantlist = pd.merge(
plantlist,
data_efficiencies_bnetza,
left_index=True,
right_index=True,
how='left')
plantlist.head()
plantlist.iloc[:,6:-1].head()
plantlist_for_efficiency_analysis = plantlist
plantlist_for_efficiency_analysis = plantlist_for_efficiency_analysis.dropna(subset=['efficiency_net'])
# Plot efficiencies for lignite, coal, oil, and natural gas
fuel_for_plot = ['Lignite', 'Hard coal', 'Oil', 'Natural gas']
col_dict = {'Lignite': 'brown', 'Hard coal': 'grey', 'Oil': 'k', 'Natural gas': 'orange'}
fig, ax = plt.subplots(figsize=(16,8))
for fuels in fuel_for_plot:
sub_df = plantlist_for_efficiency_analysis[plantlist_for_efficiency_analysis.fuel == fuels]
if len(sub_df['efficiency_net']) > 10:
x = np.array(sub_df['commissioned_float'].astype(int))
fit = np.polyfit(x, sub_df['efficiency_net'], deg=1)
ax.plot(x, fit[0]*x + fit[1], color=col_dict[fuels])
sub_df.plot(ax=ax,
kind='scatter',
x='commissioned_float',
y='efficiency_net',
c=col_dict[fuels],
label=fuels)
data_efficiencies_literature = pd.read_csv(os.path.join('input/data/DE','input_efficiency_literature_by_fuel_technology.csv'),
sep=',', # CSV field separator, default is ','
decimal='.', # Decimal separator, default is '.')
encoding='utf8')
#data_efficiencies_literature['technology'] = data_efficiencies_literature['technology'].str.upper()
data_efficiencies_literature = data_efficiencies_literature.set_index(['fuel','technology'])
data_efficiencies_literature
plantlist = plantlist.join(data_efficiencies_literature,on=['fuel','technology'])
plantlist['efficiency_literature'] = plantlist['efficiency_intercept'] + plantlist['efficiency_slope']*plantlist[['commissioned_float','retrofit_float']].max(axis=1)
plantlist.head()
data_plant_locations = pd.read_csv(os.path.join('input/data/DE','input_plant_locations_de.csv'),
sep=',', # CSV field separator, default is ','
decimal='.', # Decimal separator, default is '.')
encoding='utf8')
data_plant_locations = data_plant_locations.set_index('id')
data_plant_locations['lat'] = pd.to_numeric(data_plant_locations['lat'],
errors='coerce')
data_plant_locations['lon'] = pd.to_numeric(data_plant_locations['lon'],
errors='coerce')
plantlist = pd.merge(plantlist,
data_plant_locations,
left_index=True,
right_index=True,
how='left')
plantlist.head()
plantlist[plantlist.lat.isnull()]
# read energy source level allocation table
energy_source_level_allocator = pd.read_csv(os.path.join('input', 'energy_source_level_allocator.csv'),
sep=',', # CSV field separator, default is ','
decimal='.', # Decimal separator, default is '.')
index_col='fuel',
encoding='utf8')
plantlist = pd.merge(energy_source_level_allocator, plantlist,
left_index = True,
right_on='fuel',
how='outer')
plantlist
# Merge uba_name_aggregate and uba_name
plantlist.loc[plantlist['uba_name_aggregate'].isnull(), 'uba_name_aggregate'] = plantlist['uba_name'][plantlist['uba_name_aggregate'].isnull()]
# Drop columns not relevant for output
colsToDrop = ['bnetza_id',
'capacity',
'uba_name',
'uba_capacity_original',
'uba_chp_capacity_original',
'uba_city',
'uba_commissioned',
'uba_company',
'uba_company_aggregate',
'uba_fuel',
'uba_postcode',
'uba_state',
'uba_technology',
'uba_technology_aggregate',
'retrofit',
'uba_commissioned_1',
'uba_commissioned_2',
'uba_commissioned_3',
'commissioned_1',
'commissioned_2',
'commissioned_3',
'fuel_basis',
'fuel_multiple1',
'fuel_multiple2',
'efficiency_gross',
'efficiency_intercept',
'efficiency_slope',
'source_type',
'date'
]
plantlist = plantlist.drop(colsToDrop, axis=1)
# Rename columns
plantlist = plantlist.rename(columns={'commissioned': 'commissioned_original',
'commissioned_float': 'commissioned',
'retrofit_float': 'retrofit',
'capacity_float': 'capacity_net_bnetza',
'uba_capacity': 'capacity_gross_uba',
'uba_chp_capacity': 'chp_capacity_uba',
'efficiency_net': 'efficiency_data',
'efficiency_literature': 'efficiency_estimate',
'uba_name_aggregate': 'name_uba',
'name': 'name_bnetza',
'block': 'block_bnetza',
'country_code': 'country',
'fuel': 'energy_source',
})
# Sort columns
columns_sorted = [
'name_bnetza',
'block_bnetza',
'name_uba',
'company',
'street',
'postcode',
'city',
'state',
'country',
'capacity_net_bnetza',
'capacity_gross_uba',
'energy_source',
'technology',
'chp',
'chp_capacity_uba',
'commissioned',
'commissioned_original',
'retrofit',
'shutdown',
'status',
'type',
'lat',
'lon',
'eic_code_plant',
'eic_code_block',
'efficiency_data',
'efficiency_source',
'efficiency_estimate',
'energy_source_level_1',
'energy_source_level_2',
'energy_source_level_3',
'eeg',
'network_node',
'voltage',
'network_operator',
'merge_comment',
'comment']
plantlist = plantlist.reindex(columns=columns_sorted)
plantlist.head()
# Round capacity values as well as the efficiency estimate to five decimals-
plantlist.capacity_net_bnetza = plantlist.capacity_net_bnetza.round(decimals=5)
plantlist.capacity_gross_uba = plantlist.capacity_gross_uba.round(decimals=5)
plantlist.efficiency_estimate = plantlist.efficiency_estimate.round(decimals=5)
pivot_status_capacity = pd.pivot_table(
plantlist,
values='capacity_net_bnetza',
columns='status',
index='energy_source',
aggfunc=np.sum
)
pivot_status_capacity.sort_values(by='operating', inplace=True, ascending=0)
pivot_status_capacity_plot=pivot_status_capacity.plot(kind='barh',
stacked=True,
legend=True,
figsize=(12, 6))
pivot_status_capacity_plot.set_xlabel("MW")
pivot_status_capacity_plot
plantlist_filtered = plantlist
pivot_age_capacity = pd.pivot_table(
plantlist_filtered,
values='capacity_net_bnetza',
columns='energy_source',
index='commissioned',
aggfunc=np.sum,
dropna=True
)
pivot_age_capacity_plot=pivot_age_capacity.plot(kind='bar',
stacked=True,
legend=True,
figsize=(17, 10))
pivot_age_capacity_plot.set_ylabel("MW")
xaxis_labels = pivot_age_capacity.index.astype(int)
pivot_age_capacity_plot.set_xticklabels(xaxis_labels)
pivot_age_capacity_plot
plantlist_for_plot = plantlist.copy(deep=True)
plantlist_for_plot['capacity_float'] = pd.to_numeric(plantlist_for_plot['capacity_net_bnetza'],
errors='coerce')
plantlist_for_plot['commissioned_float'] = pd.to_numeric(plantlist_for_plot['commissioned'],
errors='coerce')
age_capacity_plot = plantlist_for_plot.plot(kind='scatter', x='commissioned_float', y='capacity_float', figsize=(17, 10))
age_capacity_plot.set_xlabel("commissioned")
age_capacity_plot.set_ylabel("MW")
age_capacity_plot
# Show all plants reported with a net capacity of zero
plantlist[plantlist.capacity_net_bnetza == 0]
# Show all plants with commissioning dates before 1900
plantlist[plantlist['commissioned'] <= 1900]
# Show all plants with invalid (missing) commissioning dates
plantlist[plantlist['commissioned'].isnull()]
# TODO: improve this comparison, it creates many false positives
capacitycomparison = pd.DataFrame(plantlist.capacity_net_bnetza / plantlist.capacity_gross_uba)
capacitycomparison['Name'] = plantlist.name_bnetza
capacitycomparison['Block'] = plantlist.block_bnetza
capacitycomparison['BnetzaCapacity'] = plantlist.capacity_net_bnetza
capacitycomparison['UBACapacity'] = plantlist.capacity_gross_uba
capacitycomparison.dropna(inplace=True)
capacitycomparison.sort_values(by=0)
# Export the final plant list in three formats: CSV (UTF-8), Excel, SQLite.
output_path = 'output'
plantlist.to_csv(
    os.path.join(output_path, 'conventional_power_plants_DE.csv'),
    encoding='utf-8', index_label='id'
)
plantlist.to_excel(
    os.path.join(output_path, 'conventional_power_plants_DE.xlsx'),
    sheet_name='plants', index_label='id'
)
# SQLite export replaces any existing table of the same name.
plantlist.to_sql(
    'conventional_power_plants_DE',
    sqlite3.connect(os.path.join(output_path ,'conventional_power_plants.sqlite')),
    if_exists="replace", index_label='id'
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Download settings
Step2: 4. Define functions
Step3: 5.2 Download the UBA Plant list
Step4: 6. Translate contents
Step5: 6.2 Fuel types
Step6: 6.3 Power plant status
Step7: 6.4 CHP Capability
Step8: 6.5 EEG
Step9: 6.6 UBA Columns
Step10: 7. Process data
Step11: Manual adjustments
Step12: 7.2 Merge data from UBA List
Step13: 7.2.1 case 1-1
Step14: 7.2.2 case n-1
Step15: 7.2.3 case 1-n
Step16: 7.2.4 Merge into plantlist
Step17: 7.3 Delete fuels not in focus
Step18: 7.4 Add Columns for shutdown and retrofit
Step19: 7.5 Convert input colums to usable data types
Step20: 7.6 Identify generation technology
Step21: 7.6.2 Identify generation technology based on BNetzA information
Step22: 7.7 Add country code
Step23: 7.8 Add efficiency data
Step24: 7.8.1.2 Plot efficiencies by year of commissioning
Step25: 7.8.2 Efficiencies from literature
Step26: 7.8.2.2 Apply efficiency approximation from literature
Step27: 7.9 Add geodata and EIC Codes
Step28: 7.10 Allocate energy source levels
Step29: 8. Define final output
Step30: 8.1 Round values
Step31: 8.2 Verification
Step32: 8.2.2 Power plant age
Step33: 8.2.3 Block size vs year of commissioning
Step34: 8.3 Logical checks
Step35: 8.3.2 Commissioning Dates
Step36: 8.3.3 Compare UBA and BNetzA data
Step37: 9. Result export
|
6,907
|
<ASSISTANT_TASK:>
Python Code::
# create sequences of images, input sequences and output words for an image
def create_sequences(tokenizer, max_length, descriptions, photos, vocab_size):
    """Build (photo feature, input sequence, output word) training triples.

    For every caption of every image, each prefix of the integer-encoded
    caption becomes one padded input sequence whose target is the next
    word, one-hot encoded over the vocabulary.
    """
    photo_feats, input_seqs, targets = [], [], []
    for image_id, captions in descriptions.items():
        for caption in captions:
            # integer-encode the caption text
            encoded = tokenizer.texts_to_sequences([caption])[0]
            # every split point yields one (prefix -> next word) pair
            for split in range(1, len(encoded)):
                prefix = pad_sequences([encoded[:split]], maxlen=max_length)[0]
                next_word = to_categorical([encoded[split]], num_classes=vocab_size)[0]
                photo_feats.append(photos[image_id][0])
                input_seqs.append(prefix)
                targets.append(next_word)
    return array(photo_feats), array(input_seqs), array(targets)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
6,908
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# le code qui suit n'est pas indispensable, il génère automatiquement un menu
# dans le notebook
from jyquickhelper import add_notebook_menu
add_notebook_menu()
url = "https://www.insee.fr/fr/statistiques/fichier/1892086/pop-totale-france.xls"
url = "pop-totale-france.txt"
import pandas
df=pandas.read_csv(url, sep="\t", encoding="latin-1")
df.head(n=5)
df=pandas.read_csv(url, sep="\t", encoding="latin-1", skiprows=3)
df.head(n=5)
df.tail(n=5)
from actuariat_python.data import population_france_year
df = population_france_year()
df.head(n=3)
df.tail(n=3)
from actuariat_python.data import table_mortalite_france_00_02
df=table_mortalite_france_00_02()
df.head()
df.plot(x="Age",y=["Homme", "Femme"],xlim=[0,100])
from actuariat_python.data import fecondite_france
df=fecondite_france()
df.head()
df.plot(x="age", y=["2005","2015"])
from actuariat_python.data import table_mortalite_euro_stat
table_mortalite_euro_stat()
import os
os.stat("mortalite.txt")
import pandas
df = pandas.read_csv("mortalite.txt", sep="\t", encoding="utf8", low_memory=False)
df.head()
df [ ((df.age=="Y60") | (df.age=="Y61")) & (df.annee == 2000) & (df.pays=="FR") & (df.genre=="F")]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Population française janvier 2017
Step2: La récupération de ces données est implémentée dans la fonction population_france_year
Step3: D'après cette table, il y a plus de personnes âgées de 110 ans que de 109 ans. C'est dû au fait que la dernière ligne aggrège toutes les personnes âgées de plus de 110 ans.
Step4: Taux de fécondité (France)
Step5: Table de mortalité étendue 1960-2010
|
6,909
|
<ASSISTANT_TASK:>
Python Code:
def decaying_sin(params, x):
    """Evaluate a Gaussian-damped sine model at the points *x*.

    ``params`` is a mapping (e.g. dict or lmfit.Parameters) with keys
    'amp', 'phase', 'frequency' and 'decay'; the model is
    amp * sin(frequency*x + phase) * exp(-decay * x**2).
    """
    amplitude = params['amp']
    phase = params['phase']
    frequency = params['frequency']
    damping = params['decay']
    envelope = np.exp(-x * x * damping)
    return amplitude * np.sin(frequency * x + phase) * envelope
x = np.linspace(0.0, 10.0, 100)
default_params = {"amp" : 10.0, "decay" : 0.05, "phase" : 1.0, "frequency" : 4.0}
data = decaying_sin(default_params, x)
eps = np.linspace(0.0, 10.0, 100)
eps.fill(0.01)
a = plt.plot(x, data)
def objective_function(params):
    """Residuals of the decaying-sine model against the notebook data.

    Returns (data - model) / eps elementwise, the form lmfit.minimize
    expects. Relies on the module-level arrays ``data``, ``x`` and ``eps``.
    """
    residuals = data - decaying_sin(params, x)
    return residuals / eps
params = lmfit.Parameters()
params.add('amp', value=1)
params.add('decay', value=0.1)
params.add('phase', value=0.1)
params.add('frequency', value=1.0)
fig, ax = plt.subplots()
a = ax.plot(x, data)
b = ax.plot(x, decaying_sin(params, x))
fig, ax2 = plt.subplots()
def plotter(params, a, b):
    # lmfit iteration callback: invoked as iter_cb(params, iter, resid);
    # `a` is the iteration number and `b` the residual array (both unused).
    current_data = decaying_sin(params, x)
    ax2.plot(x, data)           # reference data (redrawn every iteration)
    ax2.plot(x, current_data)   # model for the current parameter values
    axes = plt.gca()
    axes.set_ylim(-10.0, 10.0)  # keep a fixed y-range across iterations
out = lmfit.minimize(objective_function, params, iter_cb=plotter)
out.params.pretty_print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting function for default parameters
Step2: Defining objective function
|
6,910
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
from qutip import *
from qutip.piqs import *
#TLS parameters
N = 6
ntls = N
nds = num_dicke_states(ntls)
[jx, jy, jz] = jspin(N)
jp = jspin(N, "+")
jm = jp.dag()
w0 = 1
gE = 0.1
gD = 0.01
gP = 0.1
gCP = 0.1
gCE = 0.1
gCD = 0.1
h = w0 * jz
#photonic parameters
nphot = 16
wc = 1
kappa = 1
ratio_g = 2
g = ratio_g/np.sqrt(N)
a = destroy(nphot)
#TLS liouvillian
system = Dicke(N = N)
system.hamiltonian = h
system.emission = 0
system.dephasing = gD
system.pumping = gP
system.collective_pumping = 0
system.collective_emission = 0
system.collective_dephasing = 0
liouv = system.liouvillian()
system
#TLS liouvillian 2
system2 = Dicke(N = N)
system2.hamiltonian = h
system2.emission = gE
system2.dephasing = gD
system2.pumping = 0
system2.collective_pumping = 0
system2.collective_emission = 0
system2.collective_dephasing = 0
liouv2 = system2.liouvillian()
#TLS liouvillian 3
system3 = Dicke(N = N)
system3.hamiltonian = h
system3.emission = gE
system3.dephasing = gD
system3.pumping = 0#gP
system3.collective_pumping = gCP
system3.collective_emission = 0
system3.collective_dephasing = 0
liouv3 = system3.liouvillian()
#TLS liouvillian 4
system4 = Dicke(N = N)
system4.hamiltonian = h
system4.emission = gE
system4.dephasing = gD
system4.pumping = 0
system4.collective_pumping = 0
system4.collective_emission = gCE
system4.collective_dephasing = 0
liouv4 = system4.liouvillian()
print(system)
#photonic liouvilian
h_phot = wc * a.dag() * a
c_ops_phot = [np.sqrt(kappa) * a]
liouv_phot = liouvillian(h_phot, c_ops_phot)
#identity operators
id_tls = to_super(qeye(nds))
id_phot = to_super(qeye(nphot))
# light-matter superoperator
h_int = g * tensor(a + a.dag(), jx)
liouv_int = -1j* spre(h_int) + 1j* spost(h_int)
# 1 total liouvillian
liouv_sum = super_tensor(liouv_phot, id_tls) + super_tensor(id_phot, liouv)
liouv_tot = liouv_sum + liouv_int
#2 total liouvillian
liouv_sum2 = super_tensor(liouv_phot, id_tls) + super_tensor(id_phot, liouv2)
liouv_tot2 = liouv_sum2 + liouv_int
#3 total liouvillian
liouv_sum3 = super_tensor(liouv_phot, id_tls) + super_tensor(id_phot, liouv3)
liouv_tot3 = liouv_sum3 + liouv_int
#4 total liouvillian
liouv_sum4 = super_tensor(liouv_phot, id_tls) + super_tensor(id_phot, liouv4)
liouv_tot4 = liouv_sum4 + liouv_int
#total operators
jz_tot = tensor(qeye(nphot), jz)
jp_tot = tensor(qeye(nphot), jp)
jm_tot = tensor(qeye(nphot), jm)
jpjm_tot = tensor(qeye(nphot), jp*jm)
nphot_tot = tensor(a.dag()*a, qeye(nds))
adag_tot = tensor(a.dag(), qeye(nds))
a_tot = tensor(a, qeye(nds))
# 4 calculate steady state
rho_ss4 = steadystate(liouv_tot4, method="direct")
nphot_ss4 = expect(nphot_tot, rho_ss4)
psi4 = rho_ss4.ptrace(0)
print("Ensemble 4 is ok")
# takes a couple of minutes
# 1 calculate steady state
rho_ss = steadystate(liouv_tot, method="direct")
nphot_ss = expect(nphot_tot, rho_ss)
psi = rho_ss.ptrace(0)
# 2 calculate steady state
rho_ss2 = steadystate(liouv_tot2, method="direct")
nphot_ss2 = expect(nphot_tot, rho_ss2)
psi2 = rho_ss2.ptrace(0)
# 3 calculate steady state
rho_ss3 = steadystate(liouv_tot3, method="direct")
nphot_ss3 = expect(nphot_tot, rho_ss3)
psi3 = rho_ss3.ptrace(0)
# 4 calculate steady state
rho_ss4 = steadystate(liouv_tot4, method="direct")
nphot_ss4 = expect(nphot_tot, rho_ss4)
psi4 = rho_ss4.ptrace(0)
# calculate Wigner function for photonic states
nx = 1000
xvec = np.linspace(-6, 6, nx)
W = wigner(psi, xvec, xvec)
print("1 ok")
W2 = wigner(psi2, xvec, xvec)
print("2 ok")
W3 = wigner(psi3, xvec, xvec)
print("3 ok")
W4 = wigner(psi4, xvec, xvec)
print("4 ok")
# strings for the plot title
g_string = np.round(g,4)
gE_string = np.round(gE,4)
gD_string = np.round(gD,4)
gP_string = np.round(gP,4)
gCE_string = np.round(gCE,4)
gCP_string = np.round(gCP,4)
gCD_string = np.round(gCD,4)
k_string = np.round(kappa,4)
plt.rc('text', usetex = True)
label_size = 25
plt.rc('xtick', labelsize=label_size)
plt.rc('ytick', labelsize=label_size)
# 1
Wp = np.round(W,3)
wmap = wigner_cmap(Wp) # Generate Wigner colormap
nrm = mpl.colors.Normalize(0, Wp.max())
# 2
Wp2 = np.round(W2,3)
wmap2 = wigner_cmap(Wp2) # Generate Wigner colormap
nrm2 = mpl.colors.Normalize(0, Wp2.max())
# 3
Wp3 = np.round(W3,3)
wmap3 = wigner_cmap(Wp3) # Generate Wigner colormap
nrm3 = mpl.colors.Normalize(0, Wp3.max())
# 4
Wp4 = np.round(W4,3)
wmap4 = wigner_cmap(Wp4) # Generate Wigner colormap
nrm4 = mpl.colors.Normalize(0, Wp4.max())
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
axes[0,0].contourf(xvec, xvec, Wp, 100, cmap=wmap, norm=nrm)
axes[0,1].contourf(xvec, xvec, Wp2, 100, cmap=wmap2, norm=nrm2)
axes[1,0].contourf(xvec, xvec, Wp3, 100, cmap=wmap3, norm=nrm3)
axes[1,1].contourf(xvec, xvec, Wp4, 100, cmap=wmap4, norm=nrm4)
axes[1,0].set_xlabel(r'$x$', fontsize = label_size)
axes[1,1].set_xlabel(r'$x$', fontsize = label_size)
#axes[0,0].set_ylabel(r'$p$', fontsize = label_size)
axes[0, 0].set_title(r"$\kappa$, $\gamma_\phi$, $\gamma_\uparrow$",
fontsize = label_size, position=(0.2, 0.85))
axes[0, 1].set_title(r"$\kappa$, $\gamma_\phi$, $\gamma_\downarrow$",
fontsize = label_size, position=(0.8, 0.85))
axes[1, 0].set_title(r"$\kappa$, $\gamma_\phi$, $\gamma_\downarrow$, $\gamma_\Uparrow$",
fontsize = label_size, position=(0.3, 0.85))
axes[1, 1].set_title(r"$\kappa$, $\gamma_\phi$, $\gamma_\downarrow$, $\gamma_\Downarrow$",
fontsize = label_size, position=(0.7, 0.85))
axes[0,0].set_xticks([-5,0,5])
axes[0,0].set_yticks([-5,0,5])
axes[0,0].set_ylabel(r'$p$', fontsize = label_size)
axes[0,1].set_xticks([-5,0,5])
axes[0,1].set_yticks([-5,0,5])
axes[1,0].set_ylabel(r'$p$', fontsize = label_size)
axes[1,0].set_xticks([-5,0,5])
axes[1,0].set_yticks([-5,0,5])
axes[1,1].set_xticks([-5,0,5])
axes[1,1].set_yticks([-5,0,5])
plt.show()
plt.close()
rhoss_list = []
rhoss_list.append(rho_ss)
rhoss_list.append(rho_ss2)
rhoss_list.append(rho_ss3)
rhoss_list.append(rho_ss4)
wigner_list = []
wigner_list.append(W)
wigner_list.append(W2)
wigner_list.append(W3)
wigner_list.append(W4)
# save data
save_file = False
if save_file == True:
file_name = str("superradiance_rhoss_Nmax{}.npz".format(N))
np.savez('{}'.format(file_name), rhoss_list)
file_name = str("superradiance_wigner_list_Nmax{}.npz".format(N))
np.savez('{}'.format(file_name), wigner_list)
qutip.about()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Wigner function Visualization
Step2: The Wigner function of the photonic part of the system displays the two displaced squeezed blobs typical of superradiance depending on the local and collective incoherent terms affecting the dynamics [2,3].
Step3: References
|
6,911
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
mpl.rcParams['figure.figsize'] = (13,9) # change default figure size
cmap1 = 'Blues'
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2*np.pi, 0.1)
xx, yy = np.meshgrid(x, y)
zz = np.clip(6*(np.cos(xx) * np.sin(yy) + np.random.rand(*xx.shape)*np.cos(yy)), 0, 5.5)
fig, ax = plt.subplots(figsize=(12, 1))
cb = mpl.colorbar.ColorbarBase(ax, orientation='horizontal', cmap=cmap1)
col_levels = np.arange(0,6.5,0.5) # sequence from 0 to 6 with step 0.5
tick_loc = np.arange(0,7,1) # sequence from 0 to 6 with step 1
fmt = '%1.2f' # tick labels format with two decimal places
fig, ax = plt.subplots(figsize=(12, 1))
cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap1,
boundaries=col_levels,
ticks=tick_loc,
format=fmt,
orientation='horizontal')
cb.ax.tick_params(labelsize=20)
cbkw = dict(ticks=tick_loc, format=fmt, orientation='vertical')
fig, ax = plt.subplots()
p = ax.contourf(x, y, zz, cmap=cmap1, levels=col_levels)
cb1 = fig.colorbar(p, ax=ax, **cbkw)
cb1.ax.tick_params(labelsize=20)
def myplot(cmap=cmap1):
    """Contour-plot the random test data with *cmap*; all other plot
    parameters (levels, colorbar settings) stay the same for comparison.
    """
    # NOTE: the original body contained this summary as a bare, unquoted
    # line, which is a SyntaxError; it is restored as a proper docstring.
    fig, ax = plt.subplots()
    p = ax.contourf(x, y, zz, cmap=cmap, levels=col_levels)
    cb1 = fig.colorbar(p, ax=ax, **cbkw)
    cb1.ax.tick_params(labelsize=20)
cmap2 = 'gist_stern_r'
myplot(cmap2)
super_random_color_sequence = ['r', (0,0,1), '0.2', 'yellow', '#0044AB']
cmap3 = mpl.colors.ListedColormap(super_random_color_sequence)
myplot(cmap3)
c_white_trans = mpl.colors.colorConverter.to_rgba('w', alpha=0.0)
cmap4 = mpl.colors.LinearSegmentedColormap.from_list('some_name_for_this_cmap',
[c_white_trans, '#770000', 'red', 'yellow'], 64)
fig, ax = plt.subplots()
# First plot
p1 = ax.contourf(x, y, xx, cmap='viridis')
cb1 = fig.colorbar(p1, ax=ax, orientation='vertical', label='data1', pad=0.05)
# Second plot
p2 = ax.contourf(x, y, zz, cmap=cmap4, levels=col_levels)
cb2 = fig.colorbar(p2, ax=ax, label='data2', **cbkw)
cb2.ax.tick_params(labelsize=20)
import seaborn as sns
try:
sns.color_palette('jet')
except ValueError as e:
print(e)
HTML(html)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will start with the colormap used in the original question.
Step2: In the absence of the data used in the given figure, we create a random array
Step3: 1. Customizing a colorbar
Step4: Thus we got a simple colorbar with smooth colour range for levels between 0 and 1.
Step5: Now, to apply these settings to a data plot, we use a method colorbar() instead of the base colorbar class. We also put colour levels and colour map name in the contourf(). Other keywords are passed as before, but let's pack them into a dictinary.
Step7: We will use this plot for other examples below, so let's make a function of it.
Step8: 1.2 Colour map
Step9: But what if you want to customize the colormap or even create your own one? There are several ways to do this. Below are some of the easiest examples.
Step10: 1.2.2 LinearSegmentedColormap
Step11: To demonstrate the colour transparency we overlay two plots
Step12: 1.2.3 And kids, remember to avoid the rainbow or jet colormaps!
Step13: Further reading
|
6,912
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
test_data = pd.read_csv("../data/person-video-sparse-multiple-choice.csv")
test_data.head()
import crowdtruth
from crowdtruth.configuration import DefaultConfig
class TestConfig(DefaultConfig):
inputColumns = ["videolocation", "subtitles", "imagetags", "subtitletags"]
outputColumns = ["selected_answer"]
# processing of a closed task
open_ended_task = True
def processJudgments(self, judgments):
# pre-process output to match the values in annotation_vector
for col in self.outputColumns:
# transform to lowercase
judgments[col] = judgments[col].apply(lambda x: str(x).lower())
# remove square brackets from annotations
judgments[col] = judgments[col].apply(lambda x: str(x).replace('[',''))
judgments[col] = judgments[col].apply(lambda x: str(x).replace(']',''))
# remove the quotes around the annotations
judgments[col] = judgments[col].apply(lambda x: str(x).replace('"',''))
return judgments
data, config = crowdtruth.load(
file = "../data/person-video-sparse-multiple-choice.csv",
config = TestConfig()
)
data['judgments'].head()
results = crowdtruth.run(data, config)
results["units"].head()
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(results["units"]["uqs"])
plt.xlabel("Video Fragment Quality Score")
plt.ylabel("Video Fragment")
results["units"]["unit_annotation_score"].head()
results["workers"].head()
plt.hist(results["workers"]["wqs"])
plt.xlabel("Worker Quality Score")
plt.ylabel("Workers")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Declaring a pre-processing configuration
Step2: Our test class inherits the default configuration DefaultConfig, while also declaring some additional attributes that are specific to the Person Type/Role Annotation in Video task
Step3: Pre-processing the input data
Step4: Computing the CrowdTruth metrics
Step5: results is a dict object that contains the quality metrics for the video fragments, annotations and crowd workers.
Step6: The uqs column in results["units"] contains the video fragment quality scores, capturing the overall workers agreement over each video fragment. Here we plot its histogram
Step7: The unit_annotation_score column in results["units"] contains the video fragment-annotation scores, capturing the likelihood that an annotation is expressed in a video fragment. For each video fragment, we store a dictionary mapping each annotation to its video fragment-relation score.
Step8: The worker metrics are stored in results["workers"]
Step9: The wqs columns in results["workers"] contains the worker quality scores, capturing the overall agreement between one worker and all the other workers.
|
6,913
|
<ASSISTANT_TASK:>
Python Code:
info_struct=dict()
info_struct['addr']=10000
info_struct['content']=''
class Server(object):
    """Abstract base for simple message servers: receive, send, display."""
    # last received payload; empty until a subclass's recv() stores something
    content=''
    def recv(self, info):
        """Accept an incoming message (no-op in the base class)."""
        pass
    def send(self, info):
        """Send a message (no-op in the base class)."""
        pass
    def show(self):
        """Display the stored content (no-op in the base class)."""
        pass
class infoServer(Server):
    """Concrete server that stores a received payload and can display it."""

    def recv(self, info):
        # remember the payload and acknowledge receipt
        self.content = info
        return 'recv OK!'

    def send(self, info):
        # sending is not implemented for this server
        pass

    def show(self):
        # print the stored payload with the SHOW: prefix
        print('SHOW:%s' % self.content)
class serverProxy(object):
    """Marker base class for server proxies."""
    pass
class infoServerProxy(serverProxy):
    """Transparent proxy that forwards recv/show calls to a real server."""
    # wrapped server instance (set in __init__)
    server = ''

    def __init__(self, server):
        self.server = server

    def recv(self, info):
        # delegate straight to the wrapped server
        return self.server.recv(info)

    def show(self):
        # delegate display to the wrapped server
        self.server.show()
class WhiteInfoServerProxy(infoServerProxy):
    """Proxy that only forwards messages whose sender address is whitelisted.

    ``info`` must be a dict with an 'addr' key (sender address) and a
    'content' key (payload forwarded to the wrapped server).
    """
    # NOTE: the attribute keeps the original 'whilte_list' spelling for
    # backward compatibility with any external code that reads it.
    whilte_list = []

    def __init__(self, server):
        super(WhiteInfoServerProxy, self).__init__(server)
        # Per-instance whitelist: the original class-level list was shared
        # by every instance, so one proxy's addWhite() leaked into all others.
        self.whilte_list = []

    def recv(self, info):
        """Forward info['content'] to the server if info['addr'] is allowed.

        Returns the server's acknowledgement, or an error string when the
        structure is wrong or the address is not whitelisted.
        """
        # Explicit type check instead of `assert`, which is stripped under -O.
        if not isinstance(info, dict):
            return 'info structure is not correct'
        addr = info.get('addr', 0)
        if addr not in self.whilte_list:
            return 'Your address is not the white list'
        content = info.get('content', '')
        return self.server.recv(content)

    def addWhite(self, addr):
        """Allow messages from *addr*."""
        self.whilte_list.append(addr)

    def rmvWhite(self, addr):
        """Stop allowing messages from *addr* (raises ValueError if absent)."""
        self.whilte_list.remove(addr)

    def clearWhite(self):
        """Empty the whitelist."""
        self.whilte_list = []
# Demo: a message from address 10010 is rejected until that address
# is added to the proxy's whitelist.
info_struct=dict()
info_struct['addr']=10010
info_struct['content']='Hello World!'
info_server = infoServer()
info_server_proxy = WhiteInfoServerProxy(info_server)
print(info_server_proxy.recv(info_struct))   # rejected: not on the whitelist
info_server_proxy.show()                     # server content still empty
info_server_proxy.addWhite(10010)
print(info_server_proxy.recv(info_struct))   # now accepted: 'recv OK!'
info_server_proxy.show()                     # shows 'Hello World!'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: infoServer有接收和发送的功能,发送功能由于暂时用不到,保留。另外新加一个接口show,用来展示服务器接收的内容。接收的数据格式必须如info_struct所示,服务器仅接受info_struct的content字段。那么,如何给这个服务器设置一个白名单,使得只有白名单里的地址可以访问服务器呢?修改Server结构是个方法,但这显然不符合软件设计原则中的单一职责原则。在此基础之上,使用代理,是个不错的方法。代理配置如下:
|
6,914
|
<ASSISTANT_TASK:>
Python Code:
from sklearn import datasets
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
# Load the iris data
iris = datasets.load_iris()
# Create a variable for the feature data
X = iris.data
# Create a variable for the target data
y = iris.target
# Random split the data into four new datasets, training features, training outcome, test features,
# and test outcome. Set the size of the test data to be 30% of the full dataset.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Load the standard scaler
sc = StandardScaler()
# Compute the mean and standard deviation based on the training data
sc.fit(X_train)
# Scale the training data to be of mean 0 and of unit variance
X_train_std = sc.transform(X_train)
# Scale the test data to be of mean 0 and of unit variance
X_test_std = sc.transform(X_test)
# Feature Test Data, non-standardized
X_test[0:5]
# Feature Test Data, standardized.
X_test_std[0:5]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Split Data For Cross Validation
Step3: Standardize Feature Data
|
6,915
|
<ASSISTANT_TASK:>
Python Code:
%run "../Functions/1. Google form analysis.ipynb"
binarized = getAllBinarized()
answersCount = len(binarized.index)
totalScorePerQuestion = pd.DataFrame(data=np.dot(np.ones(answersCount),binarized),index=binarized.columns,columns=['score'])
totalScorePerQuestion['perc'] = totalScorePerQuestion['score'] * 100 / answersCount
display(totalScorePerQuestion.sort_values(by = 'score'))
biologists = getSurveysOfBiologists(gform)
nonBiologists = gform.drop(biologists.index)
biologistsScores = biologists.apply(getGFormRowScore, axis=1)
nonBiologistsScores = nonBiologists.apply(getGFormRowScore, axis=1)
#print(len(gform), len(biologists), len(nonBiologists))
#print(len(gform), len(biologistsScores), len(nonBiologistsScores))
#print(type(biologistsScores), len(biologistsScores),\
#type(nonBiologistsScores), len(nonBiologistsScores))
ttest = ttest_ind(biologistsScores, nonBiologistsScores)
ttest
biologistsScores.values
np.std(biologistsScores)
np.std(nonBiologistsScores)
gfBefores = getGFormBefores(gform)
biologistsBefores = getSurveysOfBiologists(gfBefores, hardPolicy = False)
nonBiologistsBefores = gfBefores.drop(biologistsBefores.index)
biologistsBeforesScores = biologistsBefores.apply(getGFormRowScore, axis=1)
nonBiologistsBeforesScores = nonBiologistsBefores.apply(getGFormRowScore, axis=1)
#print(len(gfBefores), len(biologistsBefores), len(nonBiologistsBefores))
#print(len(gfBefores), len(biologistsBeforesScores), len(nonBiologistsBeforesScores))
#print(type(biologistsScores), len(biologistsScores),\
#type(nonBiologistsScores), len(nonBiologistsScores))
ttest = ttest_ind(biologistsBeforesScores, nonBiologistsBeforesScores)
ttest
np.std(biologistsBeforesScores)
nonBiologistsBeforesScores
np.std(nonBiologistsBeforesScores)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sorted total answers to questions
Step2: Cross-samples t-tests
Step3: Conclusion
|
6,916
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.3,<2.4"
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('orb',
compute_times=phoebe.linspace(0,10,10),
dataset='orb01')
b.add_dataset('lc',
compute_times=phoebe.linspace(0,1,101),
dataset='lc01')
print(b.computes)
print(b.filter(context='compute'))
b.set_value(qualifier='irrad_method', value='none')
b.add_compute(phoebe.compute.phoebe, compute='preview', irrad_method='none')
print(b.filter(compute='preview', context='compute'))
b.add_compute('phoebe', compute='detailed', irrad_method='wilson')
print(b.get_compute('detailed'))
print(b.filter(qualifier='enabled', dataset='lc01'))
b.set_value(qualifier='enabled', dataset='lc01', compute='preview', value=False)
print(b.filter(qualifier='enabled', dataset='lc01'))
b.set_value_all('enabled@lc01', True)
print(b.filter(qualifier='enabled', dataset='lc01'))
b.run_compute(compute='preview')
print(b.models)
b.set_value(qualifier='incl', kind='orbit', value=90)
b.run_compute(compute='preview', model='run_with_incl_90')
b.set_value(qualifier='incl', kind='orbit', value=85)
b.run_compute(compute='preview', model='run_with_incl_85')
b.set_value(qualifier='incl', kind='orbit', value=80)
b.run_compute(compute='preview', model='run_with_incl_80')
print(b.models)
b.remove_model('latest')
print(b.models)
b.filter(model='run_with_incl_90')
b.filter(component='primary', model='run_with_incl_90')
b.get_parameter(qualifier='us', component='primary', model='run_with_incl_90')
b.get_value(qualifier='us', dataset='orb01', component='primary', model='run_with_incl_90')[:10]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we'll import our packages and initialize the default PHOEBE bundle.
Step2: And we'll attach some dummy datasets. See the datasets tutorial for more details.
Step3: Default Compute Options
Step4: Adding Compute Options
Step5: Editing Compute Options
Step6: as you can see, there is a copy for both of our compute options ('preview' and 'detailed').
Step7: or to enable/disable a dataset for all sets of compute options, we can use the set_value_all method
Step8: If the enabled parameter is missing for a set of compute options - it is likely that that particular backend does not support that dataset type.
Step9: Storing/Tagging Models
Step10: We will now have three new sets of synthetics which can be compared, plotted, or removed.
Step11: To remove a model, call remove_model.
Step12: Accessing Synthetics from Models
|
6,917
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import nibabel as nb
import matplotlib.pyplot as plt
# Let's create a short helper function to plot 3D NIfTI images
def plot_slice(fname):
    """Plot one axial slice (middle + 10) of a 3D NIfTI image in grayscale."""
    # Load the image
    img = nb.load(fname)
    # img.get_data() was deprecated and removed in nibabel >= 5;
    # reading through img.dataobj yields the same array values.
    data = np.asanyarray(img.dataobj)
    # Cut in the middle of the brain (offset +10 along the last axis)
    cut = int(data.shape[-1]/2) + 10
    # Plot the data, rotated so the slice is upright, without axes
    plt.imshow(np.rot90(data[..., cut]), cmap="gray")
    plt.gca().set_axis_off()
plt.gca().set_axis_off()
%%bash
ANAT_NAME=sub-01_ses-test_T1w
ANAT=/data/ds000114/sub-01/ses-test/anat/${ANAT_NAME}
bet ${ANAT} /output/${ANAT_NAME}_brain -m -f 0.3
fslmaths ${ANAT} -s 2 /output/${ANAT_NAME}_smooth
fslmaths /output/${ANAT_NAME}_smooth -mas /output/${ANAT_NAME}_brain_mask /output/${ANAT_NAME}_smooth_mask
f = plt.figure(figsize=(12, 4))
for i, img in enumerate(["T1w", "T1w_smooth",
"T1w_brain_mask", "T1w_smooth_mask"]):
f.add_subplot(1, 4, i + 1)
if i == 0:
plot_slice("/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_%s.nii.gz" % img)
else:
plot_slice("/output/sub-01_ses-test_%s.nii.gz" % img)
plt.title(img)
from nipype.interfaces import fsl
# Skullstrip process
skullstrip = fsl.BET(
in_file="/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz",
out_file="/output/sub-01_T1w_brain.nii.gz",
mask=True)
skullstrip.run()
# Smoothing process
smooth = fsl.IsotropicSmooth(
in_file="/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz",
out_file="/output/sub-01_T1w_smooth.nii.gz",
fwhm=4)
smooth.run()
# Masking process
mask = fsl.ApplyMask(
in_file="/output/sub-01_T1w_smooth.nii.gz",
out_file="/output/sub-01_T1w_smooth_mask.nii.gz",
mask_file="/output/sub-01_T1w_brain_mask.nii.gz")
mask.run()
f = plt.figure(figsize=(12, 4))
for i, img in enumerate(["T1w", "T1w_smooth",
"T1w_brain_mask", "T1w_smooth_mask"]):
f.add_subplot(1, 4, i + 1)
if i == 0:
plot_slice("/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_%s.nii.gz" % img)
else:
plot_slice("/output/sub-01_%s.nii.gz" % img)
plt.title(img)
from nipype.interfaces import fsl
# Skullstrip process
skullstrip = fsl.BET(
in_file="/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", mask=True)
bet_result = skullstrip.run() # skullstrip object
# Smooth process
smooth = fsl.IsotropicSmooth(
in_file="/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", fwhm=4)
smooth_result = smooth.run() # smooth object
# Mask process
mask = fsl.ApplyMask(in_file=smooth_result.outputs.out_file,
mask_file=bet_result.outputs.mask_file)
mask_result = mask.run()
f = plt.figure(figsize=(12, 4))
for i, img in enumerate([skullstrip.inputs.in_file, smooth_result.outputs.out_file,
bet_result.outputs.mask_file, mask_result.outputs.out_file]):
f.add_subplot(1, 4, i + 1)
plot_slice(img)
plt.title(img.split('/')[-1].split('.')[0].split('test_')[-1])
# Import Node and Workflow object and FSL interface
from nipype import Node, Workflow
from nipype.interfaces import fsl
# For reasons that will later become clear, it's important to
# pass filenames to Nodes as absolute paths
from os.path import abspath
in_file = abspath("/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz")
# Skullstrip process
skullstrip = Node(fsl.BET(in_file=in_file, mask=True), name="skullstrip")
# Smooth process
smooth = Node(fsl.IsotropicSmooth(in_file=in_file, fwhm=4), name="smooth")
# Mask process
mask = Node(fsl.ApplyMask(), name="mask")
# Initiation of a workflow
wf = Workflow(name="smoothflow", base_dir="/output/working_dir")
# First the "simple", but more restricted method
wf.connect(skullstrip, "mask_file", mask, "mask_file")
# Now the more complicated method
wf.connect([(smooth, mask, [("out_file", "in_file")])])
wf.write_graph("workflow_graph.dot")
from IPython.display import Image
Image(filename="/output/working_dir/smoothflow/workflow_graph.png")
wf.write_graph(graph2use='flat')
from IPython.display import Image
Image(filename="/output/working_dir/smoothflow/graph_detailed.png")
# Specify the base directory for the working directory
wf.base_dir = "/output/working_dir"
# Execute the workflow
wf.run()
f = plt.figure(figsize=(12, 4))
for i, img in enumerate(["/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz",
"/output/working_dir/smoothflow/smooth/sub-01_ses-test_T1w_smooth.nii.gz",
"/output/working_dir/smoothflow/skullstrip/sub-01_ses-test_T1w_brain_mask.nii.gz",
"/output/working_dir/smoothflow/mask/sub-01_ses-test_T1w_smooth_masked.nii.gz"]):
f.add_subplot(1, 4, i + 1)
plot_slice(img)
!tree /output/working_dir/smoothflow/ -I '*js|*json|*html|*pklz|_report'
from niflow.nipype1.workflows.fmri.fsl import create_susan_smooth
susan = create_susan_smooth(separate_masks=False)
susan.write_graph("susan_workflow.dot")
from IPython.display import Image
Image(filename="susan_workflow.png")
print("Inputs:\n", susan.inputs.inputnode)
print("Outputs:\n", susan.outputs.outputnode)
susan.inputs
from nipype import Function
extract_func = lambda list_out: list_out[0]
list_extract = Node(Function(input_names=["list_out"],
output_names=["out_file"],
function=extract_func),
name="list_extract")
# Initiate workflow with name and base directory
wf2 = Workflow(name="susanflow", base_dir="/output/working_dir")
# Create new skullstrip and mask nodes
skullstrip2 = Node(fsl.BET(in_file=in_file, mask=True), name="skullstrip")
mask2 = Node(fsl.ApplyMask(), name="mask")
# Connect the nodes to each other and to the susan workflow
wf2.connect([(skullstrip2, mask2, [("mask_file", "mask_file")]),
(skullstrip2, susan, [("mask_file", "inputnode.mask_file")]),
(susan, list_extract, [("outputnode.smoothed_files",
"list_out")]),
(list_extract, mask2, [("out_file", "in_file")])
])
# Specify the remaining input variables for the susan workflow
susan.inputs.inputnode.in_files = abspath(
"/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz")
susan.inputs.inputnode.fwhm = 4
wf2.write_graph(dotfilename='/output/working_dir/full_susanflow.dot', graph2use='colored')
from IPython.display import Image
Image(filename="/output/working_dir/full_susanflow.png")
wf2.write_graph(dotfilename='/output/working_dir/full_susanflow_toplevel.dot', graph2use='orig')
from IPython.display import Image
Image(filename="/output/working_dir/full_susanflow_toplevel.png")
wf2.run()
f = plt.figure(figsize=(12, 4))
for i, e in enumerate([["/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz", 'input'],
["/output/working_dir//susanflow/mask/sub-01_ses-test_T1w_smooth_masked.nii.gz",
'output']]):
f.add_subplot(1, 2, i + 1)
plot_slice(e[0])
plt.title(e[1])
%time wf2.run()
wf.inputs.smooth.fwhm = 1
wf.run()
# write your solution here
# importing Node and Workflow
from nipype import Workflow, Node
# importing all interfaces
from nipype.interfaces.fsl import ExtractROI, MCFLIRT, SliceTimer
# extracting all time levels but not the first four
extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),
name="extract")
# using MCFLIRT for motion correction to the mean volume
mcflirt = Node(MCFLIRT(mean_vol=True,
output_type='NIFTI'),
name="mcflirt")
# correcting for slice wise acquisition (acquired with interleaved order and time repetition was 2.5)
slicetimer = Node(SliceTimer(interleaved=True,
output_type='NIFTI',
time_repetition=2.5),
name="slicetimer")
# Initiation of a workflow
wf_ex1 = Workflow(name="exercise1", base_dir="/output/working_dir")
# connect nodes with each other
wf_ex1.connect([(extract, mcflirt, [('roi_file', 'in_file')]),
(mcflirt, slicetimer, [('out_file', 'in_file')])])
# providing a input file for the first extract node
extract.inputs.in_file = "/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz"
# write your solution here
wf_ex1.write_graph("workflow_graph.dot")
from IPython.display import Image
Image(filename="/output/working_dir/exercise1/workflow_graph.png")
wf_ex1.write_graph(graph2use='flat')
from IPython.display import Image
Image(filename="/output/working_dir/exercise1/graph_detailed.png")
wf_ex1.run()
! ls -lh /output/working_dir/exercise1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1 - Command-line execution
Step2: This is simple and straightforward. We can see that this does exactly what we wanted by plotting the four steps of processing.
Step3: Example 2 - Interface execution
Step4: This is more verbose, although it does have its advantages. There's the automated input validation we saw previously, some of the options are named more meaningfully, and you don't need to remember, for example, that fslmaths' smoothing kernel is set in sigma instead of FWHM -- Nipype does that conversion behind the scenes.
Step5: Here we didn't need to name the intermediate files; Nipype did that behind the scenes, and then we passed the result object (which knows those names) onto the next step in the processing stream. This is somewhat more concise than the example above, but it's still a procedural script. And the dependency relationship between the stages of processing is not particularly obvious. To address these issues, and to provide solutions to problems we might not know we have yet, Nipype offers Workflows.
Step6: This looks mostly similar to what we did above, but we've left out the two crucial inputs to the ApplyMask step. We'll set those up by defining a Workflow object and then making connections among the Nodes.
Step7: The Workflow object has a method called connect that is going to do most of the work here. This routine also checks if inputs and outputs are actually provided by the nodes that are being connected.
Step8: Now the workflow is complete!
Step9: This representation makes the dependency structure of the workflow obvious. (By the way, the names of the nodes in this graph are the names we gave our Node objects above, so pick something meaningful for those!)
Step10: Here you see very clearly, that the output mask_file of the skullstrip node is used as the input mask_file of the mask node. For more information on graph visualization, see the Graph Visualization section.
Step11: The specification of base_dir is very important (and is why we needed to use absolute paths above) because otherwise all the outputs would be saved somewhere among temporary files. Unlike interfaces, which by default write results to the local directory, the Workflow engine executes things in its own directory hierarchy.
Step12: Perfect!
Step13: As you can see, the name of the working directory is the name we gave the workflow base_dir. And the name of the folder within is the name of the workflow object smoothflow. Each node of the workflow has its' own subfolder in the smoothflow folder. And each of those subfolders contains the output of the node as well as some additional files.
Step14: Calling this function will return a pre-written Workflow object
Step15: Let's display the graph to see what happens here.
Step16: We see that the workflow has an inputnode and an outputnode. While not strictly necessary, this is standard practice for workflows (especially those that are intended to be used as nested workflows in the context of a longer analysis graph) and makes it more clear how to connect inputs and outputs from this workflow.
Step17: Note that inputnode and outputnode are just conventions, and the Workflow object exposes connections to all of its component nodes
Step18: Let's see how we would write a new workflow that uses this nested smoothing step.
Step19: Now let's create a new workflow susanflow that contains the susan workflow as a sub-node. To be sure, let's also recreate the skullstrip and the mask node from the examples above.
Step20: First, let's see what this new processing graph looks like.
Step21: We can see how there is a nested smoothing workflow (blue) in the place of our previous smooth node. This provides a very detailed view, but what if you just wanted to give a higher-level summary of the processing steps? After all, that is the purpose of encapsulating smaller streams in a nested workflow. That, fortunately, is an option when writing out the graph
Step22: That's much more manageable. Now let's execute the workflow
Step23: As a final step, let's look at the input and the output. It's exactly what we wanted.
Step24: So, why are workflows so great?
Step25: That happened quickly! Workflows (actually this is handled by the Node code) are smart and know if their inputs have changed from the last time they are run. If they have not, they don't recompute; they just turn around and pass out the resulting files from the previous run. This is done on a node-by-node basis, also.
Step26: By changing an input value of the smooth node, this node will be re-executed. This triggers a cascade such that any file depending on the smooth node (in this case, the mask node, also recompute). However, the skullstrip node hasn't changed since the first time it ran, so it just coughed up its original files.
Step27: Defining all nodes
Step28: Creating a workflow
Step29: Exercise 2
Step30: We learnt 2 methods of plotting graphs
Step31: And more detailed graph
Step32: if everything works well, we're ready to run the workflow
Step33: we can now check the output
|
6,918
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pylab as pl
import astropy.io.fits as fits
import rtpipe
import rtlib_cython as rtlib
import astropy.units as units
import astropy.coordinates as coord
from astropy.time import Time
# confirm version is is earlier than 1.54 if using old dm scale
print(rtpipe.__version__)
dmdelay_new = lambda dm, f0, f1: 4.1488e-3*dm*(1./f0**2 - 1./f1**2) # inconsistent with rtpipe
dmdelay = lambda dm, f0, f1: 4.2e-3*dm*(1./f0**2 - 1./f1**2) # consistent with rtpipe, but requires scaling by 1.2%
#name = 'puppi_57648_C0531+33_0048_3756.99.ar.paz'
name = 'puppi_57648_C0531+33_0048_3757.00.ar.paz.pT'
fits.info(name)
hdu = fits.open(name)
hdu0, hdu1, hdu2, hdu3, hdu4 = hdu[0], hdu[1], hdu[2], hdu[3], hdu[4]
hdu0.header
nch0 = 512
obsfreq = 1780.
df = 1.5625
tint0 = 2.04780080448418e-05
#dm0 = 560.5
dm0 = 553.7 # equivalent to 560.5 in correct scaling
dmt_ao = dmdelay(dm0, obsfreq*1e-3, 1e4)/(24*3600)
bary_ao = 75.7555109 # seconds, from presto
mjd0 = int(57648) # subtract this for convenience. must be an integer mjd
#mjdfrac_ao = (37750 + 0.319370962786796)/(24*3600) - dmt_ao # start time from header
#mjdfrac_ao = (37750 + 0.336629511829426)/(24*3600) - dmt_ao # start time from header, second round
mjdfrac_ao = 57648.43692057008 - dmt_ao - mjd0 + bary_ao/(24*3600)
nint0 = 4096
dt_ao = tint0*nint0
tbin = 8
nint1 = nint0/tbin
tint0 = dt_ao/nint0
print('Orig time resolution {0}'.format(tint0))
tint1 = tint0*tbin
print('New time resolution {0}'.format(tint1))
data = fits.getdata(name, ext=4)
spec = data[0][9] # spectrum read as (npol, nchan, nint) array
specI = spec[:2].mean(axis=0) # average over first two to get stokes I **confirm**
specI = specI.reshape(nch0, nint1, tbin).mean(axis=2) # use array slicing to rebin
# flag bad channels
flagch = range(0, 16) + [99, 100, 101, 286, 287, 310, 311] + range(320, 356) + range(nch0-110, nch0)
specI[flagch] = 0
specI = np.ma.masked_equal(specI, 0)
fbin = 2
nch1 = nch0/fbin
print('Orig freq resolution {0}'.format(df))
print('New freq resolution {0}'.format(df*fbin))
specAO = specI.reshape(nch1, fbin, nint1).mean(axis=1) # use array slicing to rebin
meanspec = specAO.mean(axis=1)
specAO = specAO - meanspec[:,None]
specAO = (specAO-np.ma.mean(specAO))/np.ma.std(specAO) # convert to snr per pixel
#specAO = specAO/5 # scale by gain relative to VLA?
print(specAO.shape)
tbary_ao = np.linspace(mjdfrac_ao, mjdfrac_ao+(dt_ao/(24*3600)), nint1)
freqs_ao = np.linspace(obsfreq - fbin*df*nch1, obsfreq, nch1)
extent_ao = (tbary_ao[0], tbary_ao[-1], freqs_ao[0], freqs_ao[-1])
fig = pl.figure(figsize=(12,8))
pl.imshow(specAO, interpolation='nearest', aspect='auto', extent=extent_ao)
pl.colorbar()
pl.figure(figsize=(8,8))
pl.subplot(211)
pl.plot(tbary_ao, specAO.mean(axis=0))
pl.ylabel('Amp (arb)')
pl.xlabel('Time (s)')
pl.subplot(212)
pl.plot(freqs_ao, meanspec)
pl.xlabel('Freq (MHz)')
pl.ylabel('Amp (arb)')
sp = specAO.reshape(nch1, nint1/16, 16).mean(axis=2)
print(sp.mean(axis=0).max()/sp.mean(axis=0)[:20].std())
tpk_ao = tbary_ao[np.where(specAO.mean(axis=0) == specAO.mean(axis=0).max())][0]
print('%.10f' % tpk_ao)
sigma = 4
peakbins = np.where(specAO.mean(axis=0) >= sigma*specAO.mean(axis=0)[:256].std())
peaktimes = tbary_ao[peakbins]
window = (peaktimes.max()-peaktimes.min())*24*3600
print('{0} sigma limit selects {1} ms of pulse ({2} bins)'.format(sigma,
window*1e3,
len(peaktimes)))
print('Window width is a bit wider than properly-dedispersed pulse, due to extra sweep')
Sint_sys = specAO.mean(axis=0)[peakbins].mean()
noise = specAO.mean(axis=0)[:peakbins[0][0]].std()/np.sqrt(len(peaktimes))
Sint = (3./np.sqrt(600e6*window*2))*Sint_sys/noise
print('Integrated flux density over {0} ms pulse: {1} mJy'.format(window, Sint*1e3))
Sspec_sys = specAO[:, peakbins[0]].mean(axis=1)
noise_spec = specAO[:, :peakbins[0][0]].std()/np.sqrt(len(peaktimes))
Sspec = (3./np.sqrt(3.125e6*window*2))*Sspec_sys/noise_spec
print('A (less good) integrated flux density over 2 ms pulse: {0} mJy'.format(int(Sspec.mean()*1e3)))
fig = pl.figure(figsize=(15,7))
ax = fig.add_subplot(111)
pl.plot(1e-3*freqs_ao[::-1], Sspec, 'k.')
pl.text(0.75, 0.88, '57648, Arecibo', horizontalalignment='left', fontsize=24,
verticalalignment='center', transform=ax.transAxes)
pl.errorbar(1.600, 1.2*Sspec.max(), yerr=(3./np.sqrt(3.125e6*window*2)), fmt='k.', ecolor='k')
pl.ylim(-0.03, Sspec.max()*1.4)
pl.xlabel('Frequency (GHz)', fontsize=18)
pl.ylabel('Flux density (Jy)', fontsize=18)
xt = pl.setp(ax.get_xticklabels(), fontsize=18)
yt = pl.setp(ax.get_yticklabels(), fontsize=18)
ax.xaxis.set_tick_params(width=4, color='k')
ax.yaxis.set_tick_params(width=4, color='k')
fig.savefig('specAO_57648.pdf', format='pdf')
# Calibrator flux scale: columns are frequency (GHz), flux density (Jy),
# and flux-density uncertainty (Jy).  Rows of '*' mark frequencies with no
# valid measurement; the parser below skips them.
# NOTE(review): the original cell was missing the triple quotes around the
# table, which made the assignment a syntax error.
calstring = """2.0520 2.89698 0.00279
2.1800 ******* *******
2.3080 ******* *******
2.4360 3.53585 0.00377
2.5640 3.69554 0.00376
2.6920 3.85507 0.00423
2.8200 4.00438 0.00486
2.9480 4.11069 0.00562
3.0520 4.20375 0.00631
3.1800 4.29385 0.00662
3.3080 4.36557 0.00715
3.4360 4.43684 0.00786
3.5640 4.46937 0.00850
3.6920 4.52488 0.00860
3.8200 4.53571 0.00969
3.9480 4.54625 0.00859"""

# parse flux scale into parallel lists, skipping the '*' placeholder rows
freq = []
flux = []
eflux = []
for line in calstring.split('\n'):
    if '*' not in line:
        result = line.split()
        freq.append(float(result[0]))
        flux.append(float(result[1]))
        eflux.append(float(result[2]))
calfreq = np.array(freq)
calflux = np.array(flux)
print(calfreq, calflux)
def getscannum(sdmfile):
    """Return the integer index of the last scan in the SDM that has a
    readable binary data file (BDF).

    Each scan with data is printed as it is found.  Scans whose BDF is
    absent are skipped: the `except IOError` presumably catches the failure
    when `scan.bdf.fname` is accessed for a missing file — TODO confirm.

    NOTE(review): if no scan has a readable BDF, `bdfscan` is never
    assigned and the final `return` raises UnboundLocalError.
    """
    sdm = rtpipe.parsesdm.getsdm(sdmfile)
    for scan in sdm.scans():
        try:
            print('Scan {0} binary data file: {1}'.format(scan.idx, scan.bdf.fname))
            bdfscan = int(scan.idx)
        except IOError:
            pass
    return bdfscan
def read_cut(sdmfile, scan, segment, dm=558., dt=1, gainfile=None, **kwargs):
    """Set up an rtpipe pipeline for one SDM scan and read back one segment.

    Parameters
    ----------
    sdmfile : str
        Path to the SDM dataset.
    scan : int
        Scan index within the SDM.
    segment : int
        Segment of the scan to reproduce and return.
    dm : float
        Dispersion measure (single-element dmarr) used by the pipeline.
    dt : int
        Time-resampling factor (single-element dtarr).
    gainfile : str or None
        Gain calibration file; if None, derived from `sdmfile` by swapping
        its extension for 'GN'.
    **kwargs
        Passed through to rtpipe.RT.set_pipeline (e.g. npix_max, chans).

    Returns
    -------
    (st, data, u, v, w) : pipeline state dict, visibility data for the
    segment, and the segment's baseline coordinates.
    """
    # Default gain file: same basename as the SDM with a .GN extension
    if not gainfile:
        gainfile = '.'.join(sdmfile.split('.')[:-1] + ['GN'])
    st = rtpipe.RT.set_pipeline(sdmfile, scan, dmarr=[dm], dtarr=[dt], flaglist=[('badap', 3., 0.2)],
                            uvoversample=1.5, gainfile=gainfile, flagantsol=True,
                            timesub='mean', logfile=False, savecands=False,
                            savenoise=False, **kwargs)
    # candloc indexes (segment, int, dm, dt, beam); only the segment varies here
    data = rtpipe.RT.pipeline_reproduce(st, candloc=[segment,0,0,0,0], product='data')
    u, v, w = rtpipe.parsesdm.get_uvw_segment(st, segment)
    return st, data, u, v, w
def correctdata(st, data, u, v, w, corr='ph,dm', lm = (-3.835e-04,5.406e-04)):
    """Phase-shift and/or dedisperse a copy of the visibility data.

    NOTE(review): the original docstring lines were bare unquoted text,
    which is a syntax error; they are restored as a proper docstring here.

    Parameters
    ----------
    st : dict
        rtpipe pipeline state; 'freq', 'inttime', 'dmarr' and 'nbl' are
        used when dedispersing.
    data : ndarray
        Visibility data; a copy is corrected and returned (input untouched).
    u, v, w : ndarray
        Baseline coordinates for the segment.
    corr : str
        Which corrections to apply: any string containing 'ph' applies the
        phase shift to `lm`; any string containing 'dm' dedisperses at
        st['dmarr'][0].
    lm : tuple of float
        Direction cosines of the phase-shift target.  lm gives
        (ra, dec) = (5 31 58.703708986, 33 8 52.5067634154)
        as quoted in Chatterjee et al (2017).

    Returns
    -------
    ndarray : corrected copy of `data`.
    """
    data2 = data.copy()
    if 'ph' in corr:
        l1, m1 = lm
        rtlib.phaseshift_threaded(data2, st, l1, m1, u, v)
    if 'dm' in corr:
        rtlib.dedisperse_par(data2, st['freq'], st['inttime'], st['dmarr'][0], [0, st['nbl']])
    return data2
def find_nearest(array, array2, value):
    """Return the element of ``array2`` at the index where ``array`` is
    closest to ``value`` (ties resolve to the first such index).
    """
    distances = np.abs(array - value)
    nearest_idx = distances.argmin()
    return array2[nearest_idx]
def getscale(st):
    """Per-channel flux scaling from the calibrator spectrum.

    For each observing frequency in st['freq'], take the calibrator flux
    at the nearest tabulated frequency (calfreq/calflux).  The result is
    a complex64 array of shape (1, 1, nchan, 1) so it broadcasts against
    the visibility array along its channel axis.
    """
    per_chan = [find_nearest(calfreq, calflux, freq) for freq in st['freq']]
    scale = np.array(per_chan, dtype='complex64')
    return scale[None, None, :, None]
def correct_all(st, data, u, v, w):
    """Apply the calibrator flux scale, phase-shift to the source, then
    dedisperse; returns the fully corrected data.
    """
    scaled = data * getscale(st)
    phased = correctdata(st, scaled, u, v, w, corr='ph')
    return correctdata(st, phased, u, v, w, corr='dm')
key = '57648'
read = {}
sdmfile = '16A-496_sb32698778_1_02h00m_000.57648.37452900463.cut/'
scannum = getscannum(sdmfile)
read[key] = read_cut(sdmfile, scannum, 7, npix_max=7400, chans=range(2,256))
st, data, u, v, w = read[key]
st['dmarr'] = [dm0]
scale = getscale(st)
dataph = correctdata(st, data*scale, u, v, w, corr='ph,dm')
intrange = (30, 60)
dint = intrange[1] - intrange[0]
specVLA = dataph[intrange[0]:intrange[1]].mean(axis=3).mean(axis=1).real
specVLA = (specVLA-specVLA.mean())/specVLA.std() # in SNR units
print(specAO.shape, specVLA.shape)
dmt_vla = dmdelay(dm0, st['freq'][-1], 1e4)/(24*3600)
topo_vla = 75.7533100 # seconds, from presto
mjdfrac_vla = st['segmenttimes'][7][0] + intrange[0]*st['inttime']/(24*3600) - mjd0 - dmt_vla + topo_vla/(24*3600)
print('VLA dMJD: {0}'.format(mjdfrac_vla))
print('AO dMJD: {0}'.format(mjdfrac_ao))
print('Diff: {0} s'.format((mjdfrac_ao-mjdfrac_vla)*24*3600))
tbary_vla = np.linspace(mjdfrac_vla, mjdfrac_vla+(dint*st['inttime']/(24*3600)), dint)
freqs_vla = 1e3*st['freq']
extent_vla = (tbary_vla[0], tbary_vla[-1], freqs_vla[0], freqs_vla[-1])
# put them together
fig = pl.figure(figsize=(8,8))
fig.add_subplot(211)
pl.imshow(specVLA.transpose(), interpolation='nearest', origin='bottom', extent=extent_vla, aspect='auto')
fig.add_subplot(212)
pl.imshow(specAO, interpolation='nearest', aspect='auto', extent=extent_ao)
tpk_vla = tbary_vla[np.where(specVLA.mean(axis=1) == specVLA.mean(axis=1).max())][0]
print('%.10f' % tpk_vla)
gap = 30
specregrid = np.zeros(shape=(nch1+st['nchan']+gap, len(tbary_ao)))
for idx in range(len(tbary_ao)):
specregrid[254+gap:, idx] = specAO[:, idx]
idxs_vla = [np.argmin(np.abs(tbary_vla-tbary_ao[i])) for i in range(len(tbary_ao))]
for idx_ao in range(len(idxs_vla)):
idx_vla = idxs_vla[idx_ao]
specregrid[:254, idx_ao] += specVLA[idx_vla, ::-1]
# assumes fixed relative gain between VLA and AO == 3
fig = pl.figure(figsize=(12,12))
ax = fig.add_subplot(211)
pl.imshow(specregrid, interpolation='nearest', aspect='equal', vmax=0.8*specregrid.max(), cmap='Greys')#, vmin=-0.8)
ax.fill_between(np.arange(0, len(tbary_ao)), 254*np.ones(len(tbary_ao)),
(254+gap)*np.ones(len(tbary_ao)), facecolor='k')
pl.xlabel('Time (ms)', fontsize=14)
pl.ylabel('Frequency (GHz)', fontsize=14)
xticks = np.arange(0, 600, 100)
pl.xticks(xticks, np.array(tbin*2e-2*xticks, dtype='int'))
ntot = nch1+len(st['freq'])+gap
yticks = np.arange(0, ntot, 80)
print(yticks)
pl.yticks(yticks)
pl.yticks(yticks, [st['freq'][-1], st['freq'][-80], st['freq'][-160], st['freq'][-240],
np.round(1e-3*freqs_ao[ntot-320], 3),
np.round(1e-3*freqs_ao[ntot-400], 3),
np.round(1e-3*freqs_ao[ntot-480], 3)])
pl.xlim(150, len(tbary_ao))
pl.ylim(ntot-50, 0)
xt = pl.setp(ax.get_xticklabels(), fontsize=14)
yt = pl.setp(ax.get_yticklabels(), fontsize=14)
ax.xaxis.set_tick_params(width=3, color='k')
ax.yaxis.set_tick_params(width=3, color='k')
ax.text(170, 330, "Arecibo", rotation=90, fontsize=20)
ax.text(170, 130, "VLA", rotation=90, fontsize=20)
fig.savefig('aovla_spec.pdf', format='pdf')
# some residual dm sweep?
print(24*3600*(tbary_ao[np.where(specAO[128:].mean(axis=0) == specAO[128:].mean(axis=0).max())][0] -
tbary_ao[np.where(specAO[:128].mean(axis=0) == specAO[:128].mean(axis=0).max())][0]))
dmt_ao_delta = dmdelay(dm0, obsfreq*1e-3, st['freq'][-1]) - dmdelay(560, obsfreq*1e-3, st['freq'][-1])
dmt_ao_inter = dmdelay(dm0, obsfreq*1e-3, (obsfreq-500)*1e-3) - dmdelay(560, obsfreq*1e-3, (obsfreq-500)*1e-3)
print(dmt_ao_delta, dmt_ao_inter)
# 4 bursts with ao, vla, effelsberg coverage
# (ami-la covers all 4 vla bursts)
# 57643 (AO-C, *VLA-S)
# 57645 (AO-L, *VLA-S)
# 57648 (*AO-L, *VLA-S, Eff-C)
# 57649 (AO-L, *VLA-S, Eff-C)
# * shows detections
# limits assume 2 ms pulse width
#s43d = np.array([[3.0, 4.0], [0.508, 0.0036*5]])
#s43l = np.array([[4.0], [0.0036*5]]) # useless data!
s45d = np.array([[1.38, 3.0], [0.002*5, (5/2.)*0.064]])
s45l = np.array([[1.38], [0.002*5]]) # too high?
s48d = np.array([[1.4, 3.0, 4.85], [0.057, (5/2.)*0.111, 0.028*5]]) # fixed for 2 ms width
s48l = np.array([[4.85], [0.028*5]])
s49d = np.array([[1.42, 3.0, 4.9], [0.002*5, (5/2.)*0.167, 0.028*5]])
s49l = np.array([[1.42, 4.9], [0.002*5, 0.028*5]])
fig = pl.figure(figsize=(10,5))
ax = fig.add_subplot(111)
# overplot upper limit symbols
#ax.plot(s43l[0], s43l[1], 'cv', ms=10)
ax.plot(s45l[0], s45l[1], 'kv', ms=10)
ax.plot(s48l[0], s48l[1], 'kv', ms=10)
ax.plot(s49l[0], s49l[1], 'kv', ms=10)
# plot lines with points first
#ax.plot(s43d[0], s43d[1], 'c.-', ms=10, label='12 Sep 2016')
ax.plot(s45d[0], s45d[1], 'k.--', ms=10, label='57645')
ax.plot(s48d[0], s48d[1], 'k.-.', ms=10, label='57648')
ax.plot(s49d[0], s49d[1], 'k.-', ms=10, label='57649')
ax.set_xlabel('Frequency (GHz)', fontsize=14)
ax.set_ylabel('Integrated Flux density (Jy; 2 ms)', fontsize=14)
ax.legend(fontsize=14)
xt = pl.setp(ax.get_xticklabels(), fontsize=14)
yt = pl.setp(ax.get_yticklabels(), fontsize=14)
ax.xaxis.set_tick_params(width=3, color='k')
ax.yaxis.set_tick_params(width=3, color='k')
fig.savefig('multispec.pdf', format='pdf')
def speclim(s):
    """Print the spectral index (log flux ratio over log frequency ratio)
    for each adjacent pair of points in a spectrum.

    `s` is a 2-row array: s[0] holds frequencies (GHz), s[1] the
    corresponding flux densities.  For each pair this prints the two
    frequencies, the two fluxes, and the implied spectral index.
    """
    freqs, fluxes = s[0], s[1]
    for f_lo, f_hi, s_lo, s_hi in zip(freqs[:-1], freqs[1:],
                                      fluxes[:-1], fluxes[1:]):
        # Snap band edges back from the offsets used when plotting
        if f_lo <= 1.5:
            f_lo = 1.4
        elif f_hi > 4.2:
            f_hi = 4.85
        print(f_lo, f_hi)
        print(s_lo, s_hi)
        print(np.log10(s_lo / s_hi) / np.log10(f_lo / f_hi))
speclim(s45d)
speclim(s48d)
speclim(s49d)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Useful functions
Step2: Read coherently dedispersed Arecibo dynamic spectrum
Step3: Define python names for Arecibo header info
Step4: Read dynamic spectrum and average down a bit
Step5: Arecibo burst SNR
Step6: crude integrated flux measurement
Step7: Make nice Arecibo burst spectrum like the VLA ones
Step10: VLA
Step11: Read data with rtpipe and phase it to FRB 121102
Step12: Select VLA data for comparison to Arecibo
Step13: Calculate dm delay and topocentric correction for VLA
Step14: Regrid VLA and AO data to same fixed image grid
Step15: Plot it
Step16: Calculate residual sweep and compare to DM model
Step17: Burst spectra with and without detections
Step18: Calculate implied spectral index and limits
|
6,919
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
import numpy as np
from functools import reduce
# Import our custom utilities
from imp import reload
from utils import irf_jupyter_utils
from utils import irf_utils
reload(irf_jupyter_utils)
reload(irf_utils)
load_breast_cancer = load_breast_cancer()
X_train, X_test, y_train, y_test, rf = irf_jupyter_utils.generate_rf_example(n_estimators=10)
print("Training feature dimensions", X_train.shape, sep = ":\n")
print("\n")
print("Training outcome dimensions", y_train.shape, sep = ":\n")
print("\n")
print("Test feature dimensions", X_test.shape, sep = ":\n")
print("\n")
print("Test outcome dimensions", y_test.shape, sep = ":\n")
print("\n")
print("first 5 rows of the training set features", X_train[:2], sep = ":\n")
print("\n")
print("first 5 rows of the training set outcomes", y_train[:2], sep = ":\n")
all_rf_tree_data = irf_utils.get_rf_tree_data(rf=rf,
X_train=X_train, y_train=y_train,
X_test=X_test, y_test=y_test)
#all_rf_tree_data
np.random.seed(12)
gen_random_leaf_paths = irf_utils.generate_rit_samples(all_rf_tree_data=all_rf_tree_data,
bin_class_type=1)
#for i in range(7):
# print(next(gen_random_leaf_paths))
rit0 = irf_utils.build_tree(feature_paths=gen_random_leaf_paths,
max_depth=3,
noisy_split=False,
num_splits=2)
for node in rit0.traverse_depth_first():
print(node[0], node[1]._val)
for idx, node in enumerate(rit0.leaf_nodes()):
print(idx, node[1]._val)
print("Root:\n", rit0._val)
#print("Some child:\n", tree.children[0].children[1]._val)
len(rit0)
# If noisy split is False, this should pass
#assert(len(rit0) == 1 + 5 + 5**2)
assert(len(rit0) <= 1 + 5 + 5**2)
M = 10
np.random.seed(12)
# Create the weighted randomly sampled paths as a generator
gen_random_leaf_paths = irf_utils.generate_rit_samples(all_rf_tree_data=all_rf_tree_data,
bin_class_type=1)
# Create the RIT object
rit = irf_utils.build_tree(feature_paths=gen_random_leaf_paths,
max_depth=3,
noisy_split=False,
num_splits=2)
# Get the intersected node values
rit_intersected_values = [node[1]._val for node in rit.traverse_depth_first()]
# Leaf node values i.e. final intersected features
rit_leaf_node_values = [node[1]._val for node in rit.leaf_nodes()]
rit_leaf_node_union_value = reduce(np.union1d, rit_leaf_node_values)
rit_outputs = {"rit": rit,
"rit_intersected_values": rit_intersected_values,
"rit_leaf_node_values": rit_leaf_node_values,
"rit_leaf_node_union_value": rit_leaf_node_union_value}
rit_outputs['rit_intersected_values']
def get_rit_tree_data(all_rf_tree_data,
                      bin_class_type=1,
                      random_state=12,
                      #M=10, # number of trees (RIT) to build
                      feature_paths=gen_random_leaf_paths,
                      max_depth=3,
                      noisy_split=False,
                      num_splits=2):
    """A wrapper for the Random Intersection Trees (RIT) algorithm.

    NOTE(review): the original docstring line was bare unquoted text (a
    syntax error), and ``random_state`` was ignored in favor of a
    hard-coded seed of 12; both are fixed here.  The default seed is
    still 12, so existing calls behave identically.

    Parameters
    ----------
    all_rf_tree_data : dict
        Per-tree data extracted from a fitted random forest.
    bin_class_type : int
        Class label whose leaf paths are sampled (1 = positive class).
    random_state : int
        Seed for the weighted random sampling of leaf paths.
    feature_paths : generator
        Unused; kept for backward compatibility.  The sampled paths are
        regenerated internally from ``all_rf_tree_data``.
    max_depth, noisy_split, num_splits
        Passed through to irf_utils.build_tree.

    Returns
    -------
    dict with keys 'rit' (the tree object), 'rit_intersected_values'
    (intersections at every node, depth-first), 'rit_leaf_node_values'
    (final intersected feature sets), and 'rit_leaf_node_union_value'
    (union of the leaf-node sets).
    """
    # Seed for reproducibility of the weighted path sampling
    np.random.seed(random_state)

    # Create the weighted randomly sampled paths as a generator
    gen_paths = irf_utils.generate_rit_samples(
        all_rf_tree_data=all_rf_tree_data,
        bin_class_type=bin_class_type)

    # Build the RIT over the sampled paths
    rit = irf_utils.build_tree(feature_paths=gen_paths,
                               max_depth=max_depth,
                               noisy_split=noisy_split,
                               num_splits=num_splits)

    # Intersected feature sets at every node (depth-first order)
    rit_intersected_values = [node[1]._val
                              for node in rit.traverse_depth_first()]
    # Leaf-node values, i.e. the final intersected feature sets
    rit_leaf_node_values = [node[1]._val for node in rit.leaf_nodes()]
    rit_leaf_node_union_value = reduce(np.union1d, rit_leaf_node_values)
    rit_outputs = {"rit": rit,
                   "rit_intersected_values": rit_intersected_values,
                   "rit_leaf_node_values": rit_leaf_node_values,
                   "rit_leaf_node_union_value": rit_leaf_node_union_value}
    return rit_outputs
a = get_rit_tree_data(all_rf_tree_data=all_rf_tree_data,
bin_class_type=1,
random_state=12,
#M=10, # number of trees (RIT) to build
max_depth=3,
noisy_split=False,
num_splits=2)
a['rit_intersected_values']
b = irf_utils.get_rit_tree_data(all_rf_tree_data=all_rf_tree_data,
bin_class_type=1,
random_state=12,
M=1,
max_depth=3,
noisy_split=False,
num_splits=2)
b['rit0']['rit_intersected_values']
c = irf_utils.get_rit_tree_data(all_rf_tree_data=all_rf_tree_data,
bin_class_type=1,
random_state=12,
M=10,
max_depth=3,
noisy_split=False,
num_splits=2)
c['rit1']
# Print the feature ranking
print("Feature ranking:")
feature_importances_rank_idx = all_rf_tree_data['feature_importances_rank_idx']
feature_importances = all_rf_tree_data['feature_importances']
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1
, feature_importances_rank_idx[f]
, feature_importances[feature_importances_rank_idx[f]]))
# Plot the feature importances of the forest
feature_importances_std = all_rf_tree_data['feature_importances_std']
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_train.shape[1])
, feature_importances[feature_importances_rank_idx]
, color="r"
, yerr = feature_importances_std[feature_importances_rank_idx], align="center")
plt.xticks(range(X_train.shape[1]), feature_importances_rank_idx)
plt.xlim([-1, X_train.shape[1]])
plt.show()
uniq_feature_paths = all_rf_tree_data['dtree0']['all_uniq_leaf_paths_features']
leaf_node_classes = all_rf_tree_data['dtree0']['all_leaf_node_classes']
ones_only = [i for i, j in zip(uniq_feature_paths, leaf_node_classes)
if j == 1]
ones_only
ones_only_seven = ones_only[-7:]
ones_only_seven
# Construct a binary version of the RIT manually!
# This should come in useful for unit tests!
node0 = ones_only_seven[0]
node1 = np.intersect1d(node0, ones_only_seven[1])
node2 = np.intersect1d(node1, ones_only_seven[2])
node3 = np.intersect1d(node1, ones_only_seven[3])
node4 = np.intersect1d(node0, ones_only_seven[4])
node5 = np.intersect1d(node4, ones_only_seven[5])
node6 = np.intersect1d(node4, ones_only_seven[6])
intersected_nodes_seven = [node0, node1, node2, node3, node4, node5, node6]
for idx, node in enumerate(intersected_nodes_seven):
print("node" + str(idx), node)
rit_output = reduce(np.union1d, (node2, node3, node5, node6))
rit_output
ones_only_seven_gen = (n for n in ones_only_seven)
#for i in range(len(ones_only_seven)):
# print(next(ones_only_seven_gen))
rit_man0 = irf_utils.build_tree(
feature_paths=ones_only_seven_gen,
max_depth=3,
noisy_split=False,
num_splits=2)
print("Root:\n", rit_man0._val)
print("Root:\n", rit_man0._val)
print("node1:\n", rit_man0.children[0]._val)
print("node4:\n", rit_man0.children[1]._val)
print("node2:\n", rit_man0.children[0].children[0]._val)
print("node3:\n", rit_man0.children[0].children[1]._val)
print("node5:\n", rit_man0.children[1].children[0]._val)
print("node6:\n", rit_man0.children[1].children[1]._val)
for node in rit_man0.traverse_depth_first():
print(node[1]._val)
for node in rit_man0.leaf_nodes():
print(node[1]._val)
# Now plot the trees individually
irf_jupyter_utils.draw_tree(decision_tree = all_rf_tree_data['rf_obj'].estimators_[0])
irf_jupyter_utils.pretty_print_dict(inp_dict = all_rf_tree_data['dtree0'])
# Count the number of samples passing through the leaf nodes
sum(all_rf_tree_data['dtree0']['tot_leaf_node_values'])
irf_jupyter_utils.pretty_print_dict(inp_dict = all_rf_tree_data['dtree0']['all_leaf_paths_features'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Check out the data
Step3: Step 2
Step4: STEP 3
Step5: Print out all of the intersected nodes and their ids
Step7: Print out all of the leaf node values
Step8: Perform Manual CHECKS on the irf_utils
Step 4
Step9: Plot Ranked Feature Importances
Step10: Manually construct a binary RIT
Step11: Manually extract the last seven values
Step12: Manually leaf node 1's paths as a generator
Step13: Create a binary RIT without the randomized split option
Step14: Check Output of Decision Tree Extracted Data
Step15: Compare to our dict of extracted data from the tree
Step16: Check output against the diagram
|
6,920
|
<ASSISTANT_TASK:>
Python Code:
def pretty_print_review_and_label(i):
    """Print the label for review ``i`` alongside the first 80 characters
    of its text, separated by tab-colon-tab.
    """
    snippet = reviews[i][:80]
    print("".join([labels[i], "\t:\t", snippet, "..."]))
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
from collections import Counter
import numpy as np
# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
# TODO: Loop over all the words in all the reviews and increment the counts in the appropriate counter objects
# Sanity check: reviews and labels should be parallel lists of equal length
print('# of reviews')
print(len(reviews))
print('# of labels')
print(len(labels))
# Tally word frequencies separately for positive and negative reviews,
# plus an overall count.  Splitting on a single space presumably matches
# how reviews.txt was tokenized — TODO confirm.
for i in range(len(labels)):
    if labels[i] == 'POSITIVE':
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1
# Examine the counts of the most common words in positive reviews
positive_counts.most_common()
# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
# Create Counter object to store positive/negative ratios
pos_neg_ratios = Counter()
#for term, count in (total_counts.most_common()):
# print(total_counts[term])
# print(total_counts[count])
for term,cnt in list(total_counts.most_common()):
if(cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
# TODO: Convert ratios to logs
for word,ratio in pos_neg_ratios.most_common():
pos_neg_ratios[word] = np.log(ratio)
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
# Note: Above is the code Andrew uses in his solution video,
# so we've included it here to avoid confusion.
# If you explore the documentation for the Counter class,
# you will see you could also find the 30 least common
# words like this: pos_neg_ratios.most_common()[:-31:-1]
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
# TODO: Create set named "vocab" containing all of the words from all of the reviews
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
from IPython.display import Image
Image(filename='sentiment_network_2.png')
# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros
layer_0 = np.zeros((1, vocab_size))
layer_0.shape
from IPython.display import Image
Image(filename='sentiment_network.png')
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0)
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
# display the map of words to indices
word2index
def update_input_layer(review):
    """Modify the global layer_0 to represent the vector form of review.

    The element at a given index of layer_0 should represent
    how many times the given word occurs in the review.

    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    global layer_0

    # clear out previous state by resetting the layer to be all 0s
    layer_0 *= 0

    # count how many times each word is used in the given review and
    # store the results in layer_0
    for word in review.split(" "):
        layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
    """Convert a label to `0` or `1`.

    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `1` when the label is "POSITIVE", `0` otherwise.
    """
    return 1 if label == 'POSITIVE' else 0
# TODO: Your code here
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
    """A simple feed-forward network for classifying movie-review sentiment.

    Architecture: one input node per vocabulary word, a single hidden
    layer (no activation), and one sigmoid output node.
    """

    def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimentNetwork with the given settings.

        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Assign a seed to our random number generator to ensure we get
        # reproducible results during development.
        np.random.seed(1)

        # Process the reviews and their associated labels so that everything
        # is ready for training.
        self.pre_process_data(reviews, labels)

        # Build the network with as many input nodes as there are vocabulary
        # words, the requested number of hidden nodes, and one output node.
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build the word/label vocabularies and their index lookup tables."""
        # Collect every distinct word that appears in the training reviews.
        # Split on a single space to match how reviews were tokenized.
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)

        # Convert the vocabulary set to a list so we can access words via indices.
        self.review_vocab = list(review_vocab)

        # Collect every distinct label (each label is a single word; no split needed).
        label_vocab = set()
        for label in labels:
            label_vocab.add(label)

        # Convert the label vocabulary set to a list so we can access labels via indices.
        self.label_vocab = list(label_vocab)

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Create a dictionary of words in the vocabulary mapped to index positions.
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i

        # Create a dictionary of labels mapped to index positions.
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate the weight matrices and the reusable input layer."""
        # Store the number of nodes in input, hidden, and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Store the learning rate.
        self.learning_rate = learning_rate

        # Weights between the input layer and the hidden layer start at zero.
        self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))

        # Weights between the hidden layer and the output layer start as
        # small random values.
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))

        # The input layer: a 1 x input_nodes matrix of word counts.
        self.layer_0 = np.zeros((1, input_nodes))

    def update_input_layer(self, review):
        """Reset self.layer_0 and fill it with word counts for *review*."""
        # Bug fix: the previous version never cleared the layer, so word
        # counts accumulated across successive reviews.
        self.layer_0 *= 0

        for word in review.split(" "):
            # Only count words seen during training; indexing word2index
            # with an unknown word will raise a KeyError otherwise.
            if word in self.word2index.keys():
                self.layer_0[0][self.word2index[word]] += 1

    def get_target_for_label(self, label):
        """Convert a label to `0` or `1`."""
        if label == 'POSITIVE':
            return 1
        else:
            return 0

    def sigmoid(self, x):
        """The sigmoid activation function, 1 / (1 + e^-x)."""
        # Bug fix: the previous version returned (1 + np.exp(-x)), which is
        # the *denominator* of the sigmoid, not the sigmoid itself.
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self, output):
        """Derivative of the sigmoid, where *output* is sigmoid's output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """Run one pass of forward/backward training over the given data.

        NOTE(review): the per-review forward and backward passes are still
        TODO placeholders from the original exercise; this method currently
        only iterates over the data and reports progress.
        """
        # make sure we have a matching number of reviews and labels
        assert(len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training.
        correct_so_far = 0

        # Remember when we started, for printing time statistics.
        start = time.time()

        # loop through all the given reviews and run a forward and backward
        # pass, updating weights for every item
        for i in range(len(training_reviews)):

            # TODO: Get the next review and its correct label.

            # TODO: Implement the forward pass through the network:
            # use the review to update the input layer, then compute the
            # hidden layer (no activation) and the sigmoid output layer.

            # TODO: Implement the back propagation pass: compute the output
            # error and update the weights via gradient descent.

            # TODO: Track correct predictions — count a prediction as correct
            # when the absolute output error is less than 0.5.

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions.
        """
        # keep track of how many correct predictions we make
        correct = 0

        # we'll time how many predictions per second we make
        start = time.time()

        # Loop through each of the given reviews and call run to predict its label.
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """Returns a POSITIVE or NEGATIVE prediction for the given review.

        NOTE(review): the forward pass is still a TODO placeholder from the
        original exercise; this method currently returns None.
        """
        # TODO: Run a forward pass like in `train` (lower-case the incoming
        # review first, since it may come from anywhere), then return
        # "POSITIVE" for outputs >= 0.5 and "NEGATIVE" otherwise.
        pass
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.test(reviews[-1000:],labels[-1000:])
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
    # Rebuild the global input layer as a word-count vector for *review*.
    global layer_0

    # Wipe the counts left over from the previous review.
    layer_0 *= 0

    for token in review.split(" "):
        idx = word2index[token]
        layer_0[0][idx] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
# TODO: -Copy the SentimentNetwork class from Projet 3 lesson
# -Modify it to reduce noise, like in the video
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
Image(filename='sentiment_network_sparse.png')
layer_0 = np.zeros(10)
layer_0
layer_0[4] = 1
layer_0[9] = 1
layer_0
weights_0_1 = np.random.randn(10,5)
layer_0.dot(weights_0_1)
indices = [4,9]
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (1 * weights_0_1[index])
layer_1
Image(filename='sentiment_network_sparse_2.png')
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (weights_0_1[index])
layer_1
# TODO: -Copy the SentimentNetwork class from Project 4 lesson
# -Modify it according to the above instructions
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
Image(filename='sentiment_network_sparse_2.png')
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()

# Plot the distribution of per-word positive/negative affinity scores.
# Bug fix: `normed` was deprecated and later removed from numpy.histogram;
# passing it together with `density` raises. `density=True` alone gives
# the normalized histogram.
hist, edges = np.histogram(list(map(lambda x: x[1], pos_neg_ratios.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
# Count how many words occur with each frequency (frequency-of-frequency).
frequency_frequency = Counter()
for word, cnt in total_counts.most_common():
    frequency_frequency[cnt] += 1

# Bug fix: `normed` was deprecated and later removed from numpy.histogram;
# `density=True` is the supported replacement.
hist, edges = np.histogram(list(map(lambda x: x[1], frequency_frequency.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
# TODO: -Copy the SentimentNetwork class from Project 5 lesson
# -Modify it according to the above instructions
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
mlp.test(reviews[-1000:],labels[-1000:])
mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp_full.train(reviews[:-1000],labels[:-1000])
Image(filename='sentiment_network_sparse.png')
def get_most_similar_words(focus = "horrible"):
    # Rank every vocabulary word by the dot product of its input-weight
    # vector with that of *focus*; words used in similar contexts end up
    # with similar weight vectors, so larger products mean "more similar".
    similarities = Counter()
    focus_vector = mlp_full.weights_0_1[mlp_full.word2index[focus]]
    for candidate in mlp_full.word2index.keys():
        candidate_vector = mlp_full.weights_0_1[mlp_full.word2index[candidate]]
        similarities[candidate] = np.dot(candidate_vector, focus_vector)
    return similarities.most_common()
get_most_similar_words("excellent")
get_most_similar_words("terrible")
import matplotlib.colors as colors
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
pos = 0
neg = 0
colors_list = list()
vectors_list = list()
for word in words_to_visualize:
if word in pos_neg_ratios.keys():
vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
if(pos_neg_ratios[word] > 0):
pos+=1
colors_list.append("#00ff00")
else:
neg+=1
colors_list.append("#000000")
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
p = figure(tools="pan,wheel_zoom,reset,save",
toolbar_location="above",
title="vector T-SNE for most polarized words")
source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],
x2=words_top_ted_tsne[:,1],
names=words_to_visualize,
color=colors_list))
p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color")
word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
p.add_layout(word_labels)
show(p)
# green indicates positive words, black indicates negative words
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Lesson
Step3: Project 1
Step4: We'll create three Counter objects, one for words from postive reviews, one for words from negative reviews, and one for all the words.
Step5: TODO
Step6: Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.
Step7: As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the ratios of word usage between positive and negative reviews.
Step8: Examine the ratios you've calculated for a few words
Step9: Looking closely at the values you just calculated, we see the following
Step10: Examine the new ratios you've calculated for the same words from before
Step11: If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above 1, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below -1. It's now clear that both of these words are associated with specific, opposing sentiments.
Step12: End of Project 1.
Step13: Project 2
Step14: Run the following cell to check your vocabulary size. If everything worked correctly, it should print 74074
Step15: Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. layer_0 is the input layer, layer_1 is a hidden layer, and layer_2 is the output layer.
Step16: TODO
Step17: Run the following cell. It should display (1, 74074)
Step18: layer_0 contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.
Step20: TODO
Step21: Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in layer_0.
Step23: TODO
Step24: Run the following two cells. They should print out'POSITIVE' and 1, respectively.
Step25: Run the following two cells. They should print out 'NEGATIVE' and 0, respectively.
Step29: End of Project 2.
Step30: Run the following cell to create a SentimentNetwork that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of 0.1.
Step31: Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set).
Step32: Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.
Step33: That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, 0.01, and then train the new network.
Step34: That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, 0.001, and then train the new network.
Step35: With a learning rate of 0.001, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.
Step36: Project 4
Step37: Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of 0.1.
Step38: That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.
Step39: End of Project 4.
Step40: Project 5
Step41: Run the following cell to recreate the network and train it once again.
Step42: That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.
Step43: End of Project 5.
Step44: Project 6
Step45: Run the following cell to train your network with a small polarity cutoff.
Step46: And run the following cell to test its performance. It should be
Step47: Run the following cell to train your network with a much larger polarity cutoff.
Step48: And run the following cell to test its performance.
Step49: End of Project 6.
|
6,921
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncar', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
6,922
|
<ASSISTANT_TASK:>
Python Code:
# Obtain the North Carolina sample dataset, set up a new GRASS mapset,
# and fetch the lidar point files used in the exercise.
# NOTE: urllib.urlretrieve is the Python 2 API (Python 3 moved it to
# urllib.request.urlretrieve), consistent with the rest of this notebook.
import urllib
from zipfile import ZipFile
import os.path
zip_path = "/home/jovyan/work/tmp/nc_spm_08_grass7.zip"
mapset_path = "/home/jovyan/grassdata"
# download the sample location archive only if it is not already cached
if not os.path.exists(zip_path):
    urllib.urlretrieve("https://grass.osgeo.org/sampledata/north_carolina/nc_spm_08_grass7.zip", zip_path)
# extract into the GRASS data directory only if not already unpacked
if not os.path.exists(mapset_path):
    with ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(mapset_path)
# a proper directory is already set, download the lidar point files
# (bare-earth and multiple-return points); the original cell re-imported
# urllib and downloaded lid_be_pts.txt a second time -- both removed.
urllib.urlretrieve("http://ncsu-geoforall-lab.github.io/geospatial-modeling-course/grass/data/lid_be_pts.txt", "lid_be_pts.txt")
urllib.urlretrieve("http://ncsu-geoforall-lab.github.io/geospatial-modeling-course/grass/data/lid_mr_pts.txt", "lid_mr_pts.txt")
# using Python to initialize GRASS GIS
import os
import sys
import subprocess
from IPython.display import Image
# create GRASS GIS runtime environment
# ask the grass binary for its installation path
# NOTE(review): check_output returns bytes on Python 3; assigning it into
# os.environ assumes a Python 2 kernel -- confirm the interpreter version.
gisbase = subprocess.check_output(["grass", "--config", "path"]).strip()
os.environ['GISBASE'] = gisbase
# make the bundled GRASS Python packages importable
sys.path.append(os.path.join(gisbase, "etc", "python"))
# do GRASS GIS imports
import grass.script as gs
import grass.script.setup as gsetup
# set GRASS GIS session data
# open a session in location nc_spm_08_grass7, mapset user1;
# init() returns the path of the session rc file (removed at notebook end)
rcfile = gsetup.init(gisbase, "/home/jovyan/grassdata", "nc_spm_08_grass7", "user1")
# using Python to initialize GRASS GIS
# default font displays
os.environ['GRASS_FONT'] = 'sans'
# overwrite existing maps
os.environ['GRASS_OVERWRITE'] = '1'
# fail loudly instead of silently on GRASS module errors
gs.set_raise_on_error(True)
gs.set_capture_stderr(True)
# using Python to initialize GRASS GIS
# set display modules to render into a file (named map.png by default)
os.environ['GRASS_RENDER_IMMEDIATE'] = 'cairo'
os.environ['GRASS_RENDER_FILE_READ'] = 'TRUE'
os.environ['GRASS_LEGEND_FILE'] = 'legend.txt'
# --- Lidar binning, comparison and interpolation ---
# NOTE: the "!"-prefixed lines are IPython shell escapes running GRASS
# modules; Image(filename="map.png") displays the rendered output.
# set the computational region to the rural_1m extent at 2 m resolution
!g.region rural_1m res=2 -p
# bin the bare-earth and multiple-return points: point count per 2 m cell
!r.in.xyz input=lid_be_pts.txt output=lid_be_binn2m method=n
!r.in.xyz input=lid_mr_pts.txt output=lid_mr_binn2m method=n
!d.erase
!d.rast lid_mr_binn2m
!d.legend lid_mr_binn2m at=2,20,2,5
Image(filename="map.png")
# per-category report and univariate statistics of the point counts
!r.report lid_mr_binn2m unit=p
!r.univar lid_mr_binn2m
!d.rast lid_be_binn2m
!d.legend lid_be_binn2m at=2,20,2,5
!r.report lid_be_binn2m unit=p
!r.univar lid_be_binn2m
Image(filename="map.png")
# merge the planimetry tiles into a single vector map and display it
!v.patch P079214,P079215,P079218,P079219 out=planimetry_rural
!d.vect planimetry_rural
Image(filename="map.png")
# repeat the binning at 6 m resolution (-a aligns region to the resolution)
!g.region rural_1m res=6 -ap
!r.in.xyz input=lid_be_pts.txt out=lid_be_binn6m meth=n
!d.erase
!d.rast lid_be_binn6m
!d.legend lid_be_binn6m at=2,20,2,5
Image(filename="map.png")
!r.report lid_be_binn6m unit=p
!r.univar lid_be_binn6m
Image(filename="map.png")
# mean elevation per 6 m cell, colored with the elevation color table
!r.in.xyz input=lid_be_pts.txt out=lid_be_binmean6m meth=mean
!r.colors lid_be_binmean6m color=elevation
!d.rast lid_be_binmean6m
!d.legend lid_be_binmean6m at=2,40,2,5
!r.in.xyz input=lid_mr_pts.txt out=lid_mr_binmean6m meth=mean
!r.colors lid_mr_binmean6m co=elevation
!d.rast lid_mr_binmean6m
!d.legend lid_mr_binmean6m at=2,40,2,5
Image(filename="map.png")
# elevation range per 6 m cell -- large ranges flag vegetation/structures
!r.in.xyz input=lid_be_pts.txt out=lid_be_binrange6m meth=range
!r.in.xyz input=lid_mr_pts.txt out=lid_mr_binrange6m meth=range
!d.erase
!d.rast lid_be_binrange6m
!d.legend lid_be_binrange6m at=2,40,2,5
!d.rast lid_mr_binrange6m
!d.legend lid_mr_binrange6m at=2,40,2,5
Image(filename="map.png")
# overlay reference vector layers to identify features with large range
!d.vect planimetry_rural
!d.vect lakes type=boundary co=violet
!d.vect streams co=blue
Image(filename="map.png")
# show only the high range values (0.5-20 m) over the orthophoto
!g.region rural_1m -p
!d.erase
!d.rast ortho_2001_t792_1m
!d.rast lid_be_binrange6m val=0.5-20.
!d.erase
!d.rast ortho_2001_t792_1m
!d.rast lid_mr_binrange6m val=0.5-20.
Image(filename="map.png")
# import the bare-earth points as 3D vector points (z from column 3)
!g.region rural_1m -p
!v.in.ascii -ztr input=lid_be_pts.txt out=elev_lid_bepts z=3
!d.erase
!d.rast ortho_2001_t792_1m
Image(filename="map.png")
!d.vect elev_lid_bepts size=2 col=red
Image(filename="map.png")
# bare-ground and multiple-return points from the prepared dataset
!d.vect elev_lidrural_mrpts size=4 col=0:100:0
!d.vect elev_lid792_bepts size=2 col=yellow
Image(filename="map.png")
# extract first-return points (DSM input) and interpolate DEM and DSM
# surfaces with regularized splines with tension (v.surf.rst)
!g.region rural_1m -p
!v.extract elev_lidrural_mrpts out=elev_lidrur_first type=point where="Return=1"
!v.surf.rst input=elev_lid792_bepts elevation=elev_lidrural_1m npmin=120 segmax=25
!v.surf.rst input=elev_lidrur_first elevation=elev_lidrurfirst_1m npmin=120 segmax=25 tension=100 smooth=0.5
!d.erase
!d.rast elev_lidrural_1m
!d.rast elev_lidrurfirst_1m
Image(filename="map.png")
# display the multiple-return points colored by return number
!g.region rural_1m -p
!d.erase
!d.rast ortho_2001_t792_1m
Image(filename="map.png")
!d.vect elev_lidrural_mrpts where=return=1 col=red size=2
!d.vect elev_lidrural_mrpts where=return=2 col=green size=3
!d.vect elev_lidrural_mrpts where=return=3 col=blue
!d.vect elev_lidrural_mrpts where=return=4 col=yellow
Image(filename="map.png")
# end the GRASS session
os.remove(rcfile)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the startup panel, set GIS Data Directory to the path to the datasets,
Step2: Range of coordinates at lower resolution
Step3: Decrease resolution and try the above steps with
Step4: Compute a raster map representing mean elevation for each 6m cell.
Step5: Compute range
Step6: Identify the features that are associated with large range values.
Step7: Display only the high values of range (0.5-20m) overlaid over the orthophoto.
Step8: Do not forget to zoom/set the display to computational region
Step9: Interpolation
Step10: Display bare ground and multiple return points over orthophoto
Step11: Display our imported points
Step12: Display points available in the data set
Step13: Extract first return to get points for DSM.
Step14: Remove all layers except for elev_lidrural_1m and elev_lidrurfirst_1m and
Step15: Condition for subset in GUI
Step16: Can you guess what is in the area that does not have any 1st return points?
|
6,923
|
<ASSISTANT_TASK:>
Python Code:
# Notebook setup: render matplotlib figures inline
%matplotlib inline
import os
import random
import numpy as np
import pickle
import matplotlib.pyplot
from matplotlib.pyplot import imshow
from PIL import Image
from scipy.spatial import distance
from igraph import *
from tqdm import tqdm
# Load the precomputed features: parallel lists of image paths and their
# PCA-reduced feature vectors, plus the fitted PCA model itself.
images, pca_features, pca = pickle.load(open('../data/features_caltech101.p', 'rb'))
# sanity check: show the first few images with a preview of their features
for i, f in list(zip(images, pca_features))[0:5]:
    print("image: %s, features: %0.2f,%0.2f,%0.2f,%0.2f... "%(i, f[0], f[1], f[2], f[3]))
# Optionally restrict the analysis to a random subsample of num_images
# images, keeping the pairwise-distance work tractable for large sets.
num_images = 10000
if len(images) > num_images:
    # `range` instead of the Python-2-only `xrange` keeps this cell
    # working on both Python 2 and 3 with identical behavior; sorting the
    # sampled indices preserves the original relative order of the lists.
    sort_order = sorted(random.sample(range(len(images)), num_images))
    images = [images[i] for i in sort_order]
    pca_features = [pca_features[i] for i in sort_order]
# Build a directed k-nearest-neighbor graph over the images: one vertex
# per image, with an edge i -> j for each of the kNN images j closest to
# i in PCA-feature space (edge weight = cosine distance).
kNN = 30
graph = Graph(directed=True)
graph.add_vertices(len(images))
# Collect all edges first and add them in one batch: igraph re-indexes the
# graph on every add_edge() call, making per-edge insertion quadratic;
# a single add_edges() call is the documented fast path. Edge attribute
# order follows insertion order, so the weights line up.
edges = []
edge_weights = []
for i in tqdm(range(len(images))):
    distances = [distance.cosine(pca_features[i], feat) for feat in pca_features]
    # skip position 0 of the sorted order: that is image i itself (distance 0)
    idx_kNN = sorted(range(len(distances)), key=lambda k: distances[k])[1:kNN+1]
    for j in idx_kNN:
        edges.append((i, j))
        edge_weights.append(distances[j])
graph.add_edges(edges)
graph.es['weight'] = edge_weights
summary(graph)
# persist the graph together with the matching image paths for later
# reuse; a context manager closes the file handle promptly
with open('../data/graph_caltech101_30knn.p', 'wb') as f:
    pickle.dump([images, graph], f)
#images, graph = pickle.load(open('../data/graph_caltech101_30knn.p', 'rb'))
def get_concatenated_images(indexes, thumb_height):
    """Load the images at *indexes* (into the global `images` path list),
    resize each to thumb_height pixels tall preserving aspect ratio, and
    return them concatenated side by side as one numpy array."""
    resized_arrays = []
    for idx in indexes:
        img = Image.open(images[idx]).convert('RGB')
        scaled_width = int(img.width * thumb_height / img.height)
        thumb = img.resize((scaled_width, thumb_height))
        resized_arrays.append(np.asarray(thumb))
    # all thumbnails share the same height, so concatenate along width
    return np.concatenate(resized_arrays, axis=1)
# pick two random indices
idx1 = int(len(images) * random.random())
idx2 = int(len(images) * random.random())
# run get_shortest_paths
# get_shortest_paths returns one vertex path per target; [0] takes the
# single path from idx1 to idx2, weighted by cosine distance, so the
# displayed sequence morphs smoothly between the two endpoint images
path = graph.get_shortest_paths(idx1, to=idx2, mode=OUT, output='vpath', weights='weight')[0]
# retrieve the images, concatenate into one, and display them
results_image = get_concatenated_images(path, 200)
matplotlib.pyplot.figure(figsize = (16,12))
imshow(results_image)
# second example query -- identical code to the cell above, rerun with a
# fresh random pair of endpoint images
# pick two random indices
idx1 = int(len(images) * random.random())
idx2 = int(len(images) * random.random())
# run get_shortest_paths
path = graph.get_shortest_paths(idx1, to=idx2, mode=OUT, output='vpath', weights='weight')[0]
# retrieve the images, concatenate into one, and display them
results_image = get_concatenated_images(path, 200)
matplotlib.pyplot.figure(figsize = (16,12))
imshow(results_image)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, open your saved feature vectors with pickle, and ensure the images are in the correct paths.
Step2: The following cell is optional. If you wish to restrict your graph to a smaller set of images (perhaps for testing purposes), this cell will take a random subsample of your image set of size num_images. Set this number however you wish or skip the cell if you intend to use all the images.
Step3: Next, we are going to build our graph. The graph will contain one vertex for every image. The edges of the graph are found by taking the k nearest neighbor images to each image in the set, and adding an edge between them whose distance is given by cosine distance.
Step4: Once the graph has been saved, we can save it along with the image paths to disk, so we can load them and use them later.
Step5: Later, we can retrieve them in the following way (uncomment the following line).
Step6: For the sake of convenience, we define a helper function which will concatenate a sequence of images into a single image so we can display the sequences in this notebook. The helper function takes a thumb_height and resizes all the images so they have that as their height.
Step7: Now we can do a query. We'll just pick two random indices (idx1 and idx2) and run igraph's get_shortest_paths method, using the cosine distance ('weight' as the weights).
Step8: Not too bad! Let's try again...
|
6,924
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import os
import requests
import easydict
import linecache
import pprint
import random
import itertools
pp = pprint.PrettyPrinter(indent=4)
species = 'yeast' # species of interest to load of and save the resut for
if species=='human':
association_file_name = 'goa_human.gaf.gz' # human
association_file_url = 'http://geneontology.org/gene-associations/goa_human.gaf.gz'
expression_file = 'WholeBlood.Gene.Filter6_20.TPM10_20.normalizedLimmaVoom.txt'
elif species=='yeast':
association_file_name = 'sgd.gaf.gz' # yeast
association_file_url = 'http://current.geneontology.org/annotations/sgd.gaf.gz'
expression_file = 'EisenYeastData_Measures.txt'
expression_url = 'http://www.i3s.unice.fr/~pasquier/web/userfiles/downloads/datasets/EisenYeastData_Measures.txt'
args = easydict.EasyDict({
"go_dir": 'gene_ontology/raw/', # directory to the Gene Ontology 'go.obo' file
"association_file_dir": 'species/{}/association_file/raw'.format(species), # directory to the human association file
"gene_expression_raw_dir": 'species/{}/gene_expression/raw'.format(species), # directory to the raw gene expression data
"result_gene_ontology_dir": 'species/{}/gene_expression/processed'.format(species), # directory in which the results would be saved
"max_num_pairs": -1, # maximum number of pairs randomly chosen (-1 means all)
"download_gene_ontology": True, # download the latest version of gene ontology into the specified directory above
"download_association_file": True, # download association file of the specieis of interest into the specified directory above
#"threshold": 0.8, # absolute pearson correlations below this cutoff point would be removed
"seed": 2021 # seed to make sure the random negative samples are reproducable
})
os.makedirs(args.result_gene_ontology_dir, exist_ok=True) # create 'result_gene_ontology_dir' folder (if it does not exist already)
np.random.seed(args.seed)
random.seed(args.seed)
subontology_map = {"C":"CC", "P":"BP", "F":"MF"}
f"{args.gene_expression_raw_dir}/{expression_file}"
if species=='yeast':
if os.path.exists(f"{args.gene_expression_raw_dir}/{expression_file}") is not True:
os.makedirs(args.gene_expression_raw_dir, exist_ok=True) # create 'gene_expression_raw_dir' folder (if it does not exist already)
r = requests.get(expression_url, allow_redirects=True)
open('{}/{}'.format(args.gene_expression_raw_dir, expression_file), 'wb').write(r.content)
elif species=='human':
assert os.path.exists(f"{args.gene_expression_raw_dir}/{expression_file}") is True, f"\nYou need to download the expression file first using the link and guideline provided above! \nPut the {expression_file} file in '{args.gene_expression_raw_dir}/' directory."
if args.download_gene_ontology:
os.makedirs(args.go_dir, exist_ok=True) # create 'data_loc' folder (if it does not exist already)
print("Downloading the latest version of Gene Ontology into '{}'...".format(args.go_dir))
url = 'http://current.geneontology.org/ontology/go.obo'
r = requests.get(url, allow_redirects=True)
open('{}/go.obo'.format(args.go_dir), 'wb').write(r.content)
print("Gene Ontology {}".format(linecache.getline('{}/go.obo'.format(args.go_dir), 2))) # Now: releases/2020-10-09
# Reading Gene Ontology to extract terms and their descriptive names
with open("{}/go.obo".format(args.go_dir)) as f:
content = f.readlines()
content = "".join([x for x in content])
content = content.split("[Typedef]")[0].split("[Term]")
print("Information of the last GO term in the file:\n~~~~~~~~~~~~~~~~~~~~~~~~~{}".format(content[-1]))
# Go through every GO term and extract the information needed ('id', 'alt_id', 'namespace', and 'is_obsolete')
go_term_dict = {}
for c in content:
go_id = ''
for l in c.split("\n"):
# id
if "id: GO:" in l[0:len("id: GO:")]:
go_id = l.split("id: ")[1]
go_term_dict[go_id] = {}
# alt_id
if "alt_id:" in l[0:len("alt_id")+1]:
go_term_dict[go_id].setdefault("alt_id", []).append(l.split("alt_id: ")[1])
# namespace
if "namespace:" in l[0:len("namespace")+1]:
go_term_dict[go_id]["namespace"] = l.split("namespace: ")[1]
# is_obsolete
if "is_obsolete:" in l[0:len("is_obsolete")+1]:
go_term_dict[go_id]["is_obsolete"] = l.split("is_obsolete: ")[1]
# print how the key:values are organized for every GO term
for i in range(15):
print(list(go_term_dict)[i], end=": ")
pp.pprint(go_term_dict[list(go_term_dict)[i]])
# group GO terms based on the sub-ontologies they belong to
subontology_go_term_dict = {}
for go_id in go_term_dict:
if not go_term_dict[go_id].get('is_obsolete', False): # or => if 'is_obsolete' not in go_term_dict[go_id]:
subontology_go_term_dict.setdefault(go_term_dict[go_id]['namespace'].split('_')[1][0].upper(), []).append(go_id)
# include 'alt_id' in the sub-ontology's groups of GO terms
for go_id in go_term_dict:
if go_term_dict[go_id].get('alt_id', False): # or => if 'alt_id' in go_term_dict[go_id]:
for alt_id in go_term_dict[go_id].get('alt_id'):
subontology_go_term_dict[go_term_dict[go_id]['namespace'].split('_')[1][0].upper()].append(alt_id)
# print how the key:values are organized for different sub-ontologies
for subontology in subontology_go_term_dict:
print("{} ({}):: {} <= {} GO term (with 'alt_id') => {}".format(
subontology,
subontology_map[subontology],
" ".join(subontology_go_term_dict[subontology][:3]),
len(subontology_go_term_dict[subontology]),
" ".join(subontology_go_term_dict[subontology][-3:])))
if args.download_association_file:
os.makedirs(args.association_file_dir, exist_ok=True) # create 'data_loc' folder (if it does not exist already)
print("Downloading the latest version of association file into '{}'...".format(args.association_file_dir))
r = requests.get(association_file_url, allow_redirects=True)
open('{}/{}'.format(args.association_file_dir, association_file_name), 'wb').write(r.content)
print("Done!")
df = pd.read_csv("{}/{}".format(args.association_file_dir, association_file_name), sep='\t', comment="!", skip_blank_lines=True, header=None, dtype=str)
df = df.iloc[:,[1, 2, 3, 4, 6, 8]]
if len(df[df[3].isnull()])==0:
df = df[~df[3].str.contains("NOT")]
df = df.dropna().reset_index(drop=True)
else:
df = df[df[3].isnull()]
df = df.dropna().reset_index(drop=True)
df = df.drop(df.columns[2], axis=1)
df
# keep track of the gene ids and their mappings
protein_gene_id_map = {}
for gene_id, protein_id in zip(df[1], df[2]):
protein_gene_id_map[protein_id] = gene_id
df = df[(df[6]!='ND') & (df[6]!='IEA')].reset_index(drop=True)
df
# protein dictionary to keep track of annotations for proteins (from each sub-ontology)
proteins_dict = {}
for index, row in df.iterrows():
gene = row[1]
go_term_id = row[4]
subontology = row[8]
if go_term_id in subontology_go_term_dict[subontology]:
proteins_dict.setdefault(gene, dict()).setdefault(subontology, set()).add(go_term_id)
# print how the key:values are organized for every gene/protein
for i in range(5):
print(list(proteins_dict)[i], end=": ")
pp.pprint(proteins_dict[list(proteins_dict)[i]])
print("\nTotal number of genes/proteins annotated:", len(proteins_dict))
# keep track of fully annotated genes/proteins
fully_annotated_proteins_wo_iea = []
for protein in proteins_dict:
if len(proteins_dict[protein]) == 3:
fully_annotated_proteins_wo_iea.append(protein)
print("Out of {} proteins {} are (experimentally or manually) annotated by all three sub-ontologies.".format(len(proteins_dict), len(fully_annotated_proteins_wo_iea)))
# Load the raw expression matrix (gene identifiers in column 0, samples in the rest).
df_gene_expression = pd.read_csv("{}/{}".format(args.gene_expression_raw_dir, expression_file), sep='\t')
# Strip the Ensembl version suffix (e.g. "ENSG...  .12" -> "ENSG...").
df_gene_expression.iloc[:, 0] = [i.split(".")[0] for i in df_gene_expression.iloc[:,0]] # useful for human
df_gene_expression
if species == 'yeast':
    # fancyimpute is a third-party package; only the yeast matrix needs imputation here.
    from fancyimpute import KNN#, NuclearNormMinimization, SoftImpute, BiScaler
    XY_incomplete = df_gene_expression.to_numpy()[:, 1:]
    # Impute missing expression values from the 10 nearest neighbours.
    XY_filled_knn = KNN(k=10).fit_transform(XY_incomplete)
    XY_filled_knn = np.round(XY_filled_knn, 2)
if species == 'yeast':
    # replacing the incomplete dataframe with the imputed one
    df_gene_expression.iloc[:, 1:] = XY_filled_knn
df_gene_expression
if species == 'human':
    # Map Ensembl gene IDs to UniProt accessions via the UniProt mapping service.
    import urllib.parse
    import urllib.request
    GENENAME_ids = {}
    url = 'https://www.uniprot.org/uploadlists/'
    params = {
        'from': 'ENSEMBL_ID',
        'to': 'ACC',
        'format': 'tab',
        'query': " ".join([i.split(".")[0] for i in df_gene_expression.Name])
    }
    data = urllib.parse.urlencode(params)
    data = data.encode('utf-8')
    req = urllib.request.Request(url, data)
    with urllib.request.urlopen(req) as f:
        response = f.read()
    #print(response.decode('utf-8'))
    # The first line of the response is the header row; skip it.
    for i, mapping in enumerate(response.decode('utf-8').strip().split("\n")):
        if i!=0:
            id1, id2 = mapping.split("\t")
            GENENAME_ids[id1] = id2
if species == 'human':
    # Keep only genes with a UniProt mapping, then rename them to accessions.
    df_gene_expression = df_gene_expression[df_gene_expression['Name'].isin(GENENAME_ids)] # for the negation, add ~
    df_gene_expression = df_gene_expression.reset_index(drop=True)
    df_gene_expression.Name = [GENENAME_ids[i] for i in list(df_gene_expression.Name)]
    df_gene_expression
# Keep only genes that are fully annotated in all three GO sub-ontologies.
df_gene_expression = df_gene_expression[df_gene_expression.iloc[:,0].isin(fully_annotated_proteins_wo_iea)] # for the negation, add ~
df_gene_expression = df_gene_expression.reset_index(drop=True)
df_gene_expression
# Absolute Pearson correlation between the expression profiles of every gene pair.
rpearson = np.abs(np.corrcoef(df_gene_expression.iloc[:, 1:].to_numpy()))
# Row index -> gene/protein identifier.
seq_list = {i:seq for i, seq in enumerate(df_gene_expression.iloc[:,0].to_numpy())}
print(seq_list)
list_of_pairs_full = list(itertools.combinations(list(seq_list.keys()), 2))
print("Full length of pairs:", len(list_of_pairs_full))
# Shuffle all pairs once.  (The original duplicated this identical shuffle in
# both branches of the if/else; only the max_num_pairs bookkeeping differs.)
list_of_pairs = [list_of_pairs_full[i] for i in np.random.choice(len(list_of_pairs_full), len(list_of_pairs_full), replace=False)]
if args.max_num_pairs == -1:
    args.max_num_pairs = len(list_of_pairs)
file = f'{species}_gene_expression.tsv'
print(f"Saving data into the file '{file}' with the binning strategy.")
with open(f"{args.result_gene_ontology_dir}/{file}", 'w') as fw:
    fw.write("Gene_1\tGene_2\tExpression_Value\n")
    # First write every strongly correlated pair (r >= 0.8); k is their count
    # and caps the size of each lower band below so the bins stay balanced.
    k = 0
    for pair in list_of_pairs:
        r = rpearson[pair[0], pair[1]]
        if 0.8<=r:# and k<=args.max_num_pairs:
            k = k + 1
            # transformation of the correlation coefficient into a Fishers' Z-score will be done by arctanh (inverse hyperbolic tangent function)
            fw.write("{}\t{}\t{}\n".format(seq_list[pair[0]], seq_list[pair[1]], np.round(np.arctanh(r), 8))) # The Fisher transform equals the inverse hyperbolic tangen/arctanh
            #fw.write("{} {} {}\n".format(seq_list[pair[0]], seq_list[pair[1]], np.round(r, 8)))
    # Then write at most k pairs from each lower-correlation band.
    # (FIX: replaces four copy-pasted, otherwise identical loops; the output
    # and its ordering are unchanged.)
    for lo, hi in [(0.6, 0.8), (0.4, 0.6), (0.2, 0.4), (0.0, 0.2)]:
        k2 = 0
        for pair in list_of_pairs:
            r = rpearson[pair[0], pair[1]]
            if lo<=r and r<hi and k2<k:# and k<=args.max_num_pairs:
                k2 = k2 + 1
                fw.write("{}\t{}\t{}\n".format(seq_list[pair[0]], seq_list[pair[1]], np.round(np.arctanh(r), 8))) # The Fisher transform equals the inverse hyperbolic tangen/arctanh
# Reload the saved pair file.
# NOTE(review): this reads from a hard-coded "species/..." path while the
# file above was written under args.result_gene_ontology_dir -- confirm the
# two locations refer to the same directory.
df = pd.read_csv(f"species/{species}/gene_expression/processed/{species}_gene_expression.tsv", sep="\t", dtype=str)
df
# Collect every gene that appears in at least one saved pair.
ge_genes = set(list(df.Gene_1) + list(df.Gene_2))
print(f"Number of {species} genes:", len(ge_genes))
# Write the sorted gene list, one identifier per line.
with open(f'{args.result_gene_ontology_dir}/{species}_gene_expression_genes.tsv', 'w') as fw:
    for gene in sorted(ge_genes):
        fw.write(f"{gene}\n")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configuration<a id='2'></a>
Step2: Asserting that the raw data exists
Step9: Loading Gene Ontology<a id='3'></a>
Step11: Loading Genes and Annotations<a id='4'></a>
Step14: removing 'ND' and 'IEA' annotations
Step16: Taking into account only fully annotated genes/proteins
Step17: Loading Gene Expression data<a id='5'></a>
Step18: Imputing the missing values if needed (using fancyimpute package)
Step19: Removing proteins without complete annotation
Step20: Computing Absolute Pearson Correlation<a id='6'></a>
Step21: Saving the Results<a id='7'></a>
Step22: back to top<br>
|
6,925
|
<ASSISTANT_TASK:>
Python Code:
# We'll make the number of bins, B
B = 50
# Per-dimension histograms of the original, well-separated data.
# NOTE(review): `normed=` was removed in Matplotlib 3.x in favor of
# `density=` -- confirm the pinned Matplotlib version.
plt.figure(0)
plt.hist(X[:, 0], bins = B, normed = True)
plt.title("Dimension 1 ($x$-axis)")
plt.figure(1)
plt.hist(X[:, 1], bins = B, normed = True)
plt.title("Dimension 2 ($y$-axis)")
# Apply a random 2x2 linear map to correlate the two dimensions.
rng = np.random.RandomState(74)
t = rng.normal(size = (2, 2))
Xr = X @ t
plt.scatter(Xr[:, 0], Xr[:, 1])
plt.figure(0)
plt.hist(Xr[:, 0], bins = B, normed = True)
plt.title("Dimension 1 ($x$-axis)")
plt.figure(1)
plt.hist(Xr[:, 1], bins = B, normed = True)
plt.title("Dimension 2 ($y$-axis)")
# Decorrelate the axes again with (randomized) PCA.
# NOTE(review): RandomizedPCA was removed from modern scikit-learn;
# PCA(svd_solver='randomized') is the replacement -- confirm the pinned version.
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components = 2, random_state = 12)
Xt = pca.fit_transform(Xr)
plt.scatter(Xt[:, 0], Xt[:, 1])
plt.figure(0)
plt.hist(Xt[:, 0], bins = B, normed = True)
plt.title("Dimension 1 ($x$-axis)")
plt.figure(1)
plt.hist(Xt[:, 1], bins = B, normed = True)
plt.title("Dimension 2 ($y$-axis)")
# First, define the method that, given a 1D histogram of the data, will return the
# solution to the generalized eigenvalue problem as defined in Eq. 2 of the Fergus paper.
import scipy.linalg as sla
def solve(hist):
    """Solve the generalized eigenvalue problem of Eq. 2 in the Fergus paper.

    (FIX: the docstring below had lost its quotes in this copy; the bare
    ``hist : 1D histogram`` line was a SyntaxError.)

    Parameters
    ----------
    hist : 1D histogram (array of bin densities) for one data dimension.

    Returns
    -------
    [v, U] : eigenvalues in ascending order and the matching eigenvectors
        (one column per eigenvalue), i.e. the approximate 1D eigenfunctions.
    """
    # RBF affinity between histogram bin centers, density-weighted below.
    W = pairwise.rbf_kernel(hist.reshape(-1, 1))
    P = np.diag(hist)
    PW = P @ W # matrix-matrix multiplication in Python 3
    PWP = PW @ P
    Ds = np.diag(PWP.sum(axis = 0)) # sum along the COLUMNS of PWP
    Dh = np.diag(PW.sum(axis = 0)) # sum along the COLUMNS of PW
    # Solve the generalized eigenvalue problem for symmetric matrices (this is, after all, a laplacian).
    L = Ds - PWP
    b = P @ Dh
    v, U = sla.eigh(L, b = b)
    return [v, U]
# Second, we'll create the 1D histograms for each dimension.
h1, e1 = np.histogram(Xt[:, 0], bins = B)
h2, e2 = np.histogram(Xt[:, 1], bins = B)
# Sidestep: add a small amount to each histogram bin, then renormalize to a density.
# NOTE(review): np.float was removed in NumPy 1.24 -- use plain `float` or
# np.float64 with modern NumPy; confirm the pinned version.
h1 = np.array(h1, dtype = np.float) + 0.01
h1 /= h1.sum()
h2 = np.array(h2, dtype = np.float) + 0.01
h2 /= h2.sum()
# Finally, we'll invoke the solver we wrote.
v1, U1 = solve(h1)
v2, U2 = solve(h2)
# How do the eigenvalues and eigenfunctions look?
plt.figure(0)
plt.title("Eigenvalues for axis 1")
plt.plot(v1)
plt.figure(1)
plt.title("Eigenvalues for axis 2")
plt.plot(v2)
plt.figure(0)
plt.title("Axis 1")
plt.plot(U1[:, 0], label = "EF 1")
plt.plot(U1[:, 1], label = "EF 2")
plt.plot(U1[:, 2], label = "EF 3")
plt.legend(loc = 0)
plt.figure(1)
plt.title("Axis 2")
plt.plot(U2[:, 0], label = "EF 1")
plt.plot(U2[:, 1], label = "EF 2")
plt.plot(U2[:, 2], label = "EF 3")
plt.legend(loc = 0)
# Assign the eigenfunctions
# NOTE(review): EF comes from the axis-2 solve (U2) but is interpolated over
# both the x1 and x2 grids below -- confirm this is intended.
EF = U2[:, 1:3] # Take the eigenfunctions, starting at index 1 (inclusive) and ending at 3 (exclusive).
EV = v2[1:3] # Take the eigenvalues associated with the eigenfunctions
# Create the x-axis from the histogram edges.
x1 = np.array([e1[i:i + 2].mean() for i in range(e1.shape[0] - 1)]) # Finds the midpoints of the histogram bins.
x2 = np.array([e2[i:i + 2].mean() for i in range(e2.shape[0] - 1)])
# Now, use the eigenfunctions (y-axis) to create the interpolators.
f1 = interp1d(x1, EF[:, 0])
f2 = interp1d(x2, EF[:, 1])
# What do they look like?
newdata = np.linspace(x1.min(), x1.max(), 100)
plt.figure(0)
plt.title("$x_1$ dimension")
plt.plot(x1, EF[:, 0], 'o', label = 'Original')
plt.plot(newdata, f1(newdata), '-', label = 'Interpolator')
plt.legend(loc = 0)
newdata = np.linspace(x2.min(), x2.max(), 100)
plt.figure(1)
plt.title("$x_2$ dimension")
plt.plot(x2, EF[:, 1], 'o', label = 'Original')
plt.plot(newdata, f2(newdata), '-', label = 'Interpolator')
plt.legend(loc = 0)
# We now have our eigenfunctions, at least in 1D. Let's see if we can reconstruct that heatmap from Fig 1.
plt.title("Original Data")
_ = plt.hexbin(Xt[:, 0], Xt[:, 1])
x = np.linspace(-2, 2.5, num = 100) # Our original (rotated) data varies from about (-2, 2.5) on the x-axis.
y = np.linspace(-1, 1.0, num = 100) # ...and from about (-1, 1) on the y-axis.
# Evaluate the eigenfunctions on each dimension.
fx = f1(x)
fy = f2(y)
# Plot the heatmaps along the grid.
plt.figure(0)
plt.title("Dim $x_1$: $\sigma_1$ = {:.2f}".format(EV[0]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You see the data are easily separable along both the $x$ and $y$ axes. Put another way--if someone gives you an $x$ value of a data point and asks you what cluster the point comes from, it's easy to make that assessment. Same with just providing the $y$ value of a data point--you could easily predict what cluster it comes from.
Step2: We've effectively rotated the data about 45 degrees. If we now plot the 1D histograms for the $x$ and $y$ axes again...
Step3: Good luck telling me where the clusters are! Now the dimensions are highly correlated. This is why, if this was the original data we were given, we need to perform a rotation on the data--decorrelate the axes so they become separable in one dimension (i.e. histograms).
Step5: The $x$ axis is still a lost cause, but now the $y$ axis has become separable again!
Step6: We can see things happening with the eigenfunctions--and, clearly, the eigenfunctions associated with the smallest eigenvalues are useless, but we knew this from looking at the examples in the Fergus paper itself--but let's continue on to get a feel for things in the interpolation process.
|
6,926
|
<ASSISTANT_TASK:>
Python Code:
import sqlite3
import sys
from glob import glob
# Open (or create) the demo database; swap in the :memory: line for a throwaway DB.
conn = sqlite3.connect('/users/mikespears/Desktop/mydb.db') #file-based db
#conn = sqlite3.connect(':memory:') # in-memory db
c = conn.cursor()
# Recreate a small ordinary (non-FTS) table for the basic-SQL examples.
c.execute('''DROP TABLE IF EXISTS uoftcoders''')
c.execute('''CREATE TABLE uoftcoders (date, title, attendance)''')
conn.commit()
# Parameterized inserts (the ? placeholders avoid SQL injection / quoting bugs).
c.execute('''INSERT INTO uoftcoders VALUES (?, ?, ?)''', ["March 7, 2019", "Full Text Search with SQLITE", 12])
c.execute('''INSERT INTO uoftcoders VALUES (?, ?, ?)''', ["March 14, 2019", "Software defined radio", 15])
conn.commit()
c.execute('''SELECT * FROM uoftcoders WHERE date = ?''', ["March 7, 2019"])
print(c.fetchall())
c.execute('''SELECT * FROM uoftcoders WHERE attendance > ?''', [10])
print(c.fetchall())
#basic pattern matching with LIKE (% = any run of characters, _ = one character)
c.execute('''SELECT * FROM uoftcoders WHERE date LIKE ?''', ["%march_%"])
print(c.fetchall())
# Recreate the full-text-search table (FTS4 virtual table).
c.execute('''DROP TABLE IF EXISTS files''')
c.execute('''CREATE VIRTUAL TABLE files USING fts4(filename, text)''')
# Alternative tokenizers, kept for reference:
#c.execute('''CREATE VIRTUAL TABLE files USING fts4(filename, text, tokenize=porter)''')
#c.execute('''CREATE VIRTUAL TABLE files USING fts4(filename, text, tokenize=unicode61)''')
conn.commit()
# Insert all the files in a directory (that contains only plaintext files)
textFileDir = "/users/mikespears/downloads/science/"
allFiles = glob(textFileDir + "*.*")
for path in allFiles:
    with open(path) as fh:  # renamed from `file` (shadowed the builtin)
        try:
            text = fh.read()
            c.execute('''INSERT INTO files(filename, text) VALUES(?, ?)''', [path, text])
            #print(path)
        except Exception as e:
            # FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
            # catch Exception and report the actual error instead.
            print("Read or insert error: %s (in %s)" % (e, path))
conn.commit()
# Sanity check: how many documents were indexed?
c.execute('''SELECT COUNT(*) from files''')
print(c.fetchall())
#basic token search text column
for row in c.execute('''SELECT filename FROM FILES WHERE text MATCH ?''', ['relativity']):
    print(row)
#implicit 'AND': both tokens must appear somewhere in the document
for row in c.execute('''SELECT filename FROM FILES WHERE text MATCH ?''', ['special relativity']):
    print(row)
# OR
for row in c.execute('''SELECT filename FROM FILES WHERE text MATCH ?''', ['special OR relativity']):
    print(row)
#search for a phrase (tokens adjacent and in order)
for row in c.execute('''SELECT filename FROM FILES WHERE text MATCH ?''', ['"special relativity"']):
    print(row)
#exclude a token
for row in c.execute('''SELECT filename FROM FILES WHERE text MATCH ?''', ['relativity -special']):
    print(row)
#NEAR operator
for row in c.execute('''SELECT filename FROM FILES WHERE text MATCH ?''', ["relativity NEAR special"]):
    print(row)
#specify a column in the search term
for row in c.execute('''SELECT filename FROM FILES WHERE text MATCH ?''', ['filename:faq']):
    print(row)
# snippet() returns an excerpt with each match wrapped in the given markers.
for row in c.execute('''SELECT snippet(files, ">>>>", "<<<<", "..."), filename FROM FILES WHERE text MATCH ?''', ['relativity']):
    print ("\n\n--------%s\n\n" % row[1])
    print(row[0])
#get offset
for row in c.execute('''SELECT offsets(FILES) FROM FILES WHERE text MATCH ?''', ['filename:faq']):
    print(row)
#get matchinfo
for row in c.execute('''SELECT matchinfo(FILES) FROM FILES WHERE files MATCH ?''', ['filename:faq']):
    print(row)
#direct access to full-text index
c.execute('''DROP TABLE IF EXISTS files_terms''')
# fts4aux exposes the term index of the `files` FTS table as a read-only table.
c.execute('''CREATE VIRTUAL TABLE files_terms USING fts4aux(files)''')
conn.commit()
for row in c.execute('''SELECT * from files_terms ORDER BY occurrences DESC'''):
    print(row)
conn.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connect to a database file or create an in-memory database
Step2: SQLite basics
Step3: Full-Text-Search
|
6,927
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns; sns.set_style("whitegrid")
import random
from matplotlib import pyplot as plt
%matplotlib inline
import numpy as np
class MIR(object):
    '''Simulator for the MIR exam.

    Parameters
    ----------
    study_level: int
        Preparation level from 0 (every answer is a pure guess) to 10
        (every answer is known and correct).
    n_questions: int
        Number of questions on the exam (the real MIR has 225).
    answer_unknowns: bool
        If True, unknown questions are answered at random; if False,
        they are left blank.
    '''
    def __init__(self, study_level, n_questions, answer_unknowns):
        self.study_level = study_level
        self.n_questions = n_questions
        self.answer_unknowns = answer_unknowns
    def solveExam(self):
        '''Answer every question once and return the mark out of 10.'''
        score = 0
        for _ in range(self.n_questions):
            score = self._answerQuestion(score)
        return (score / (3 * self.n_questions)) * 10
    def _answerQuestion(self, score):
        '''Answer one question and return the updated raw score.

        A known question (probability study_level/10) earns +3.  An
        unknown one is either skipped, or guessed: a lucky guess earns
        +3, a wrong one costs -1.'''
        roll = random.randint(1, 10)
        if roll <= self.study_level:
            return score + 3
        if not self.answer_unknowns:
            return score
        correct = random.choice(['a', 'b', 'c', 'd'])
        guess = random.choice(['a', 'b', 'c', 'd'])
        return score + 3 if guess == correct else score - 1
    def _getScoreProgress(self):
        '''Answer a full exam and return the running mark (out of 10)
        recorded after every question.'''
        history = []
        score = 0
        for _ in range(self.n_questions):
            score = self._answerQuestion(score)
            history.append((score / (3 * self.n_questions)) * 10)
        return history
itera = 0
# A zero-preparation candidate who guesses every question.
mir = MIR(study_level = 0, n_questions=225, answer_unknowns=True)
max_iter = 10000
# Overlay the running score of every simulated exam on a single figure.
while itera < max_iter:
    itera += 1
    plt.plot(mir._getScoreProgress())
plt.ylabel("Score / 10")
plt.xlabel("Question number")
plt.title("{} random MIR exams".format(max_iter))
plt.xlim(0, 225)
plt.show()
def solve_ManyExams(study_level, max_iter=1000):
    '''Simulate `max_iter` MIR exams (225 questions each) at `study_level`.

    Returns two lists:
        distrib1: The marks answering randomly unknown questions.
        distrib2: The marks answering only the questions you know.'''
    # The simulators keep no state between exams, so build them once instead
    # of re-instantiating both on every iteration (same random stream, faster).
    mir1 = MIR(study_level=study_level, n_questions=225, answer_unknowns=True)
    mir2 = MIR(study_level=study_level, n_questions=225, answer_unknowns=False)
    distrib1 = []
    distrib2 = []
    for _ in range(max_iter):
        distrib1.append(mir1.solveExam())
        distrib2.append(mir2.solveExam())
    return (distrib1, distrib2)
# Compare both answering strategies at an intermediate study level.
distrib1, distrib2 = solve_ManyExams(study_level=5)
sns.distplot(distrib1, hist=False, label='Answering unknowns')
sns.distplot(distrib2, hist=False, label='Not answering unknowns')
plt.xlabel("Score / 10")
plt.ylabel("Density")
# NOTE(review): the title says 10000 exams but solve_ManyExams defaults to
# max_iter=1000 -- confirm which number is intended.
plt.title("10000 MIRs, study level of 5/10")
plt.legend()
plt.show()
print("Answering unknowns:\t%.2f+-%.2f\n" % (np.mean(distrib1), np.std(distrib1)))
print("Not Answering unknowns:\t%.2f+-%.2f" % (np.mean(distrib2), np.std(distrib2)))
mean_list = []
std_list = []
# Sweep every study level and record how much guessing shifts the mean and spread.
for study_level in range(0,11):
    distrib1, distrib2 = solve_ManyExams(study_level, max_iter=50000) # Lots of iterations to get clean distributions
    mean_diff = np.mean(distrib1) - np.mean(distrib2)
    std_diff = np.std(distrib1) - np.std(distrib2)
    mean_list.append(mean_diff)
    std_list.append(std_diff)
plt.plot(list(range(0,11)),mean_list, label="Mean score gain")
plt.plot(list(range(0,11)),std_list, label="Variability gain")
plt.xlabel("Study level")
plt.ylabel("Score difference / 10")
plt.xticks([x for x in range(0, 11)])
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What happens if we take the exam 10000 times randomly, without having studied at all?
Step2: There's a chance that you can luck out and get close to a 1.5/10 even if you haven't studied at all! Notice, however, that if you are unlucky, you might score a -1.5/10. Ouch!
Step3: Answering the questions you don't know doesn't affect your mean score. It just increases the variability, making it possible to get slightly higher (and lower) marks.
|
6,928
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
import time
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
from bs4 import BeautifulSoup
import requests
import csv
import os
import random
import sys
import json
sys.path.insert(0, '/aclImdb/')
# function to get name of movie from each URL
# function to get name of movie from each URL
def get_movie(url):
    '''
    Scrapes a given URL from IMDB.com. The URL's page contains many reviews
    for one particular movie.  Returns the name of that movie, or None when
    the page no longer carries a title (expired URLs).
    '''
    # Keep asking for the page until you get it. Sleep if necessary.
    # FIX: requests.get never returns None -- it raises on network failure --
    # so the original `while pageText == None` retry loop was dead code.
    pageText = None
    while pageText is None:
        try:
            pageText = requests.get(url)
        except requests.RequestException:
            time.sleep(5)
    soup = BeautifulSoup(pageText.text, "html.parser")
    # Some of our URL's are expired! Return None if so.
    title_div = soup.find("div", attrs={"id": "tn15title"})
    if title_div is None:
        return None
    return title_div.find("a").get_text()
# get all urls for train and test, neg and pos
with open('aclImdb/train/urls_pos.txt','r') as f:
train_pos_urls = f.readlines()
with open('aclImdb/train/urls_neg.txt','r') as f:
train_neg_urls = f.readlines()
with open('aclImdb/test/urls_pos.txt','r') as f:
test_pos_urls = f.readlines()
with open('aclImdb/test/urls_neg.txt','r') as f:
test_neg_urls = f.readlines()
print len(train_pos_urls), len(train_neg_urls), len(test_pos_urls), len(test_neg_urls)
def make_url_dict(url_list):
    '''
    Input: List of URLs.
    Output: Dictionary of URL: movie based on scraped movie title.

    FIX: the dictionary was built but never returned, so every caller
    received None (and json.dump then serialized null).
    '''
    url_dict = dict(zip(url_list, [None] * len(url_list)))
    index = 0
    for url in url_list:
        if url_dict[url] is None:  # duplicates resolve to one fetch
            url_dict[url] = get_movie(url)
            # Every once in a while, report how many URLs we have digested
            # out of 12,500 total.
            if random.random() < 0.001:
                print(index)
            index += 1
            time.sleep(0.001)  # be polite between requests
    return url_dict
%time
train_pos_dict = make_url_dict(train_pos_urls)
fp = open("url_movie_train_pos.json","w")
json.dump(train_pos_dict, fp)
fp.close()
print len(train_pos_dict.keys()), len(list(set(list(train_pos_urls))))
%time
train_neg_dict = make_url_dict(train_neg_urls)
fp = open("url_movie_train_neg.json","w")
json.dump(train_neg_dict, fp)
fp.close()
%time
test_pos_dict = make_url_dict(test_pos_urls)
fp = open("url_movie_test_pos.json","w")
json.dump(test_pos_dict, fp)
fp.close()
%time
test_neg_dict = make_url_dict(test_neg_urls)
fp = open("url_movie_test_neg.json","w")
json.dump(test_neg_dict, fp)
fp.close()
# Reload the cached URL -> movie-name mappings.
# FIX: the train-positive cache was written above as
# "url_movie_train_pos.json" but was being reloaded from a misspelled
# "url_movie_tr_pos.json".
with open("url_movie_train_pos.json", "r") as fd:
    train_pos_dict = json.load(fd)
with open("url_movie_train_neg.json", "r") as fd:
    train_neg_dict = json.load(fd)
with open("url_movie_test_pos.json", "r") as fd:
    test_pos_dict = json.load(fd)
with open("url_movie_test_neg.json", "r") as fd:
    test_neg_dict = json.load(fd)
def data_collect(directory, pos, url_dict, url_list):
    '''
    Collect every review .txt file under `directory` into a DataFrame.

    Inputs:
        directory: Directory to collect reviews from, with a trailing
                   separator. ex) 'aclImdb/train/pos/'
        pos: True or False, depending on whether the reviews are labelled positive or not.
        url_dict: the relevant URL-Movie dictionary (created above) for the particular category
        url_list: the list of URLs for that particular category
    Returns:
        DataFrame with columns movie_id, stars, positive, text, url, movie_name.
    '''
    columns = ['movie_id', 'stars', 'positive', 'text', 'url', 'movie_name']
    rows = []
    # Crawl over the directory, attaining relevant data for each of the .txt review files.
    for review in list(os.walk(directory))[0][2]:
        # Andrew L. Maas's stanford group encoded the reviewID and number of stars for a review in the file's name.
        # For example, "0_10.txt" means reviewID 0 received 10 stars. The reviews are in the same order as the URLs,
        # so the reviewID is precisely the location of that movie's URL in the respective URL list.
        stars = int(review.split("_")[1].split(".")[0])
        movieID = int(review.split("_")[0])  # everything before the underscore
        # FIX: use a context manager -- the original leaked one open file
        # handle per review.
        with open('%(dir)s%(review)s' % {'dir': directory, 'review': review}, 'r') as fp:
            text = fp.read()
        url = url_list[movieID]
        rows.append({'movie_id': movieID, 'stars': stars, 'positive': pos,
                     'text': text, 'url': url, 'movie_name': url_dict[url]})
    # FIX: build the frame once -- per-row DataFrame.append was quadratic and
    # is removed in pandas 2.x.
    return pd.DataFrame(rows, columns=columns)
# First get the positive reviews for the train_df.
train_df = data_collect('aclImdb/train/pos/', True, train_pos_dict, train_pos_urls)
# Then append the negative reviews
# NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat is the
# modern replacement -- confirm the pinned pandas version.
train_df = train_df.append(data_collect('aclImdb/train/neg/', False, train_neg_dict, train_neg_urls))
# First get the positive reviews for the test_df.
test_df = data_collect('aclImdb/test/pos/', True, test_pos_dict, test_pos_urls)
# Then append the negative reviews
test_df = test_df.append(data_collect('aclImdb/test/neg/', False, test_neg_dict, test_neg_urls))
# Convert each frame to a plain column -> list-of-values dict so the json
# module can serialize it.
train_df_dict = {feature: train_df[feature].values.tolist() for feature in train_df.columns.values}
test_df_dict = {feature: test_df[feature].values.tolist() for feature in test_df.columns.values}
# Train
fp = open("train_df_dict.json","w")
json.dump(train_df_dict, fp)
fp.close()
# Test
fp = open("test_df_dict.json","w")
json.dump(test_df_dict, fp)
fp.close()
# Reload the serialized dictionaries (round-trip check / later reuse).
with open("train_df_dict.json", "r") as fd:
    train_df_dict = json.load(fd)
with open("test_df_dict.json", "r") as fd:
    test_df_dict = json.load(fd)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below we write a function to scrape an IMDB url and return a movie name.
Step2: Now let's get the list of URLs for each of our data sets
Step3: Let's see how long each list is.
Step4: There are 12500 reviews in each sub data set. Each review has a corresponding URL. However, the URL lists have duplicates, as two reviews can be for the same movie and thus be found on the same IMDB webpage.
Step5: Let's make a dictionary of stored movie names for each subdata set, saving into a JSON file so we only have to do this once.
Step6: If we did this right for training positives, the length of the dictionary keys should be equal to the number of unique URLs in its URL list.
Step7: Now that we have saved movie names associated with each URL, we can finally create our data table of reviews. We will define a function data_collect which iterates over our directories, making a pandas dataframe out of all the reviews in a particular category (e.g. Test Set, Positive Reviews).
Step8: Data Collection
Step9: Now we'll create a testing data frame.
Step10: Let's create a dictionary out of each dataframe so that we can save each in JSON format.
Step11: Let's reopen.
|
6,929
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow import keras
# Constant tensors: immutable values.
x = tf.constant([[5, 2], [1, 3]])
print(x)
x.numpy()
print("dtype:", x.dtype)
print("shape:", x.shape)
print(tf.ones(shape=(2, 1)))
print(tf.zeros(shape=(2, 1)))
# Random tensors: normal and integer-uniform initializers.
x = tf.random.normal(shape=(2, 2), mean=0.0, stddev=1.0)
x = tf.random.uniform(shape=(2, 2), minval=0, maxval=10, dtype="int32")
# Variables: mutable tensors, updated in place with assign / assign_add.
initial_value = tf.random.normal(shape=(2, 2))
a = tf.Variable(initial_value)
print(a)
new_value = tf.random.normal(shape=(2, 2))
a.assign(new_value)
for i in range(2):
    for j in range(2):
        assert a[i, j] == new_value[i, j]
added_value = tf.random.normal(shape=(2, 2))
a.assign_add(added_value)
for i in range(2):
    for j in range(2):
        assert a[i, j] == new_value[i, j] + added_value[i, j]
# Eager math on tensors.
a = tf.random.normal(shape=(2, 2))
b = tf.random.normal(shape=(2, 2))
c = a + b
d = tf.square(c)
e = tf.exp(d)
# GradientTape: constant tensors must be watched explicitly.
a = tf.random.normal(shape=(2, 2))
b = tf.random.normal(shape=(2, 2))
with tf.GradientTape() as tape:
    tape.watch(a)  # Start recording the history of operations applied to `a`
    c = tf.sqrt(tf.square(a) + tf.square(b))  # Do some math using `a`
    # What's the gradient of `c` with respect to `a`?
    dc_da = tape.gradient(c, a)
    print(dc_da)
# Variables are watched automatically -- no tape.watch needed.
a = tf.Variable(a)
with tf.GradientTape() as tape:
    c = tf.sqrt(tf.square(a) + tf.square(b))
    dc_da = tape.gradient(c, a)
    print(dc_da)
# Nested tapes compute higher-order derivatives.
with tf.GradientTape() as outer_tape:
    with tf.GradientTape() as tape:
        c = tf.sqrt(tf.square(a) + tf.square(b))
        dc_da = tape.gradient(c, a)
    d2c_da2 = outer_tape.gradient(dc_da, a)
    print(d2c_da2)
# FIX: the class docstring had lost its quotes in this copy; the bare
# `y = w.x + b` line raised a NameError while the class body executed.
class Linear(keras.layers.Layer):
    """Densely-connected layer: y = w.x + b."""

    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        # Weight matrix, drawn from a random normal distribution.
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        # Bias vector, initialized to zeros.
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

# Instantiate our layer.
linear_layer = Linear(units=4, input_dim=2)
# The layer can be treated as a function.
# Here we call it on some data.
y = linear_layer(tf.ones((2, 2)))
assert y.shape == (2, 4)
assert linear_layer.weights == [linear_layer.w, linear_layer.b]
# FIX: restored the docstring quotes (the bare `y = w.x + b` line was a
# runtime error in the class body).
class Linear(keras.layers.Layer):
    """y = w.x + b, with lazily built weights (created on first call)."""

    def __init__(self, units=32):
        super(Linear, self).__init__()
        self.units = units

    def build(self, input_shape):
        # Weights are created here, once the input feature size is known.
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

# Instantiate our lazy layer.
linear_layer = Linear(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(tf.ones((2, 2)))
# Prepare a dataset.
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
# Flatten to 784-dim vectors and scale pixel values to [0, 1].
dataset = tf.data.Dataset.from_tensor_slices(
    (x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
dataset = dataset.shuffle(buffer_size=1024).batch(64)
# Instantiate our linear layer (defined above) with 10 units.
linear_layer = Linear(10)
# Instantiate a logistic loss function that expects integer targets.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Instantiate an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
# Iterate over the batches of the dataset.
for step, (x, y) in enumerate(dataset):
    # Open a GradientTape.
    with tf.GradientTape() as tape:
        # Forward pass.
        logits = linear_layer(x)
        # Loss value for this batch.
        loss = loss_fn(y, logits)
    # Get gradients of the loss wrt the weights.
    gradients = tape.gradient(loss, linear_layer.trainable_weights)
    # Update the weights of our linear layer.
    optimizer.apply_gradients(zip(gradients, linear_layer.trainable_weights))
    # Logging.
    if step % 100 == 0:
        print("Step:", step, "Loss:", float(loss))
# FIX: restored the docstring quotes (the bare prose line was a SyntaxError
# inside the class body).
class ComputeSum(keras.layers.Layer):
    """Returns the running sum of all inputs seen so far."""

    def __init__(self, input_dim):
        super(ComputeSum, self).__init__()
        # Create a non-trainable weight.
        self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False)

    def call(self, inputs):
        # Accumulate the column-wise sum of this batch into the running total.
        self.total.assign_add(tf.reduce_sum(inputs, axis=0))
        return self.total

my_sum = ComputeSum(2)
x = tf.ones((2, 2))
y = my_sum(x)
print(y.numpy())  # [2. 2.]
y = my_sum(x)
print(y.numpy())  # [4. 4.]
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
# Let's reuse the Linear class
# with a `build` method that we defined above.
# FIX: restored the docstring quotes (the bare prose line was a SyntaxError).
class MLP(keras.layers.Layer):
    """Simple stack of Linear layers."""

    def __init__(self):
        super(MLP, self).__init__()
        self.linear_1 = Linear(32)
        self.linear_2 = Linear(32)
        self.linear_3 = Linear(10)

    def call(self, inputs):
        # Two hidden Linear layers with ReLU, then a linear output layer.
        x = self.linear_1(inputs)
        x = tf.nn.relu(x)
        x = self.linear_2(x)
        x = tf.nn.relu(x)
        return self.linear_3(x)
mlp = MLP()
# The first call to the `mlp` object will create the weights.
y = mlp(tf.ones(shape=(3, 64)))
# Weights are recursively tracked (3 sub-layers x (w, b) = 6 weights).
assert len(mlp.weights) == 6
# The built-in Sequential container is equivalent to the hand-written MLP.
mlp = keras.Sequential(
    [
        keras.layers.Dense(32, activation=tf.nn.relu),
        keras.layers.Dense(32, activation=tf.nn.relu),
        keras.layers.Dense(10),
    ]
)
# FIX: restored the docstring quotes (the bare prose line was a SyntaxError).
class ActivityRegularization(keras.layers.Layer):
    """Layer that creates an activity sparsity regularization loss."""

    def __init__(self, rate=1e-2):
        super(ActivityRegularization, self).__init__()
        self.rate = rate  # regularization strength

    def call(self, inputs):
        # We use `add_loss` to create a regularization loss
        # that depends on the inputs.
        self.add_loss(self.rate * tf.reduce_sum(inputs))
        return inputs
# Let's use the loss layer in a MLP block.
# FIX: restored the docstring quotes (the bare prose line was a SyntaxError).
class SparseMLP(keras.layers.Layer):
    """Stack of Linear layers with a sparsity regularization loss."""

    def __init__(self):
        super(SparseMLP, self).__init__()
        self.linear_1 = Linear(32)
        self.regularization = ActivityRegularization(1e-2)
        self.linear_3 = Linear(10)

    def call(self, inputs):
        x = self.linear_1(inputs)
        x = tf.nn.relu(x)
        # Registers an activity loss as a side effect of the forward pass.
        x = self.regularization(x)
        return self.linear_3(x)

mlp = SparseMLP()
y = mlp(tf.ones((10, 10)))
print(mlp.losses)  # List containing one float32 scalar
# Losses correspond to the *last* forward pass.
mlp = SparseMLP()
mlp(tf.ones((10, 10)))
assert len(mlp.losses) == 1
mlp(tf.ones((10, 10)))
assert len(mlp.losses) == 1  # No accumulation.
# Let's demonstrate how to use these losses in a training loop.
# Prepare a dataset.
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
dataset = dataset.shuffle(buffer_size=1024).batch(64)
# A new MLP.
mlp = SparseMLP()
# Loss and optimizer.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
for step, (x, y) in enumerate(dataset):
with tf.GradientTape() as tape:
# Forward pass.
logits = mlp(x)
# External loss value for this batch.
loss = loss_fn(y, logits)
# Add the losses created during the forward pass.
loss += sum(mlp.losses)
# Get gradients of the loss wrt the weights.
gradients = tape.gradient(loss, mlp.trainable_weights)
# Update the weights of our linear layer.
optimizer.apply_gradients(zip(gradients, mlp.trainable_weights))
# Logging.
if step % 100 == 0:
print("Step:", step, "Loss:", float(loss))
# Instantiate a metric object
accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
# Prepare our layer, loss, and optimizer.
model = keras.Sequential(
[
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(10),
]
)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
for epoch in range(2):
# Iterate over the batches of a dataset.
for step, (x, y) in enumerate(dataset):
with tf.GradientTape() as tape:
logits = model(x)
# Compute the loss value for this batch.
loss_value = loss_fn(y, logits)
# Update the state of the `accuracy` metric.
accuracy.update_state(y, logits)
# Update the weights of the model to minimize the loss value.
gradients = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
# Logging the current accuracy value so far.
if step % 200 == 0:
print("Epoch:", epoch, "Step:", step)
print("Total running accuracy so far: %.3f" % accuracy.result())
# Reset the metric's state at the end of an epoch
accuracy.reset_state()
class F1Score(keras.metrics.Metric):
    """Streaming F1 score for binary predictions given as probabilities.

    Predictions >= `threshold` count as positive. True-positive,
    false-positive and false-negative counts accumulate across batches;
    `result()` combines them as F1 = 2 * P * R / (P + R).
    """

    def __init__(self, name="f1_score", dtype="float32", threshold=0.5, **kwargs):
        super().__init__(name=name, dtype=dtype, **kwargs)
        # Bug fix: was hard-coded to 0.5, silently ignoring the argument.
        self.threshold = threshold
        self.true_positives = self.add_weight(
            name="tp", dtype=dtype, initializer="zeros"
        )
        self.false_positives = self.add_weight(
            name="fp", dtype=dtype, initializer="zeros"
        )
        self.false_negatives = self.add_weight(
            name="fn", dtype=dtype, initializer="zeros"
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate TP/FP/FN counts for one batch of labels/probabilities."""
        y_pred = tf.math.greater_equal(y_pred, self.threshold)
        y_true = tf.cast(y_true, tf.bool)
        y_pred = tf.cast(y_pred, tf.bool)
        true_positives = tf.cast(y_true & y_pred, self.dtype)
        false_positives = tf.cast(~y_true & y_pred, self.dtype)
        false_negatives = tf.cast(y_true & ~y_pred, self.dtype)
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, self.dtype)
            true_positives *= sample_weight
            false_positives *= sample_weight
            false_negatives *= sample_weight
        self.true_positives.assign_add(tf.reduce_sum(true_positives))
        self.false_positives.assign_add(tf.reduce_sum(false_positives))
        self.false_negatives.assign_add(tf.reduce_sum(false_negatives))

    def result(self):
        # NOTE: yields NaN before any positive has been seen (0 / 0).
        precision = self.true_positives / (self.true_positives + self.false_positives)
        recall = self.true_positives / (self.true_positives + self.false_negatives)
        return precision * recall * 2.0 / (precision + recall)

    def reset_state(self):
        self.true_positives.assign(0)
        self.false_positives.assign(0)
        self.false_negatives.assign(0)
m = F1Score()
m.update_state([0, 1, 0, 0], [0.3, 0.5, 0.8, 0.9])
print("Intermediate result:", float(m.result()))
m.update_state([1, 1, 1, 1], [0.1, 0.7, 0.6, 0.0])
print("Final result:", float(m.result()))
# Prepare our layer, loss, and optimizer.
model = keras.Sequential(
[
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(10),
]
)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
# Create a training step function.
@tf.function # Make it fast.
def train_on_batch(x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = loss_fn(y, logits)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
return loss
# Prepare a dataset.
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
dataset = dataset.shuffle(buffer_size=1024).batch(64)
for step, (x, y) in enumerate(dataset):
loss = train_on_batch(x, y)
if step % 100 == 0:
print("Step:", step, "Loss:", float(loss))
class Dropout(keras.layers.Layer):
    """Dropout layer that is only active when called with training=True."""

    def __init__(self, rate):
        super(Dropout, self).__init__()
        # Fraction of units to drop during training.
        self.rate = rate

    def call(self, inputs, training=None):
        # In inference mode (training falsy) the layer is the identity.
        if not training:
            return inputs
        return tf.nn.dropout(inputs, rate=self.rate)
class MLPWithDropout(keras.layers.Layer):
    """MLP of two Linear layers with ReLU and dropout in between.

    Relies on the `Linear` and `Dropout` layers defined earlier in this
    notebook; forwards the `training` flag to the Dropout sublayer.
    """

    def __init__(self):
        super(MLPWithDropout, self).__init__()
        self.linear_1 = Linear(32)
        self.dropout = Dropout(0.5)  # drops 50% of activations while training
        self.linear_3 = Linear(10)

    def call(self, inputs, training=None):
        """Forward pass; `training` toggles dropout on or off."""
        x = self.linear_1(inputs)
        x = tf.nn.relu(x)
        # Dropout is a no-op unless `training` is truthy.
        x = self.dropout(x, training=training)
        return self.linear_3(x)
mlp = MLPWithDropout()
y_train = mlp(tf.ones((2, 2)), training=True)
y_test = mlp(tf.ones((2, 2)), training=False)
# We use an `Input` object to describe the shape and dtype of the inputs.
# This is the deep learning equivalent of *declaring a type*.
# The shape argument is per-sample; it does not include the batch size.
# The functional API focused on defining per-sample transformations.
# The model we create will automatically batch the per-sample transformations,
# so that it can be called on batches of data.
inputs = tf.keras.Input(shape=(16,), dtype="float32")
# We call layers on these "type" objects
# and they return updated types (new shapes/dtypes).
x = Linear(32)(inputs) # We are reusing the Linear layer we defined earlier.
x = Dropout(0.5)(x) # We are reusing the Dropout layer we defined earlier.
outputs = Linear(10)(x)
# A functional `Model` can be defined by specifying inputs and outputs.
# A model is itself a layer like any other.
model = tf.keras.Model(inputs, outputs)
# A functional model already has weights, before being called on any data.
# That's because we defined its input shape in advance (in `Input`).
assert len(model.weights) == 4
# Let's call our model on some data, for fun.
y = model(tf.ones((2, 16)))
assert y.shape == (2, 10)
# You can pass a `training` argument in `__call__`
# (it will get passed down to the Dropout layer).
y = model(tf.ones((2, 16)), training=True)
inputs = tf.keras.Input(shape=(784,), dtype="float32")
x = keras.layers.Dense(32, activation="relu")(inputs)
x = keras.layers.Dense(32, activation="relu")(x)
outputs = keras.layers.Dense(10)(x)
model = tf.keras.Model(inputs, outputs)
# Specify the loss, optimizer, and metrics with `compile()`.
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train the model with the dataset for 2 epochs.
model.fit(dataset, epochs=2)
model.predict(dataset)
model.evaluate(dataset)
class CustomModel(keras.Model):
    """Keras Model with a fully self-contained custom `train_step`.

    Loss, optimizer and metrics are created in `__init__`, so `compile()`
    can be called with no arguments and `fit()` drives `train_step`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mean tracker turns per-batch losses into a running average.
        self.loss_tracker = keras.metrics.Mean(name="loss")
        self.accuracy = keras.metrics.SparseCategoricalAccuracy()
        self.loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        self.optimizer = keras.optimizers.Adam(learning_rate=1e-3)

    def train_step(self, data):
        """Run one gradient update on a batch and return metric values."""
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # Forward pass
            loss = self.loss_fn(y, y_pred)
        gradients = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_weights))
        # Update metrics (includes the metric that tracks the loss)
        self.loss_tracker.update_state(loss)
        self.accuracy.update_state(y, y_pred)
        # Return a dict mapping metric names to current value
        return {"loss": self.loss_tracker.result(), "accuracy": self.accuracy.result()}

    @property
    def metrics(self):
        # We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch.
        return [self.loss_tracker, self.accuracy]
inputs = tf.keras.Input(shape=(784,), dtype="float32")
x = keras.layers.Dense(32, activation="relu")(inputs)
x = keras.layers.Dense(32, activation="relu")(x)
outputs = keras.layers.Dense(10)(x)
model = CustomModel(inputs, outputs)
model.compile()
model.fit(dataset, epochs=2)
from tensorflow.keras import layers
class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        # Reparameterization trick: z = mean + sigma * eps with eps ~ N(0, I),
        # which keeps the sample differentiable w.r.t. z_mean and z_log_var.
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
    """Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""

    def __init__(self, latent_dim=32, intermediate_dim=64, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation=tf.nn.relu)
        self.dense_mean = layers.Dense(latent_dim)
        self.dense_log_var = layers.Dense(latent_dim)
        self.sampling = Sampling()

    def call(self, inputs):
        x = self.dense_proj(inputs)
        z_mean = self.dense_mean(x)
        z_log_var = self.dense_log_var(x)
        # Draw a latent sample via the reparameterization trick.
        z = self.sampling((z_mean, z_log_var))
        return z_mean, z_log_var, z
class Decoder(layers.Layer):
    """Converts z, the encoded digit vector, back into a readable digit."""

    def __init__(self, original_dim, intermediate_dim=64, **kwargs):
        super(Decoder, self).__init__(**kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation=tf.nn.relu)
        # Sigmoid output keeps reconstructed pixel values in [0, 1].
        self.dense_output = layers.Dense(original_dim, activation=tf.nn.sigmoid)

    def call(self, inputs):
        x = self.dense_proj(inputs)
        return self.dense_output(x)
class VariationalAutoEncoder(layers.Layer):
    """Combines the encoder and decoder into an end-to-end model for training."""

    def __init__(self, original_dim, intermediate_dim=64, latent_dim=32, **kwargs):
        super(VariationalAutoEncoder, self).__init__(**kwargs)
        self.original_dim = original_dim
        self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim)
        self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)

    def call(self, inputs):
        z_mean, z_log_var, z = self.encoder(inputs)
        reconstructed = self.decoder(z)
        # Add KL divergence regularization loss.
        kl_loss = -0.5 * tf.reduce_mean(
            z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1
        )
        self.add_loss(kl_loss)
        return reconstructed
# Our model.
vae = VariationalAutoEncoder(original_dim=784, intermediate_dim=64, latent_dim=32)
# Loss and optimizer.
loss_fn = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
# Prepare a dataset.
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
x_train.reshape(60000, 784).astype("float32") / 255
)
dataset = dataset.shuffle(buffer_size=1024).batch(32)
@tf.function
def training_step(x):
with tf.GradientTape() as tape:
reconstructed = vae(x) # Compute input reconstruction.
# Compute loss.
loss = loss_fn(x, reconstructed)
loss += sum(vae.losses) # Add KLD term.
# Update the weights of the VAE.
grads = tape.gradient(loss, vae.trainable_weights)
optimizer.apply_gradients(zip(grads, vae.trainable_weights))
return loss
losses = [] # Keep track of the losses over time.
for step, x in enumerate(dataset):
loss = training_step(x)
# Logging.
losses.append(float(loss))
if step % 100 == 0:
print("Step:", step, "Loss:", sum(losses) / len(losses))
# Stop after 1000 steps.
# Training the model to convergence is left
# as an exercise to the reader.
if step >= 1000:
break
original_dim = 784
intermediate_dim = 64
latent_dim = 32
# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name="encoder_input")
x = layers.Dense(intermediate_dim, activation="relu")(original_inputs)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name="encoder")
# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name="z_sampling")
x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
outputs = layers.Dense(original_dim, activation="sigmoid")(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name="decoder")
# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name="vae")
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)
# Loss and optimizer.
loss_fn = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
# Prepare a dataset.
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
x_train.reshape(60000, 784).astype("float32") / 255
)
dataset = dataset.map(lambda x: (x, x)) # Use x_train as both inputs & targets
dataset = dataset.shuffle(buffer_size=1024).batch(32)
# Configure the model for training.
vae.compile(optimizer, loss=loss_fn)
# Actually training the model.
vae.fit(dataset, epochs=1)
import numpy as np
input_dim = 784
classes = 10
# This is the main network we'll actually use to predict labels.
main_network = keras.Sequential(
[keras.layers.Dense(64, activation=tf.nn.relu), keras.layers.Dense(classes),]
)
# It doesn't need to create its own weights, so let's mark its layers
# as already built. That way, calling `main_network` won't create new variables.
for layer in main_network.layers:
layer.built = True
# This is the number of weight coefficients to generate. Each layer in the
# main network requires output_dim * input_dim + output_dim coefficients.
num_weights_to_generate = (classes * 64 + classes) + (64 * input_dim + 64)
# This is the hypernetwork that generates the weights of the `main_network` above.
hypernetwork = keras.Sequential(
[
keras.layers.Dense(16, activation=tf.nn.relu),
keras.layers.Dense(num_weights_to_generate, activation=tf.nn.sigmoid),
]
)
# Loss and optimizer.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
# Prepare a dataset.
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
# We'll use a batch size of 1 for this experiment.
dataset = dataset.shuffle(buffer_size=1024).batch(1)
@tf.function
def train_step(x, y):
    """One hypernetwork training step.

    The hypernetwork predicts all weights of `main_network` from the input
    `x` (batch size 1), the predictions are sliced/reshaped into each
    layer's kernel and bias, and only the hypernetwork's own weights are
    updated from the classification loss.
    """
    with tf.GradientTape() as tape:
        # Predict weights for the outer model.
        weights_pred = hypernetwork(x)

        # Reshape them to the expected shapes for w and b for the outer model.
        # Layer 0 kernel.
        start_index = 0
        w0_shape = (input_dim, 64)
        w0_coeffs = weights_pred[:, start_index : start_index + np.prod(w0_shape)]
        w0 = tf.reshape(w0_coeffs, w0_shape)
        start_index += np.prod(w0_shape)

        # Layer 0 bias.
        b0_shape = (64,)
        b0_coeffs = weights_pred[:, start_index : start_index + np.prod(b0_shape)]
        b0 = tf.reshape(b0_coeffs, b0_shape)
        start_index += np.prod(b0_shape)

        # Layer 1 kernel.
        w1_shape = (64, classes)
        w1_coeffs = weights_pred[:, start_index : start_index + np.prod(w1_shape)]
        w1 = tf.reshape(w1_coeffs, w1_shape)
        start_index += np.prod(w1_shape)

        # Layer 1 bias.
        b1_shape = (classes,)
        b1_coeffs = weights_pred[:, start_index : start_index + np.prod(b1_shape)]
        b1 = tf.reshape(b1_coeffs, b1_shape)
        start_index += np.prod(b1_shape)

        # Set the weight predictions as the weight variables on the outer model.
        # (Works because the layers were marked `built`, so they never
        # created variables of their own.)
        main_network.layers[0].kernel = w0
        main_network.layers[0].bias = b0
        main_network.layers[1].kernel = w1
        main_network.layers[1].bias = b1

        # Inference on the outer model.
        preds = main_network(x)
        loss = loss_fn(y, preds)

    # Train only inner model.
    grads = tape.gradient(loss, hypernetwork.trainable_weights)
    optimizer.apply_gradients(zip(grads, hypernetwork.trainable_weights))
    return loss
losses = [] # Keep track of the losses over time.
for step, (x, y) in enumerate(dataset):
loss = train_step(x, y)
# Logging.
losses.append(float(loss))
if step % 100 == 0:
print("Step:", step, "Loss:", sum(losses) / len(losses))
# Stop after 1000 steps.
# Training the model to convergence is left
# as an exercise to the reader.
if step >= 1000:
break
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: You can get its value as a NumPy array by calling .numpy()
Step3: Much like a NumPy array, it features the attributes dtype and shape
Step4: A common way to create constant tensors is via tf.ones and tf.zeros (just like np.ones and np.zeros)
Step5: You can also create random constant tensors
Step6: Variables
Step7: You update the value of a Variable by using the methods .assign(value), .assign_add(increment), or .assign_sub(decrement)
Step8: Doing math in TensorFlow
Step9: Gradients
Step10: By default, variables are watched automatically, so you don't need to manually watch them
Step11: Note that you can compute higher-order derivatives by nesting tapes
Step13: Keras layers
Step14: You would use a Layer instance much like a Python function
Step15: The weight variables (created in __init__) are automatically
Step17: You have many built-in layers available, from Dense to Conv2D to LSTM to
Step18: Layer gradients
Step20: Trainable and non-trainable weights
Step22: Layers that own layers
Step23: Note that our manually-created MLP above is equivalent to the following
Step25: Tracking losses created by layers
Step27: Any model incorporating this layer will track this regularization loss
Step28: These losses are cleared by the top-level layer at the start of each forward
Step29: Keeping track of training metrics
Step30: In addition to this, similarly to the self.add_loss() method, you have access
Step31: Let's test-drive it
Step32: Compiled functions
Step33: Training mode & inference mode
Step34: The Functional API for model-building
Step35: The Functional API tends to be more concise than subclassing, and provides a few other
Step36: You can always subclass the Model class (it works exactly like subclassing
Step39: End-to-end experiment example 1
Step41: Next, we have a Decoder class, which maps the probabilistic latent space coordinates
Step43: Finally, our VariationalAutoEncoder composes together an encoder and a decoder, and
Step44: Now, let's write a training loop. Our training step is decorated with a @tf.function to
Step45: As you can see, building and training this type of model in Keras
Step46: Much more concise, right?
Step47: The use of the Functional API and fit reduces our example from 65 lines to 25 lines
Step48: This is our training loop. For each batch of data
|
6,930
|
<ASSISTANT_TASK:>
Python Code:
# Ignore numpy warnings
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
%matplotlib inline
# Some defaults:
plt.rcParams['figure.figsize'] = (12, 6) # Default plot size
%reset -f
# Enumerate all CUDA devices and print their capabilities.
# (Converted the Python 2 `print` statements to Python 3 calls and
# `iteritems()` to `items()` so the cell runs under Python 3.)
import pycuda
from pycuda import compiler
import pycuda.driver as drv
import pycuda.driver as cuda

drv.init()
print("%d device(s) found." % drv.Device.count())
for ordinal in range(drv.Device.count()):
    dev = drv.Device(ordinal)
    print("Device #%d: %s" % (ordinal, dev.name()))
    print("  Compute Capability: %d.%d" % dev.compute_capability())
    print("  Total Memory: %s KB" % (dev.total_memory() // 1024))
    # get_attributes() returns a mapping of device attributes; sort for
    # stable, readable output.
    atts = [(str(att), value) for att, value in dev.get_attributes().items()]
    atts.sort()
    for att, value in atts:
        print("  %s: %s" % (att, value))
import pycuda.autoinit
import pycuda.driver as cuda

# Report free vs. total global memory on the default (autoinit) device.
(free, total) = cuda.mem_get_info()
print("Global memory occupancy:%f%% free" % (free * 100 / total))

for devicenum in range(cuda.Device.count()):
    device = cuda.Device(devicenum)
    attrs = device.get_attributes()
    # Beyond this point is just pretty printing
    print("\n===Attributes for device %d" % devicenum)
    # Python 3 fix: dict.iteritems() was removed; use items().
    for (key, value) in attrs.items():
        print("%s:%s" % (str(key), str(value)))
! jupyter-nbconvert "01 PyCUDA verify CUDA 8.0.ipynb" --to slides --reveal-prefix=reveal.js --post serve --ServerPostProcessor.ip="0.0.0.0"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PyCUDA Imports
Step2: Available CUDA Devices
|
6,931
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from scipy import stats

# Monte-Carlo power analysis of a 2x2 factorial ANOVA with main effects
# A (x1), B (x2) and interaction X (x1*x2), all of unit size, noise sd = 2.
Ns = np.arange(20, 200, 4)   # total sample sizes to simulate
K = 10000                    # simulations per sample size
ps = np.zeros((Ns.size, 3))  # rejection rates for A, B, X
res = np.zeros(4)            # [residual SS, SS_A, SS_B, SS_X]
cs = np.zeros((Ns.size, 8))  # frequency of each significance pattern (3 bits)
i = 0
for N in Ns:
    for k in range(K):
        x1 = np.zeros(N)
        # Python 3 fix: integer division (N / 2 is a float and cannot index).
        x1[N // 2:] = 1
        x2 = np.mod(range(N), 2)
        y = 42 + x1 + x2 + x1 * x2 + np.random.randn(N) * 2
        tot = np.square(y - y.mean()).sum()
        # Full cell-means model gives the residual sum of squares.
        x = np.ones((N, 4))
        x[:, 1] = x1 * x2
        x[:, 2] = x1 * (1 - x2)
        x[:, 3] = (1 - x1) * x2
        res[0] = np.linalg.lstsq(x, y)[1]
        # Single-factor fits give the sums of squares for A and B.
        x = np.ones((N, 2))
        x[:, 1] = x1
        res[1] = tot - np.linalg.lstsq(x, y)[1]
        x[:, 1] = x2
        res[2] = tot - np.linalg.lstsq(x, y)[1]
        res[3] = tot - res[0] - res[1] - res[2]  # interaction SS
        mss = res / np.float32(np.array([N - 4, 1, 1, 1]))
        F = mss[1:] / mss[0]
        p = 1 - stats.f.cdf(F, 1, N - 4)
        p = p < 0.05
        ps[i, :] += np.int32(p)
        # Encode the (A, B, X) significance pattern as a 3-bit index.
        cs[i, p[0] * 4 + p[1] * 2 + p[2]] += 1
    i += 1
ps /= float(K)
cs /= float(K)
for k in range(ps.shape[1]):
    plt.plot(Ns / 4, ps[:, k])
plt.legend(['A', 'B', 'X'], loc=2)
plt.xlabel('N per cell')
plt.ylabel('expected power')
plt.figure(figsize=(7, 6))
for k in [0, 1, 2, 3, 6, 7]:
    plt.plot(Ns / 4, cs[:, k])
plt.legend(['nothing', 'X', 'B', 'BX', 'AB', 'ABX'], loc=2)
plt.xlabel('N per cell')
plt.ylabel('pattern frequency')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we look at the probability that the various configurations of significant and non-significant results will be obtained.
|
6,932
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import colors
import matplotlib.pylab as plt
from oedes.fvm import mesh1d
from oedes import context,init_notebook,testing,models
import numpy as np
from oedes.functions import Aux2
init_notebook()
class CustomMobility(models.MobilityModel):
    """Concentration-dependent carrier mobility for the oedes model.

    Interpolates between a low mobility mu0 and a high mobility mu1 as a
    smooth function of the relative carrier filling, using the `Aux2`
    switching function.
    """

    def mu_func(self, T, E, c):
        """Return mobility given temperature T, field E and concentration c.

        T and E are accepted for interface compatibility but unused here.
        """
        mu0 = 5e-11
        mu1 = 5e-9
        W = 0.04   # width of the transition in relative filling
        f0 = 0.3   # relative filling at the transition midpoint
        # Relative filling; 0.3e27 matches the N0 used in the parameters
        # below — presumably the density of states (TODO confirm).
        f = c / 0.3e27
        return (mu1 - mu0) * Aux2((f0 - f) / W) + mu0

    def mobility(self, parent, ctx, eq):
        """Evaluate mobility on cells, average to faces, store both in ctx."""
        mu_cell = self.mu_func(ctx.varsOf(eq.thermal)['T'], ctx.varsOf(eq.poisson)['Ecellm'], ctx.varsOf(eq)['c'])
        mu_face = eq.mesh.faceaverage(mu_cell)
        ctx.varsOf(eq).update(mu_face = mu_face, mu_cell = mu_cell)
mesh = mesh1d(2e-6)
def solve(mu_ions, mobility_model, additional_params=None, voltage=5.):
    """Build a p-n device with mobile ions and run a 1 s transient.

    :param mu_ions: mobility assigned to both the cation and anion species
    :param mobility_model: electronic mobility model for the device
    :param additional_params: optional dict merged over the default parameters
    :param voltage: bias applied to electrode0 (electrode1 is grounded)
    :returns: oedes `context` after the transient has been computed
    """
    model = models.BaseModel()
    models.std.electronic_device(model, mesh, 'pn', mobility_model = mobility_model)
    # Add monovalent salt ions; initial_salt(c) builds the initial state.
    cation, anion, initial_salt = models.std.add_ions(model, mesh, zc=1, za=-1)
    model.setUp()
    xinit = initial_salt(0.1e27)
    params = {'T': 300.,
              'electron.energy': 0.,
              'electron.N0': 0.3e27,
              'hole.energy': -2.,
              'hole.N0': 0.3e27,
              'electrode0.workfunction': 2.,
              'electrode1.workfunction': 0.,
              'electrode0.voltage': voltage,
              'electrode1.voltage': 0,
              'cation.mu': mu_ions,
              'anion.mu': mu_ions,
              'npi': 0,
              'epsilon_r': 3.
              }
    if additional_params is not None:
        params.update(additional_params)
    c = context(model, x=xinit)
    # Transient to t = 1 s with an initial time step of 1e-9 s.
    c.transient(params, 1, 1e-9)
    return c
def transientplot(data):
    """Plot electron/hole concentration profiles at log-spaced times.

    Electrons are drawn in reds and holes in blues; darker curves are
    later times. Also records the profiles via `testing.store` for
    regression checking.
    """
    N0 = 5e27  # normalization density: the y-axis shows c / N0
    n = 20     # number of time decim steps between 1e-5 s and 1e-1 s
    for it, t in enumerate(10**np.linspace(-5, -1, n + 1)):
        out = data.attime(t).output()
        # Color intensity grows with time index (0 -> light, 1 -> dark).
        c = 1 - (1. - it / n)
        ncolor = colors.rgb2hex((1, 1 - c, 1 - c))
        pcolor = colors.rgb2hex((1 - c, 1 - c, 1))
        plt.plot(mesh.cells['center'] * 1e9, out['electron.c'] / N0, ncolor)
        plt.plot(mesh.cells['center'] * 1e9, out['hole.c'] / N0, pcolor)
        testing.store(out['electron.c'], rtol=1e-7, atol=1e-3 * N0)
        testing.store(out['hole.c'], rtol=1e-7, atol=1e-3 * N0)
    plt.yscale('log')
    plt.ylim([1e-5, 1.])
    plt.xlabel('$x$ [nm]')
    plt.ylabel('$c/N_0$')
mu_params = {'electron.mu':5e-11,'hole.mu':5e-11}
c=solve(5e-11, models.MobilityFromParams(), mu_params)
transientplot(c)
c=solve(mu_ions=5e-11, mobility_model=CustomMobility())
transientplot(c)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model and parameters
Step2: Results
Step3: Concentration dependent mobility
|
6,933
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
    """Create a fully connected layer with the given layer as input and the given number of neurons.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :returns: Tensor
        A new fully connected layer
    """
    layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
    return layer
DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
    """Create a convolutional layer with the given layer as input.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :returns: Tensor
        A new convolutional layer
    """
    # Downsample (stride 2) every third layer; feature maps grow with depth.
    strides = 2 if layer_depth % 3 == 0 else 1
    conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
    return conv_layer
DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
    """Build a 20-conv-layer MNIST classifier (no batch norm) and train it.

    :param num_batches: number of minibatches to train on
    :param batch_size: samples per minibatch
    :param learning_rate: Adam learning rate
    """
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])

    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i)

    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])

    # Add one fully connected layer
    layer = fully_connected(layer, 100)

    # Create the output layer with 1 node for each
    logits = tf.layers.dense(layer, 10)

    # Define loss and training operations
    # NOTE(review): sigmoid cross-entropy on one-hot labels; softmax
    # cross-entropy would be more conventional for this task — kept as-is.
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            # train this batch
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})

            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))

        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels})
        print('Final test accuracy: {:>3.5f}'.format(acc))

        # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]]})
        print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    """Create a fully connected layer with batch normalization before the ReLU.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :param is_training: bool Tensor
        Whether the network is training; selects batch vs. population statistics.
    :returns: Tensor
        A new fully connected layer
    """
    # Bias is omitted: batch normalization's beta offset plays the same role.
    layer = tf.layers.dense(prev_layer, num_units, activation=None, use_bias=False)
    layer = tf.layers.batch_normalization(layer, training=is_training)
    layer = tf.nn.relu(layer)
    return layer
def conv_layer(prev_layer, layer_depth, is_training):
    """Create a convolutional layer with batch normalization before the ReLU.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :param is_training: bool Tensor
        Whether the network is training; selects batch vs. population statistics.
    :returns: Tensor
        A new convolutional layer
    """
    strides = 2 if layer_depth % 3 == 0 else 1
    # Batch normalization goes between the linear convolution and the ReLU.
    conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=None)
    conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
    conv_layer = tf.nn.relu(conv_layer)
    return conv_layer
def train(num_batches, batch_size, learning_rate):
    """Build and train the 20-conv-layer MNIST classifier WITH batch norm.

    :param num_batches: number of minibatches to train on
    :param batch_size: samples per minibatch
    :param learning_rate: Adam learning rate
    """
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])

    # Tells the batch-norm layers whether to use batch statistics (training)
    # or the accumulated population statistics (inference).
    is_training = tf.placeholder(tf.bool)

    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i, is_training)

    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])

    # Add one fully connected layer
    layer = fully_connected(layer, 100, is_training)

    # Create the output layer with 1 node for each
    logits = tf.layers.dense(layer, 10)

    # Define loss and training operations
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    # Batch norm's moving-average updates live in UPDATE_OPS; making the
    # training op depend on them ensures they run every training step.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            # train this batch
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training:True})

            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels,
                                                              is_training: False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training:False})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))

        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels,
                                  is_training:False})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels,
                                  is_training:False})
        print('Final test accuracy: {:>3.5f}'.format(acc))

        # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]],
                                                    is_training:False})
        print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    """Create a fully connected layer with explicit (hand-rolled) batch normalization.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :param is_training: bool Tensor
        Whether the network is training; selects batch vs. population statistics.
    :returns Tensor
        A new fully connected layer
    """
    # Bias is omitted because batch norm's beta shift makes it redundant.
    layer = tf.layers.dense(prev_layer, num_units, activation=None, use_bias=False)
    # Learned scale (gamma) and shift (beta) applied after normalization.
    gamma = tf.Variable(tf.ones([num_units]))
    beta = tf.Variable(tf.zeros([num_units]))
    # Running (population) statistics, updated during training and used at
    # inference time; not trainable by the optimizer.
    pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)
    pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)
    epsilon = 1e-3  # numerical-stability term inside the normalization
    def b_training():
        # Normalize with the current batch statistics and update the
        # population estimates with an exponential moving average.
        batch_mean, batch_variance = tf.nn.moments(layer, [0])
        decay = 0.99
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
        with tf.control_dependencies([train_mean, train_variance]):
            return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)
    def b_infering():
        # At inference time, normalize with the stored population statistics.
        return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)
    bnorm_layer = tf.cond(is_training, b_training, b_infering)
    return tf.nn.relu(bnorm_layer)
def conv_layer(prev_layer, layer_depth, is_training):
    """Create a convolutional layer with explicit (hand-rolled) batch normalization.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :param is_training: bool Tensor
        Whether the network is training; selects batch vs. population statistics.
    :returns Tensor
        A new convolutional layer
    """
    # Downsample every third layer with stride 2; channel count grows with depth.
    strides = 2 if layer_depth % 3 == 0 else 1
    in_channels = prev_layer.get_shape().as_list()[3]
    out_channels = layer_depth*4
    # Bias is omitted: batch norm's beta shift takes its place.
    weights = tf.Variable(
        tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
    layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
    # Learned per-channel scale (gamma) and shift (beta).
    gamma = tf.Variable(tf.ones([out_channels]))
    beta = tf.Variable(tf.zeros([out_channels]))
    # Running population statistics for inference; excluded from training.
    pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)
    pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)
    epsilon = 1e-3  # numerical-stability term inside the normalization
    def b_training():
        # Statistics over batch, height and width axes: one pair per channel.
        batch_mean, batch_variance = tf.nn.moments(layer, [0,1,2], keep_dims=False)
        decay = 0.99
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
        with tf.control_dependencies([train_mean, train_variance]):
            return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)
    def b_infering():
        # At inference time, normalize with the stored population statistics.
        return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)
    bnorm_layer = tf.cond(is_training, b_training, b_infering)
    return tf.nn.relu(bnorm_layer)
def train(num_batches, batch_size, learning_rate):
    """Build, train and evaluate the 20-conv-layer MNIST classifier that uses
    the hand-rolled batch normalization from conv_layer/fully_connected.

    :param num_batches: int, number of mini-batch training steps
    :param batch_size: int, samples per mini-batch
    :param learning_rate: float, Adam learning rate

    NOTE(review): relies on a module-level ``mnist`` dataset object — confirm
    it is loaded before calling.
    """
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])
    # Tells batch normalization whether to use batch or population statistics.
    is_training = tf.placeholder(tf.bool)
    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i, is_training)
    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
    # Add one fully connected layer
    layer = fully_connected(layer, 100, is_training)
    # Create the output layer with 1 node for each class
    logits = tf.layers.dense(layer, 10)
    # Define loss and training operations
    # NOTE(review): sigmoid cross-entropy treats the 10 classes independently;
    # softmax_cross_entropy_with_logits is the usual choice for one-hot labels
    # — confirm this is intentional.
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # train this batch
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, 
                                 is_training:True})
            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels,
                                                              is_training:False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, 
                                                              is_training:False})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels,
                                  is_training:False})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels,
                                  is_training:False})
        print('Final test accuracy: {:>3.5f}'.format(acc))
        # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]],
                                                    is_training:False})
        print("Accuracy on 100 samples:", correct/100)
# Same hyperparameters as the previous run: 800 batches of 64, Adam lr 2e-3.
num_batches = 800
batch_size = 64
learning_rate = 0.002
# Fresh graph so this run is independent of the earlier one.
tf.reset_default_graph()
with tf.Graph().as_default():
    train(num_batches, batch_size, learning_rate)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Batch Normalization using tf.layers.batch_normalization<a id="example_1"></a>
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO
|
6,934
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
import numpy.random as rnd
import numpy
# Simulated time series: Gaussian noise + a weak linear trend + a few spikes.
N = 2000
bruit1 = rnd.normal(size=(N,))
temps = numpy.arange(N)
bruit1[:5], temps[:5]
import random
# Impulsive noise: ten random positions receive a large (~+10) outlier.
bruit2 = numpy.zeros((N,))
for i in range(0, 10):
    h = random.randint(0, N-1)
    bruit2[h] = rnd.normal() + 10
# AR-like recursion: after a 30-step warm-up, y follows 0.9 times the value
# two steps back, which creates the period-2 autocorrelation studied below.
serie = []
y = 10
for i in range(N//2+100):
    serie.append(y + bruit1[i] + 0.0004 * temps[i] + bruit2[i])
    if i > 30:
        y = 0.9 * serie[-2]
# Keep only the last 1000 points as the working series Y.
Y = numpy.array(serie[-1000:])
Y[:5]
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
ax[0].plot(numpy.arange(len(Y)), Y)
ax[1].plot(numpy.arange(800, 900), Y[800:900])
ax[0].set_title("Série temporelle simulée")
ax[1].set_title("Même série temporelle simulée");
from pandas import DataFrame
df = DataFrame(dict(Y=Y))
df.head()
from statsmodels.tsa.tsatools import detrend
df["notrend"] = detrend(df.Y)
df.head()
ax = df.plot()
ax.set_title("Sans tendance");
from statsmodels.tsa.stattools import acf
cor = acf(df.notrend)
fig, ax = plt.subplots(1, 1)
ax.plot(cor)
ax.set_title("Autocorrélogramme");
from statsmodels.tsa.stattools import pacf
pcor = pacf(df.notrend)
fig, ax = plt.subplots(1, 1)
ax.plot(pcor[1:])
ax.set_title("Autocorrélogramme partiel");
def lagged_ts(serie, lag):
    """Build the trajectory (Hankel) matrix of a 1-D series.

    Row ``t`` holds the window ``serie[t], ..., serie[t + lag - 1]``, so the
    result has shape ``(len(serie) - lag + 1, lag)``.
    """
    n_rows = serie.shape[0] - lag + 1
    out = numpy.empty((n_rows, lag))
    for row in range(n_rows):
        out[row, :] = serie[row:row + lag]
    return out
lagged_ts(Y, 3)
lag = lagged_ts(Y, 60)
lag.shape
from numpy.linalg import svd
# u @ numpy.diag(s) @ vh
u, s, vh = svd(lag)
u.shape, s.shape, vh.shape
d = numpy.zeros((941, 60))
d[:60,:60] = numpy.diag(s)
(u @ d @ vh).shape
fig, ax = plt.subplots(1,2, figsize=(12,4))
DataFrame(dict(valeur=s)).plot(kind="bar", ax=ax[0])
DataFrame(dict(valeur=s[1:15])).plot(kind="bar", ax=ax[1])
ax[0].set_title("Valeurs propres")
ax[1].set_title("Valeurs propres sans la première");
np = 12
fig, ax = plt.subplots(np, 3, figsize=(14, np*2))
for n in range(np):
i = n if n < 5 else n * 5 - 15
d = numpy.zeros((941, 60))
d[i, i] = s[i]
X2 = u @ d @ vh
pos = 0 #X2.shape[1] - 1
# série reconstruites avec un axe
ax[n, 0].plot(X2[:,pos])
ax[n, 1].set_title("i=%d" % i)
# série reconstruites avec un axe
ax[n, 1].plot(X2[800:850,pos])
ax[n, 1].set_title("i=%d" % i)
d = numpy.zeros((941, 60))
d[:i+1, :i+1] = numpy.diag(s[:i+1])
X2 = u @ d @ vh
ax[n, 2].plot(X2[800:850,pos])
ax[n, 2].plot(Y[800:850])
ax[n, 2].set_title("-->i=%d + 1" % i)
fig, ax = plt.subplots(1, 1, figsize=(5,5))
for i in range(0, 8):
ax.plot([0, 5], [i, i], 'k-')
if i < 6:
ax.plot([i, i], [0, 7], 'k-')
if i < 4:
ax.text(i + 0.1, 1.5, "Y(t-%d)" % (4-i))
ax.text(i + 0.1, 0.5, "Y(t-%d)" % (3-i))
ax.text(4.1, 1.5, "Y(t)")
ax.text(4.05, 0.5, "Y(t+1)=?")
plt.axis('off');
d = numpy.zeros((941, 60))
for i in range(0, 30):
d[i, i] = s[i]
X2 = u @ d @ vh
from sklearn.covariance import EllipticEnvelope
env = EllipticEnvelope(support_fraction=0.9)
env.fit(X2[:,:30])
out = env.predict(X2[:,:30])
fig, ax = plt.subplots(1, 1, figsize=(12,2))
ax.plot((1 - out)/2, "-")
ax.set_title("Points aberrants d'une série temporelles.");
fig, ax = plt.subplots(1, 1, figsize=(14,2))
ax.plot(X2[:,0], label="serie")
ax.plot((1 - out)*5, "-", label="outlier")
ax.set_title("Points aberrants sur la série reconstruite")
ax.legend();
fig, ax = plt.subplots(1, 1, figsize=(14,2))
ax.plot(Y, label="serie")
ax.plot((1 - out)*5, "-", label="outlier")
ax.set_title("Points aberrants sur la série brute")
ax.legend();
fig, ax = plt.subplots(1, 1, figsize=(14,2))
outp = env.decision_function(X2[:,:30])
ax.plot(Y, label="serie")
ax.plot(outp, "-", label="Proba not outlier")
ax.set_title("Points aberrants sur la série brute")
ax.legend();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Une série articielle
Step2: On crée un bruit aberrant.
Step3: Autocorrélations
Step4: L'autocorrélogramme à proprement parler.
Step5: Etant donné que la série $Y_t$ dépend de $Y_{t-2}$, on observe un pic pour $cor(Y_t, Y_{t-d})$ pour tous les $d$ pairs, avec $cor(Y_t, Y_{t-4}) \sim cor(Y_t, Y_{t-2})^2$. On enlève ces effets récursifs en calculant l'autocorrélogramme partiel qui correspond à l'estimation des coefficients d'un modèle autorégressif infini.
Step6: SSA
Step7: Je me représente la méthode SVD comme une façon de projeter des vecteurs sur l'espace vectoriel constitué des premiers vecteurs propres, à chaque dimension supplémentaire, c'est comme une pièce du puzzle qui s'assemble jusqu'à recomposer l'ensemble. Ce qu'on peut voir aussi comme ceci
Step8: La prédiction
Step9: Les points aberrants
Step10: L'idéal serait d'utiliser une méthode basée sur une ACP. Le plus proche reste le modèle gaussien avec EllipticEnvelope.
Step11: On observe des plages contiguës. Cela signifie que d'une valeur aberrante contamine des vecteurs décalées consécutifs de la série $Y$. Il ne reste plus qu'à repérer la valeur incriminée.
Step12: Ce qui a l'air de correspondre à la fin des grandes plages. On recommence avec la probabilité d'être un outlier.
|
6,935
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
plt.style.use('ggplot')
plt.rc('axes', grid=False) # turn off the background grid for images
# A 2x2 example matrix for the linear-algebra demos below.
my_matrix = np.array([[1,2],[1,1]])
print(my_matrix.shape)
print(my_matrix)
my_matrix_transposed = np.transpose(my_matrix)
print(my_matrix_transposed)
my_matrix_inverse = linalg.inv(my_matrix)
print(my_matrix_inverse)
# Matrix product: inverse times original gives the identity.
my_matrix_inverse.dot(my_matrix)
# Caution: '*' is ELEMENT-WISE multiplication on ndarrays, not a matrix product.
my_matrix_inverse * my_matrix_inverse
# Solve the linear system A x = b.
A = np.array([[1,2],[1,1]])
print(A)
b = np.array([[4],[3]])
print(b)
# Solve by inverting A and then multiply by b
linalg.inv(A).dot(b)
# Cleaner looking (and numerically preferable to forming the inverse)
linalg.solve(A,b)
# A 3x3 system solved both ways.
A = np.array([[1,3,5],[2,5,1],[2,3,8]])
b = np.array([[10],[8],[3]])
print(linalg.inv(A))
print(linalg.solve(A,b))
import sympy as sym
AA = sym.Matrix([[1,3,5],[2,5,1],[2,3,8]])
bb = sym.Matrix([[10],[8],[3]])
print(AA**-1)
print(AA**-1 * bb)
%timeit AA**-1 * bb
%timeit linalg.solve(A,b)
print(A)
plt.imshow(A, interpolation='nearest', cmap=plt.cm.Blues);
I = np.load("test_data.npy") # load in a saved numpy array
I.ndim, I.shape, I.dtype
print("The minimum value of the array I is {0:.2f}".format(I.min()))
print("The maximum value of the array I is {0:.2f}".format(I.max()))
print("The mean value of the array I is {0:.2f}".format(I.mean()))
print("The standard deviation of the array I is {0:.2f}".format(I.std()))
#flatten() collapses n-dimentional data into 1-d
plt.hist(I.flatten(),bins=30);
II = I + 8
print("The minimum value of the array II is {0:.2f}".format(II.min()))
print("The maximum value of the array II is {0:.2f}".format(II.max()))
print("The mean value of the array II is {0:.2f}".format(II.mean()))
print("The standard deviation of the array II is {0:.2f}".format(II.std()))
plt.imshow(I, cmap=plt.cm.gray)
plt.colorbar();
fig, ax = plt.subplots(1,5,sharey=True)
fig.set_size_inches(12,6)
fig.tight_layout()
ax[0].imshow(I, cmap=plt.cm.viridis)
ax[0].set_xlabel('viridis')
ax[1].imshow(I, cmap=plt.cm.hot)
ax[1].set_xlabel('hot')
ax[2].imshow(I, cmap=plt.cm.magma)
ax[2].set_xlabel('magma')
ax[3].imshow(I, cmap=plt.cm.spectral)
ax[3].set_xlabel('spectral')
ax[4].imshow(I, cmap=plt.cm.gray)
ax[4].set_xlabel('gray')
plt.imsave('Splash.png', I, cmap=plt.cm.gray) # Write the array I to a PNG file
Ipng = plt.imread('Splash.png') # Read in the PNG file
print("The original data has a min = {0:.2f} and a max = {1:.2f}".format(I.min(), I.max()))
print("The PNG file has a min = {0:.2f} and a max = {1:.2f}".format(Ipng.min(), Ipng.max()))
X = np.linspace(-5, 5, 500)
Y = np.linspace(-5, 5, 500)
X, Y = np.meshgrid(X, Y) # turns two 1-d arrays (X, Y) into one 2-d grid
Z = np.sqrt(X**2+Y**2)+np.sin(X**2+Y**2)
Z.min(), Z.max(), Z.mean()
from matplotlib.colors import LightSource
ls = LightSource(azdeg=0,altdeg=40)
shadedfig = ls.shade(Z,plt.cm.copper)
fig, ax = plt.subplots(1,3)
fig.set_size_inches(12,6)
fig.tight_layout()
ax[0].imshow(shadedfig)
contlevels = [1,2,Z.mean()]
ax[1].axis('equal')
ax[1].contour(Z,contlevels)
ax[2].imshow(shadedfig)
ax[2].contour(Z,contlevels);
I2 = plt.imread('doctor5.png')
print("The image I2 has a shape [height,width] of {0}".format(I2.shape))
print("The image I2 is made up of data of type {0}".format(I2.dtype))
print("The image I2 has a maximum value of {0}".format(I2.max()))
print("The image I2 has a minimum value of {0}".format(I2.min()))
plt.imshow(I2,cmap=plt.cm.gray);
fig, ax = plt.subplots(1,4)
fig.set_size_inches(12,6)
fig.tight_layout()
# You can show just slices of the image - Rememeber: The origin is the upper left corner
ax[0].imshow(I2, cmap=plt.cm.gray)
ax[0].set_xlabel('Original')
ax[1].imshow(I2[0:300,0:100], cmap=plt.cm.gray)
ax[1].set_xlabel('[0:300,0:100]') # 300 rows, 100 columns
ax[2].imshow(I2[:,0:100], cmap=plt.cm.gray) # ":" = whole range
ax[2].set_xlabel('[:,0:100]') # all rows, 100 columns
ax[3].imshow(I2[:,::-1], cmap=plt.cm.gray);
ax[3].set_xlabel('[:,::-1]') # reverse the columns
fig, ax = plt.subplots(1,2)
fig.set_size_inches(12,6)
fig.tight_layout()
CutLine = 300
ax[0].imshow(I2, cmap=plt.cm.gray)
ax[0].hlines(CutLine, 0, 194, color='b', linewidth=3)
ax[1].plot(I2[CutLine,:], color='b', linewidth=3)
ax[1].set_xlabel("X Value")
ax[1].set_ylabel("Pixel Value")
from scipy import ndimage
fig, ax = plt.subplots(1,5)
fig.set_size_inches(14,6)
fig.tight_layout()
ax[0].imshow(I2, cmap=plt.cm.gray)
I3 = ndimage.rotate(I2,45,cval=0.75) # cval is the value to set pixels outside of image
ax[1].imshow(I3, cmap=plt.cm.gray) # Rotate and reshape
I4 = ndimage.rotate(I2,45,reshape=False,cval=0.75) # Rotate and do not reshape
ax[2].imshow(I4, cmap=plt.cm.gray)
I5 = ndimage.shift(I2,(10,30),cval=0.75) # Shift image
ax[3].imshow(I5, cmap=plt.cm.gray)
I6 = ndimage.gaussian_filter(I2,5) # Blur image
ax[4].imshow(I6, cmap=plt.cm.gray);
import astropy.io.fits as fits
x = fits.open('bsg01.fits')
x.info()
x[0].header
xd = x[0].data
print("The image x has a shape [height,width] of {0}".format(xd.shape))
print("The image x is made up of data of type {0}".format(xd.dtype))
print("The image x has a maximum value of {0}".format(xd.max()))
print("The image x has a minimum value of {0}".format(xd.min()))
fig, ax = plt.subplots(1,2)
fig.set_size_inches(12,6)
fig.tight_layout()
ax[0].imshow(xd,cmap=plt.cm.gray)
ax[1].hist(xd.flatten(),bins=20);
CopyData = np.copy(xd)
CutOff = 40
mask = np.where(CopyData > CutOff)
CopyData[mask] = 50 # You can not just throw data away, you have to set it to something.
fig, ax = plt.subplots(1,2)
fig.set_size_inches(12,6)
fig.tight_layout()
ax[0].imshow(CopyData,cmap=plt.cm.gray)
ax[1].hist(CopyData.flatten(),bins=20);
fig, ax = plt.subplots(1,2)
fig.set_size_inches(12,6)
fig.tight_layout()
ax[0].imshow(xd, cmap=plt.cm.gray)
# Open another file 'bsg02.fits'
y = fits.open('bsg02.fits')
yd = y[0].data
ax[1].imshow(yd, cmap=plt.cm.gray);
fig, ax = plt.subplots(1,3)
fig.set_size_inches(12,6)
fig.tight_layout()
ax[0].imshow(xd, cmap=plt.cm.gray)
ax[1].imshow(yd, cmap=plt.cm.gray)
z = xd - yd # Subtract the images pixel by pixel
ax[2].imshow(z, cmap=plt.cm.gray);
S = fits.open('SolarSpectra.fits')
S.info()
Data = S[0].data
Head = S[0].header
Head
# The FITS header has the information to make an array of wavelengths
Start = Head['CRVAL1']
Number = Head['NAXIS1']
Delta = Head['CDELT1']
End = Start + (Number * Delta)
Wavelength = np.arange(Start,End,Delta)
fig, ax = plt.subplots(2,1)
fig.set_size_inches(11,8.5)
fig.tight_layout()
# Full spectra
ax[0].plot(Wavelength, Data, color='b')
ax[0].set_ylabel("Flux")
ax[0].set_xlabel("Wavelength [angstroms]")
# Just the visible range with the hydrogen Balmer lines
ax[1].set_xlim(4000,7000)
ax[1].set_ylim(0.6,1.2)
ax[1].plot(Wavelength, Data, color='b')
ax[1].set_ylabel("Flux")
ax[1].set_xlabel("Wavelength [angstroms]")
H_Balmer = [6563,4861,4341,4102,3970,3889,3835,3646]
ax[1].vlines(H_Balmer,0,2, color='r', linewidth=3, alpha = 0.25)
redfilter = plt.imread('sphereR.jpg')
redfilter.shape,redfilter.dtype
redfilter = plt.imread('sphereR.jpg')[:,:,0]
redfilter.shape,redfilter.dtype
plt.imshow(redfilter,cmap=plt.cm.gray);
greenfilter = plt.imread('sphereG.jpg')[:,:,0]
bluefilter = plt.imread('sphereB.jpg')[:,:,0]
fig, ax = plt.subplots(1,3)
fig.set_size_inches(12,3)
fig.tight_layout()
ax[0].set_title("Red Filter")
ax[1].set_title("Green Filter")
ax[2].set_title("Blue Filter")
ax[0].imshow(redfilter,cmap=plt.cm.gray)
ax[1].imshow(greenfilter,cmap=plt.cm.gray)
ax[2].imshow(bluefilter,cmap=plt.cm.gray);
rgb = np.zeros((480,640,3),dtype='uint8')
print(rgb.shape, rgb.dtype)
plt.imshow(rgb,cmap=plt.cm.gray);
rgb[:,:,0] = redfilter
rgb[:,:,1] = greenfilter
rgb[:,:,2] = bluefilter
fig, ax = plt.subplots(1,4)
fig.set_size_inches(14,3)
fig.tight_layout()
ax[0].set_title("Red Filter")
ax[1].set_title("Green Filter")
ax[2].set_title("Blue Filter")
ax[3].set_title("All Filters Stacked")
ax[0].imshow(redfilter,cmap=plt.cm.gray)
ax[1].imshow(greenfilter,cmap=plt.cm.gray)
ax[2].imshow(bluefilter,cmap=plt.cm.gray)
ax[3].imshow(rgb,cmap=plt.cm.gray);
print("The image rgb has a shape [height,width] of {0}".format(rgb.shape))
print("The image rgb is made up of data of type {0}".format(rgb.dtype))
print("The image rgb has a maximum value of {0}".format(rgb.max()))
print("The image rgb has a minimum value of {0}".format(rgb.min()))
rgb[:,:,0] = redfilter * 1.5
plt.imshow(rgb)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us work with the matrix
Step2: numpy matrix multiply uses the dot() function
Step3: Caution the * will just multiply the matricies on an element-by-element basis
Step4: Solving system of linear equations
Step5: System of 3 equations example (Numpy)
Step6: System of 3 equations example (SymPy) - Python's Symbolic Math Package
Step7: SymPy is slower than NumPy
Step8: Images are just 2-d arrays - imshow will display 2-d arrays as images
Step9: Read in some data
Step10: Math on images applies to every value (pixel)
Step11: Show the image represenation of I with a colorbar
Step12: Colormap reference
Step13: WARNING! Common image formats DO NOT preserve dynamic range of original data!!
Step14: Creating images from math
Step15: Fancy Image Display
Step16: Reading in images (imread) - Common Formats
Step17: Images are just arrays that can be sliced.
Step18: Simple image manipulation
Step19: ndimage can do much more
Step20: You can use masks on images
Step21: You can add and subtract images
Step22: The two images above may look the same but they are not! Subtracting the two images reveals the truth.
Step23: FITS Tables - An astronomical example
Step24: Pseudocolor - All color astronomy images are fake.
Step25: We just want to read in one of the three channels
Step26: Need to create a blank 3-d array to hold all of the images
Step27: Fill the array with the filtered images
|
6,936
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics
import sys # system module, used to get Python version
import os # operating system tools (check files)
import datetime as dt # date tools, used to note current date
import numpy as np
# these are new
import requests, io # internet and input tools
import numpy as np
%matplotlib inline
print('\nPython version: ', sys.version)
print('Pandas version: ', pd.__version__)
print('Requests version: ', requests.__version__)
print("Today's date:", dt.date.today())
#import scorecard data from csv file on the web
url = 'https://collegescorecard.ed.gov/downloads/Most-Recent-Cohorts-Scorecard-Elements.csv'
r = requests.get(url)
#read csv data, create a dataframe with the csv data
scorecard = pd.read_csv(url,
na_values=['PrivacySuppressed'])
scorecard1 = pd.DataFrame(scorecard)
#create a new data frame with only the relevant columns
newscore = scorecard1[['INSTNM', 'CITY', 'STABBR', 'PREDDEG',
'CONTROL', 'LOCALE', 'SAT_AVG', 'UGDS', 'UGDS_BLACK', 'UGDS_ASIAN',
'UGDS_HISP', 'UGDS_WHITE', 'md_earn_wne_p10', 'PCTFLOAN', 'gt_25k_p6',
'GRAD_DEBT_MDN_SUPP', 'GRAD_DEBT_MDN10YR_SUPP']]
#rename the columns so we can read them easily
newscore2 = newscore.rename(columns={'INSTNM': 'UNIVERSITY', 'STABBR': 'STATE', 'PREDDEG': 'DEGTYPE',
'md_earn_wne_p10': 'MEDIAN EARNINGS, 10 YEARS OUT',
'gt_25k_p6': 'PERCENT EARNING 25k+, 6 YRS OUT',
'GRAD_DEBT_MDN_SUPP': 'MEDIAN DEBT',
'GRAD_DEBT_MDN10YR_SUPP': 'MEDIAN 10-YR MONTHLY PAYMENTS'})
topmedian = newscore2.sort('MEDIAN EARNINGS, 10 YEARS OUT', ascending=False).head(20)
topmedian2 = topmedian.set_index('UNIVERSITY')
topmedian3 = topmedian2.sort('MEDIAN EARNINGS, 10 YEARS OUT', ascending=True)
topmedianalt = newscore2.sort('MEDIAN EARNINGS, 10 YEARS OUT', ascending=True).head(20)
topmedianalt2 = topmedianalt.set_index('UNIVERSITY')
topmedianalt3 = topmedianalt2.sort('MEDIAN EARNINGS, 10 YEARS OUT', ascending=False)
# Scatter of median student debt vs. median earnings 10 years out, one color
# per institution type, marker area scaled by undergraduate enrollment.
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
fig.set_size_inches(14, 8)
# Tufte-style axes: offset the spines and hide the top/right ones.
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlim(0,90000)
ax.set_ylim(0,50000)
# Boolean masks by institution type
# (CONTROL: 1=public, 2=private nonprofit, 3=private for-profit).
# The former `colors = 'CONTROL'== 1 == 'red', ...` line was dead code that
# evaluated to a tuple of booleans and was never used; it has been removed.
public = newscore2["CONTROL"] == 1
nonprofit = newscore2["CONTROL"] == 2
forprofit = newscore2["CONTROL"] == 3
# Marker sizes are filtered with the same mask as x and y so lengths match.
Public = plt.scatter(newscore2[public]["MEDIAN EARNINGS, 10 YEARS OUT"],
                     newscore2[public]["MEDIAN DEBT"],
                     s=newscore2[public]['UGDS']/10, c="blue", alpha=0.9)
Private_nonprofit = plt.scatter(newscore2[nonprofit]["MEDIAN EARNINGS, 10 YEARS OUT"],
                                newscore2[nonprofit]["MEDIAN DEBT"],
                                s=newscore2[nonprofit]['UGDS']/10, c="lightblue", alpha=0.9)
Private_forprofit = plt.scatter(newscore2[forprofit]["MEDIAN EARNINGS, 10 YEARS OUT"],
                                newscore2[forprofit]["MEDIAN DEBT"],
                                s=newscore2[forprofit]['UGDS']/10, c="darkblue", alpha=0.9)
plt.legend((Public, Private_nonprofit, Private_forprofit),
           ('Public', 'Private nonprofit', 'Private forprofit'),
           scatterpoints=1,
           markerscale=.1,
           loc='upper left',
           ncol=1,
           fontsize=14)
plt.xlabel('Median earnings', size=14)
plt.ylabel('Median debt', size=14)
plt.title('Debt to Earnings by University', fontsize=24)
plt.show()
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
fig.set_size_inches(14, 6)
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlim(0,100000)
ax.set_ylim(0,600)
x = newscore2['MEDIAN EARNINGS, 10 YEARS OUT']
y = newscore2['MEDIAN DEBT']
Urban = plt.scatter(newscore2[newscore["LOCALE"] == 12]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 12]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="maroon", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 13]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 13]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="maroon", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 21]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 21]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="orange", alpha=0.9)
Suburban = plt.scatter(newscore2[newscore["LOCALE"] == 22]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 22]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="orange", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 23]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 23]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="orange", alpha=0.9)
Rural = plt.scatter(newscore2[newscore["LOCALE"] == 31]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 31]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="red", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 32]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 32]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="red", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 33]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 33]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="red", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 41]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 41]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="red", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 42]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 42]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="red", alpha=0.9)
plt.scatter(newscore2[newscore["LOCALE"] == 43]["MEDIAN EARNINGS, 10 YEARS OUT"],
newscore2[newscore["LOCALE"] == 43]["MEDIAN 10-YR MONTHLY PAYMENTS"],
s=newscore2['UGDS']/15, c="red", alpha=0.9)
plt.legend((Urban, Suburban, Rural),
('Urban', 'Suburban', 'Rural'),
scatterpoints=1,
markerscale=.1,
loc='upper left',
ncol=1,
fontsize=14)
plt.xlabel('Median earnings', size=14)
plt.ylabel('Median 10-yr monthly payments - debt', size=14)
plt.title('Debt to Earnings by Locale (Urban, Suburban, Rural)', fontsize=16)
plt.show()
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
topmedian3['MEDIAN EARNINGS, 10 YEARS OUT'].plot(kind='barh', ax=ax, color="lightgreen")
fig.set_size_inches(14, 10)
ax.set_xlabel('Median Earnings')
ax.set_title('Top 20 Universities with Highest Median Earnings (10 years post graduation)')
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
topmedianalt3['MEDIAN EARNINGS, 10 YEARS OUT'].plot(kind='barh', ax=ax, color="orange")
fig.set_size_inches(14, 10)
ax.set_xlabel('Median Earnings')
ax.set_title('20 Universities with the Lowest Median Earnings (10 years post graduation)')
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
inst_summary = pd.pivot_table(newscore2, index=['CONTROL'], values=['MEDIAN EARNINGS, 10 YEARS OUT', 'MEDIAN DEBT'],
aggfunc=np.sum)
inst_summary = inst_summary.reset_index()
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
fig.set_size_inches(12, 6)
n_groups = 3
index = np.arange(n_groups)
bar_width = 0.5
opacity = 1.0
ax.bar(inst_summary['CONTROL'], inst_summary['MEDIAN EARNINGS, 10 YEARS OUT']/inst_summary['MEDIAN DEBT'],
bar_width, alpha=opacity, align='center', color='lightblue')
ax.set_title('Median Earnings to Debt Ratio by Institution Type', fontsize=16)
ax.set_xlabel('Institution Type', fontsize=14)
ax.set_ylabel('Median Earnings to Debt Ratio', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=12)
# Tufte-like axes
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(index + bar_width*2)
ax.set_xticklabels(('Public', 'Private Non Profit', 'Private For Profit'))
ax.xaxis.grid(False)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Debt to Earnings by University
Step2: Debt to Earnings by Locale
Step3: Top 20 Universities by Median Earnings
Step4: 20 Universities with the Lowest Median Earnings
|
6,937
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.integrate as integrate
from scipy.stats import norm
import matplotlib.pyplot as plt
def f_YgivenX(y,x,sigman):
    """Gaussian channel likelihood p(y | x) with noise standard deviation ``sigman``."""
    variance = sigman ** 2
    coeff = 1.0 / (sigman * np.sqrt(2 * np.pi))
    return coeff * np.exp(-0.5 * (y - x) ** 2 / variance)
def f_Y(y,sigman):
    """Marginal density of the channel output Y for equiprobable BPSK inputs x = +/-1."""
    return (f_YgivenX(y, +1, sigman) + f_YgivenX(y, -1, sigman)) / 2
def integrand(y, sigman):
    """Integrand f_Y(y) * log2(f_Y(y)) used for the differential entropy h(Y).

    Densities below 1e-20 are mapped to 0, since y*log2(y) -> 0 as y -> 0;
    this avoids evaluating log2 at (numerically) zero in the tails.
    """
    density = f_Y(y, sigman)
    return 0 if density < 1e-20 else density * np.log2(density)
def C_BIAWGN(sigman):
    """Capacity (bits/channel use) of the binary-input AWGN channel.

    C = h(Y) - h(Y|X), with h(Y) obtained by numerical integration and
    h(Y|X) = 0.5*log2(2*pi*e*sigma^2) for Gaussian noise.
    """
    # numerical integration of the h(Y) part
    h_Y = -integrate.quad(integrand, -np.inf, np.inf, args=(sigman,))[0]
    # take into account h(Y|X)
    return h_Y - 0.5 * np.log2(2 * np.pi * np.e * sigman ** 2)
def V_integrand(z, C, sigman):
    """Integrand of the BI-AWGN channel dispersion in Gaussian-weighted form.

    Overflow of the inner exp can drive either factor to inf; those points
    carry vanishing weight and are clamped to 0.
    """
    var = np.square(sigman)
    info_dev = 1 - np.log2(1 + np.exp(-2 / var + 2 * np.sqrt(2) * z / sigman)) - C
    m1 = np.square(info_dev)
    m2 = np.exp(-np.square(z))
    if np.isinf(m1) or np.isinf(m2):
        return 0
    return m1 * m2
# Channel dispersion V via adaptive quadrature over the whole real line.
def V_BIAWGN(C, sigman):
    """Return the dispersion of the BI-AWGN channel for capacity C, noise std sigman."""
    raw = integrate.quad(V_integrand, -np.inf, np.inf, args=(C, sigman))[0]
    return raw / np.sqrt(np.pi)
# Alternative implementation using Gauss-Hermite Quadrature
x_GH, w_GH = np.polynomial.hermite.hermgauss(40)
def V_BIAWGN_GH(C, sigman):
    """Channel dispersion V via Gauss-Hermite quadrature (module-level nodes x_GH, weights w_GH)."""
    var = np.square(sigman)
    # Evaluate the information-density variance term at each Hermite node.
    values = [np.square(1 - np.log2(1 + np.exp(-2 / var + 2 * np.sqrt(2) * xi / sigman)) - C)
              for xi in x_GH]
    # Weighted sum over nodes; the Gaussian weight exp(-z^2) is absorbed into w_GH.
    weighted = sum(w_GH * values)
    return weighted / np.sqrt(np.pi)
def get_Pe_finite_length(n, r, sigman):
    """Normal-approximation block error probability at blocklength n, rate r, noise std sigman."""
    capacity = C_BIAWGN(sigman)
    dispersion = V_BIAWGN_GH(capacity, sigman)
    # P_e ~= Q((n*(C - r) + 0.5*log2(n)) / sqrt(n*V)); the Q-function is norm.sf.
    argument = (n * (capacity - r) + 0.5 * np.log2(n)) / np.sqrt(n * dispersion)
    return norm.sf(argument)
esno_dB_range = np.linspace(-4,3,100)
esno_lin_range = [10**(esno_db/10) for esno_db in esno_dB_range]
# compute sigma_n
sigman_range = [np.sqrt(1/2/esno_lin) for esno_lin in esno_lin_range]
capacity_BIAWGN = [C_BIAWGN(sigman) for sigman in sigman_range]
Pe_BIAWGN_r12_n100 = [get_Pe_finite_length(100, 0.5, sigman) for sigman in sigman_range]
Pe_BIAWGN_r12_n500 = [get_Pe_finite_length(500, 0.5, sigman) for sigman in sigman_range]
Pe_BIAWGN_r12_n1000 = [get_Pe_finite_length(1000, 0.5, sigman) for sigman in sigman_range]
Pe_BIAWGN_r12_n5000 = [get_Pe_finite_length(5000, 0.5, sigman) for sigman in sigman_range]
fig = plt.figure(1,figsize=(10,7))
plt.semilogy(esno_dB_range, Pe_BIAWGN_r12_n100)
plt.semilogy(esno_dB_range, Pe_BIAWGN_r12_n500)
plt.semilogy(esno_dB_range, Pe_BIAWGN_r12_n1000)
plt.semilogy(esno_dB_range, Pe_BIAWGN_r12_n5000)
plt.axvspan(-4, -2.83, alpha=0.5, color='gray')
plt.axvline(x=-2.83, color='k')
plt.ylim((1e-8,1))
plt.xlim((-4,2))
plt.xlabel('$E_s/N_0$ (dB)', fontsize=16)
plt.ylabel('$P_e$', fontsize=16)
plt.legend(['$n = 100$', '$n=500$','$n=1000$', '$n=5000$'], fontsize=16)
plt.text(-3.2, 1e-4, 'Capacity limit', {'color': 'k', 'fontsize': 20, 'rotation': -90})
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.grid(True)
#plt.savefig('BI_AWGN_Pe_R12.pdf',bbox_inches='tight')
#specify esno
esno = -2.83
n_range = np.linspace(10,2000,100)
sigman = np.sqrt(0.5*10**(-esno/10))
C = C_BIAWGN(sigman)
V = V_BIAWGN_GH(C, sigman)
r_Pe_1em3 = [C - np.sqrt(V/n)*norm.isf(1e-3) + 0.5*np.log2(n)/n for n in n_range]
r_Pe_1em6 = [C - np.sqrt(V/n)*norm.isf(1e-6) + 0.5*np.log2(n)/n for n in n_range]
r_Pe_1em9 = [C - np.sqrt(V/n)*norm.isf(1e-9) + 0.5*np.log2(n)/n for n in n_range]
fig = plt.figure(1,figsize=(10,7))
plt.plot(n_range, r_Pe_1em3)
plt.plot(n_range, r_Pe_1em6)
plt.plot(n_range, r_Pe_1em9)
plt.axhline(y=C, color='k')
plt.ylim((0,0.55))
plt.xlim((0,2000))
plt.xlabel('Length $n$', fontsize=16)
plt.ylabel('Rate $r$ (bit/channel use)', fontsize=16)
plt.legend(['$P_e = 10^{-3}$', '$P_e = 10^{-6}$','$P_e = 10^{-9}$', '$C$'], fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.grid(True)
#plt.savefig('BI_AWGN_r_esno_m283.pdf',bbox_inches='tight')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Conditional pdf $f_{Y|X}(y|x)$ for a channel with noise variance (per dimension) $\sigma_n^2$. This is merely the Gaussian pdf with mean $x$ and variance $\sigma_n^2$
Step2: Output pdf $f_Y(y) = \frac12[f_{Y|X}(y|X=+1)+f_{Y|X}(y|X=-1)]$
Step3: This is the function we like to integrate, $f_Y(y)\cdot\log_2(f_Y(y))$. We need to take special care of the case when the input is 0, as we defined $0\cdot\log_2(0)=0$, which is usually treated as "nan"
Step4: Compute the capacity using numerical integration. We have
Step5: Compute the dispersion of the BI-AWGN channel, which is given by (see, e.g., [1]). This is a
Step6: The finite-length capacity for the BI-AWGN channel is given by
Step7: Show finite length capacity estimates for some codes of different lengths $n$
Step8: Different representation, for a given channel (and here, we pick $E_s/N_0 = -2.83$ dB), show the rate the code should at most have to allow for decoding with an error rate $P_e$ (here we specify different $P_e$) if a certain length $n$ is available.
|
6,938
|
<ASSISTANT_TASK:>
Python Code:
def sum_p(X):
    """Sum the integers 0 .. int(X)-1 with an explicit interpreted loop.

    The loop is deliberate: this is the pure-Python baseline benchmarked
    against the numba-compiled variant, so do not replace it with
    sum(range(...)) or the closed-form formula.
    """
    total = 0
    limit = int(X)
    for value in range(limit):
        total += value
    return total
from numba import jit
# Same loop as sum_p, but JIT-compiled to machine code by numba on first call,
# so the two can be benchmarked against each other.
@jit
def sum_j(X):
    # Sum of the integers 0 .. int(X)-1, accumulated with an explicit loop.
    y = 0
    for x_i in range(int(X)):
        y += x_i
    return y
import os
import time
import pandas as pd
import matplotlib
%matplotlib inline
# Different platforms require different functions to properly measure current timestamp:
if os.name == 'nt':
now = time.clock
else:
now = time.time
def run_benchmarks(functions, call_parameters, num_times,
                   logy=False, logx=False):
    """Time each function num_times times on call_parameters and plot the durations.

    functions: iterable of callables to benchmark.
    call_parameters: tuple of positional arguments passed to every function.
    num_times: number of repeated timed calls per function.
    logy / logx: use a logarithmic y / x axis in the resulting plot.
    """
    # Executes one function several times and measure performances:
    def _apply_function(function, num_times):
        for j in range(num_times):
            t_0 = now()  # `now` is the platform-appropriate clock chosen above
            y = function(*call_parameters)
            duration = (now() - t_0)
            yield float(duration)
    # Legend label rendered as LaTeX, e.g. "${sum_p(x)}$".
    def _name(function):
        return '${' + function.__name__ + '(x)}$'
    # Execute all functions the requested number of times and collects durations:
    def _apply_functions(functions, num_times):
        for function in functions:
            yield pd.Series(_apply_function(function, num_times),
                            name=_name(function))
    # Collects and plots the results:
    df = pd.concat(_apply_functions(functions, num_times),
                   axis=1)
    ax = df.plot(figsize=(10, 5),
                 logy=logy,
                 logx=logx,
                 title='$T[f(x)]$ in seconds',
                 style='o-')
run_benchmarks(functions=[sum_p, sum_j],
call_parameters=(10000000,),
num_times=5,
logy=True) # Logarithmic scale
run_benchmarks(functions=[sum_j],
call_parameters=(1000000000000000.,),
num_times=5,
logy=True) # Logarithmic scale
from numba import jit
# Float-typed variant of sum_j: both the accumulator and the counter are
# machine floats, and the range() call is replaced by a while loop.
# NOTE(review): presumably chosen so numba can handle the very large
# argument (1e15) used in the benchmark above -- confirm.
@jit
def sum_j(x):
    y = 0.
    x_i = 0.
    while x_i < x:
        y += x_i
        x_i += 1.
    return y
%load_ext Cython
%%cython
def sum_c(double x):
cdef double y = 0.
cdef double x_i = 0.
while x_i < x:
y += x_i
x_i += 1.
return y
run_benchmarks(functions=[sum_j, sum_c],
call_parameters=(1000000000.,),
num_times=10)
%%cython --annotate
def sum_c(double x):
cdef double y = 0.
cdef double x_i = 0.
while x_i < x:
y += x_i
x_i += 1.
return y
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we define $sum_j(x)$ that is identical but just with decorator @jit in the definition.
Step2: Let's benchmark them!
Step3: Benchmark results
Step4: Numba caching
Step5: Numba JIT functionality works in the following way
Step6: About Cython
Step7: The numba-jitted function is comparable with the cythonized one; let's check the C code Cython generated, just to give us an idea of the efficiency of the code produced.
|
6,939
|
<ASSISTANT_TASK:>
Python Code:
# Test fixture: ZeroPadding1D(padding=1) applied to a (3, 5) input.
data_in_shape = (3, 5)
L = ZeroPadding1D(padding=1)
# Single-layer model so model.predict returns the layer's raw output.
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(240)
data_in = 2 * np.random.random(data_in_shape) - 1  # uniform in [-1, 1)
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
# Record the input/expected pair for the Keras.js test-suite export below.
DATA['convolutional.ZeroPadding1D.0'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4)
L = ZeroPadding1D(padding=3)
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(241)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.ZeroPadding1D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4)
L = ZeroPadding1D(padding=(3,2))
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(242)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.ZeroPadding1D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
import os

# Write the collected fixtures to the Keras.js test-data file.
filename = '../../../test/data/layers/convolutional/ZeroPadding1D.json'
# exist_ok=True makes this race-free and idempotent, unlike the
# check-then-create pattern (os.path.exists followed by os.makedirs).
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
    json.dump(DATA, f)

print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [convolutional.ZeroPadding1D.1] padding 3 on 4x4 input
Step2: [convolutional.ZeroPadding1D.2] padding (3,2) on 4x4 input
Step3: export for Keras.js tests
|
6,940
|
<ASSISTANT_TASK:>
Python Code:
#Begin spark session
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
#Create pysplice context. Allows you to create a Spark dataframe using our Native Spark DataSource
from splicemachine.spark import PySpliceContext
splice = PySpliceContext(spark)
#Initialize our Feature Store API
from splicemachine.features import FeatureStore
from splicemachine.features.constants import FeatureType
fs = FeatureStore(splice)
#Initialize MLFlow
from splicemachine.mlflow_support import *
mlflow.register_feature_store(fs)
mlflow.register_splice_context(splice)
%%sql
SELECT ltv.CUSTOMERID,
((w.WEEK_END_DATE - ltv.CUSTOMER_START_DATE)/ 7) CUSTOMERWEEK,
CAST(w.WEEK_END_DATE as TIMESTAMP) CUSTOMER_TS,
ltv.CUSTOMER_LIFETIME_VALUE as CUSTOMER_LTV
FROM retail_rfm.weeks w --splice-properties useSpark=True
INNER JOIN
twimlcon_fs.customer_lifetime ltv
ON w.WEEK_END_DATE >= ltv.CUSTOMER_START_DATE AND w.WEEK_END_DATE <= ltv.CUSTOMER_START_DATE + 28 --only first 4 weeks
ORDER BY 1,2
{limit 8}
;
sql =
SELECT ltv.CUSTOMERID,
((w.WEEK_END_DATE - ltv.CUSTOMER_START_DATE)/ 7) CUSTOMERWEEK,
CAST(w.WEEK_END_DATE as TIMESTAMP) CUSTOMER_TS,
ltv.CUSTOMER_LIFETIME_VALUE as CUSTOMER_LTV
FROM retail_rfm.weeks w --splice-properties useSpark=True
INNER JOIN
twimlcon_fs.customer_lifetime ltv
ON w.WEEK_END_DATE > ltv.CUSTOMER_START_DATE AND w.WEEK_END_DATE <= ltv.CUSTOMER_START_DATE + 28 --only first 4 weeks
pks = ['CUSTOMERID','CUSTOMERWEEK'] # Each unique training row is identified by the customer and their week of spending activity
join_keys = ['CUSTOMERID'] # This is the primary key of the Feature Sets that we want to join to
fs.create_training_view(
'twimlcon_customer_lifetime_value',
sql=sql,
primary_keys=pks,
join_keys=join_keys,
ts_col = 'CUSTOMER_TS', # How we join each unique row with our eventual Features
label_col='CUSTOMER_LTV', # The thing we want to predict
desc = 'The current (as of queried) lifetime value of each customer per week of being a customer'
)
#Spark Dataframe
all_features = fs.get_training_set_from_view('twimlcon_customer_lifetime_value')
all_features.limit(8).toPandas()
#SQL used to generate the Dataframe
sql = fs.get_training_set_from_view('twimlcon_customer_lifetime_value',return_sql=True)
print(sql)
import re
# get training set as a SQL statement
feats = fs.get_training_view_features('twimlcon_customer_lifetime_value')
# Grab only up to 4 weeks of RFM values
desired_features = ['CUSTOMER_LIFETIME_DAYS'] + [f.name for f in feats if re.search('_[0-4]W',f.name)]
all_features = fs.get_training_set_from_view('twimlcon_customer_lifetime_value', features = desired_features).dropna()
top_features, feature_importances = fs.run_feature_elimination(
all_features,
features=desired_features,
label = 'CUSTOMER_LTV',
n = 10,
verbose=2,
step=30,
model_type='regression',
log_mlflow=True,
mlflow_run_name='Feature_Elimination_LTV',
return_importances=True
)
model_training_df = fs.get_training_set_from_view('twimlcon_customer_lifetime_value', features = top_features).dropna()
from splicemachine.notebook import get_mlflow_ui
get_mlflow_ui()
###############
# SparkML Model
###############
from pyspark.ml.regression import LinearRegression, RandomForestRegressor
from pyspark.ml.feature import VectorAssembler,StandardScaler
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import RegressionEvaluator
mlflow.set_experiment('Predict Lifetime Value from Initial Customer Activity')
run_tags={'project': 'TWIMLcon Demo',
'team': 'INSERT YOUR NAME HERE'
}
features_list = [f.name for f in top_features]
features_str = ','.join(features_list)
va = VectorAssembler(inputCols=features_list, outputCol='features_raw')
scaler = StandardScaler(inputCol="features_raw", outputCol="features")
with mlflow.start_run(run_name = f"Regression LTV", tags = run_tags):
lr = LinearRegression(featuresCol = 'features', labelCol = 'CUSTOMER_LTV', maxIter=10, regParam=0.3, elasticNetParam=0.8)
#lr = RandomForestRegressor(featuresCol = 'features', labelCol = 'CUSTOMER_LTV')
pipeline = Pipeline( stages=[va, scaler, lr])
# log everything
mlflow.log_feature_transformations(pipeline)
mlflow.log_pipeline_stages(pipeline)
#train
train,test = model_training_df.randomSplit([0.80,0.20])
model = pipeline.fit(train)
predictions = model.transform(test)
lr_model = model.stages[-1]
print("Coefficients: " + str(lr_model.coefficients))
print("Intercept: " + str(lr_model.intercept))
# log metric
pred_evaluator = RegressionEvaluator(predictionCol="prediction", labelCol="CUSTOMER_LTV",metricName="r2")
r2 = pred_evaluator.evaluate(predictions)
print("R Squared (R2) on test data = %g" % r2)
mlflow.log_metric('r2',r2)
mlflow.log_model(model)
run_id = mlflow.current_run_id()
from splicemachine.notebook import get_mlflow_ui
get_mlflow_ui()
%store features_list
%store features_str
spark.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Write any SQL to get your label. The label doesn't have to be apart of the Feature Store
Step3: Create a Training View
Step4: Easily extract all features
Step5: Automatic Feature Selection
Step6: Train a Machine Learning Model
Step7: Store most important features for use in the next jupyter notebook
|
6,941
|
<ASSISTANT_TASK:>
Python Code:
import os
import ml_metadata
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2
from tfx.orchestration import metadata
from tfx.types import standard_artifacts
!python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))"
!python -c "import kfp; print('KFP version: {}'.format(kfp.__version__))"
import yaml
# Set `PATH` to include the directory containing TFX CLI.
PATH=%env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
%cd pipeline
!gsutil ls
#TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT.
GCP_REGION = 'us-central1'
ARTIFACT_STORE_URI = 'gs://dougkelly-sandbox-kubeflowpipelines-default' #Change
ENDPOINT = '60ff837483ecde05-dot-us-central2.pipelines.googleusercontent.com' #Change
CUSTOM_SERVICE_ACCOUNT = 'tfx-tuner-caip-service-account@dougkelly-sandbox.iam.gserviceaccount.com' #Change
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
# Set your resource settings as environment variables. These override the default values in pipeline/config.py.
%env GCP_REGION={GCP_REGION}
%env ARTIFACT_STORE_URI={ARTIFACT_STORE_URI}
%env CUSTOM_SERVICE_ACCOUNT={CUSTOM_SERVICE_ACCOUNT}
%env PROJECT_ID={PROJECT_ID}
PIPELINE_NAME = 'tfx_covertype_lab_04'
MODEL_NAME = 'tfx_covertype_classifier'
DATA_ROOT_URI = 'gs://workshop-datasets/covertype/small'
CUSTOM_TFX_IMAGE = 'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME)
RUNTIME_VERSION = '2.3'
PYTHON_VERSION = '3.7'
USE_KFP_SA=False
ENABLE_TUNING=False
%env PIPELINE_NAME={PIPELINE_NAME}
%env MODEL_NAME={MODEL_NAME}
%env DATA_ROOT_URI={DATA_ROOT_URI}
%env KUBEFLOW_TFX_IMAGE={CUSTOM_TFX_IMAGE}
%env RUNTIME_VERSION={RUNTIME_VERSION}
%env PYTHON_VERIONS={PYTHON_VERSION}
%env USE_KFP_SA={USE_KFP_SA}
%env ENABLE_TUNING={ENABLE_TUNING}
!tfx pipeline compile --engine kubeflow --pipeline_path runner.py
!tfx pipeline create \
--pipeline_path=runner.py \
--endpoint={ENDPOINT} \
--build_target_image={CUSTOM_TFX_IMAGE}
!tfx pipeline update --pipeline_path runner.py --endpoint {ENDPOINT}
!tfx run create --pipeline_name={PIPELINE_NAME} --endpoint={ENDPOINT}
grpc_host = 'localhost'
grpc_port = 7000
connection_config = metadata_store_pb2.MetadataStoreClientConfig()
connection_config.host = grpc_host
connection_config.port = grpc_port
store = metadata_store.MetadataStore(connection_config)
for artifact_type in store.get_artifact_types():
print(artifact_type.name)
for execution_type in store.get_execution_types():
print(execution_type.name)
for context_type in store.get_context_types():
print(context_type.name)
# Query ML Metadata for the schema, statistics and anomalies artifacts
# produced by the pipeline; [-1] selects the most recent run's artifact.
with metadata.Metadata(connection_config) as store:
    schema_artifacts = store.get_artifacts_by_type(standard_artifacts.Schema.TYPE_NAME)
    stats_artifacts = store.get_artifacts_by_type(standard_artifacts.ExampleStatistics.TYPE_NAME)
    anomalies_artifacts = store.get_artifacts_by_type(standard_artifacts.ExampleAnomalies.TYPE_NAME)
schema_file = os.path.join(schema_artifacts[-1].uri, 'schema.pbtxt')
# Fixed typo in the message: "schame" -> "schema".
print("Generated schema file:{}".format(schema_file))
stats_path = stats_artifacts[-1].uri
train_stats_file = os.path.join(stats_path, 'train', 'stats_tfrecord')
eval_stats_file = os.path.join(stats_path, 'eval', 'stats_tfrecord')
print("Train stats file:{}, Eval stats file:{}".format(
    train_stats_file, eval_stats_file))
anomalies_path = anomalies_artifacts[-1].uri
train_anomalies_file = os.path.join(anomalies_path, 'train', 'anomalies.pbtxt')
eval_anomalies_file = os.path.join(anomalies_path, 'eval', 'anomalies.pbtxt')
print("Train anomalies file:{}, Eval anomalies file:{}".format(
    train_anomalies_file, eval_anomalies_file))
schema = tfdv.load_schema_text(schema_file)
tfdv.display_schema(schema=schema)
train_stats = tfdv.load_statistics(train_stats_file)
eval_stats = tfdv.load_statistics(eval_stats_file)
tfdv.visualize_statistics(lhs_statistics=eval_stats, rhs_statistics=train_stats,
lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET')
train_anomalies = tfdv.load_anomalies_text(train_anomalies_file)
tfdv.display_anomalies(train_anomalies)
eval_anomalies = tfdv.load_anomalies_text(eval_anomalies_file)
tfdv.display_anomalies(eval_anomalies)
with metadata.Metadata(connection_config) as store:
model_eval_artifacts = store.get_artifacts_by_type(standard_artifacts.ModelEvaluation.TYPE_NAME)
hyperparam_artifacts = store.get_artifacts_by_type(standard_artifacts.HyperParameters.TYPE_NAME)
model_eval_path = model_eval_artifacts[-1].uri
print("Generated model evaluation result:{}".format(model_eval_path))
best_hparams_path = os.path.join(hyperparam_artifacts[-1].uri, 'best_hyperparameters.txt')
print("Generated model best hyperparameters result:{}".format(best_hparams_path))
# Latest pipeline run Tuner search space.
json.loads(file_io.read_file_to_string(best_hparams_path))['space']
# Latest pipeline run Tuner searched best_hyperparameters artifacts.
json.loads(file_io.read_file_to_string(best_hparams_path))['values']
eval_result = tfma.load_eval_result(model_eval_path)
tfma.view.render_slicing_metrics(
eval_result, slicing_column='Wilderness_Area')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Option 1
Step2: The pipeline source can be found in the pipeline folder. Switch to the pipeline folder and compile the pipeline.
Step3: 2.1 Create AI Platform Pipelines cluster
Step4: CUSTOM_SERVICE_ACCOUNT - In the GCP console, click on the Navigation Menu. Navigate to IAM & Admin, then to Service Accounts and use the service account starting with the prefix 'tfx-tuner-caip-service-account'. This enables CloudTuner and the Google Cloud AI Platform extensions Tuner component to work together and allows for distributed and parallel tuning backed by AI Platform Vizier's hyperparameter search algorithm. Please see the lab setup README for setup instructions.
Step5: 2.3 Compile pipeline
Step6: 2.4 Deploy pipeline to AI Platform
Step7: (optional) If you make local changes to the pipeline, you can update the deployed package on AI Platform with the following command
Step8: 2.5 Create and monitor pipeline run
Step9: 2.6 Configure Kubernetes port forwarding
Step10: Connect to ML Metadata service
Step11: Important
Step12: Display the registered execution types.
Step13: List the registered context types.
Step14: Visualizing TFX artifacts
Step15: Visualize schema
Step16: Visualize statistics
Step17: Visualize anomalies
Step18: Retrieve model artifacts
Step19: Return best hyperparameters
Step20: Visualize model evaluations
|
6,942
|
<ASSISTANT_TASK:>
Python Code:
# Work in a temporary directory
import tempfile
import os
os.chdir(tempfile.mkdtemp())
# Since this is running from an IPython notebook,
# we prefix all our commands with "!"
# When running on the command line, omit the leading "!"
! msmb -h
! msmb FsPeptide --data_home ./
! tree
# Remember '\' is the line-continuation marker
# You can enter this command on one line
! msmb DihedralFeaturizer \
--out featurizer.pkl \
--transformed diheds \
--top fs_peptide/fs-peptide.pdb \
--trjs "fs_peptide/*.xtc" \
--stride 10
! msmb RobustScaler \
-i diheds \
--transformed scaled_diheds.h5
! msmb tICA -i scaled_diheds.h5 \
--out tica_model.pkl \
--transformed tica_trajs.h5 \
--n_components 4 \
--lag_time 2
from msmbuilder.dataset import dataset
ds = dataset('tica_trajs.h5')
%matplotlib inline
import msmexplorer as msme
import numpy as np
txx = np.concatenate(ds)
_ = msme.plot_histogram(txx)
! msmb MiniBatchKMeans -i tica_trajs.h5 \
--transformed labeled_trajs.h5 \
--out clusterer.pkl \
--n_clusters 100 \
--random_state 42
! msmb MarkovStateModel -i labeled_trajs.h5 \
--out msm.pkl \
--lag_time 2
from msmbuilder.utils import load
msm = load('msm.pkl')
clusterer = load('clusterer.pkl')
assignments = clusterer.partial_transform(txx)
assignments = msm.partial_transform(assignments)
from matplotlib import pyplot as plt
msme.plot_free_energy(txx, obs=(0, 1), n_samples=10000,
pi=msm.populations_[assignments],
xlabel='tIC 1', ylabel='tIC 2')
plt.scatter(clusterer.cluster_centers_[msm.state_labels_, 0],
clusterer.cluster_centers_[msm.state_labels_, 1],
s=1e4 * msm.populations_, # size by population
c=msm.left_eigenvectors_[:, 1], # color by eigenvector
cmap="coolwarm",
zorder=3
)
plt.colorbar(label='First dynamical eigenvector')
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get example data
Step2: Featurization
Step3: Preprocessing
Step4: Intermediate kinetic model
Step5: tICA Histogram
Step6: Clustering
Step7: MSM
Step8: Plot Free Energy Landscape
|
6,943
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
def pas_de_voyelle(mot):
    """Return mot with the lowercase vowels a, e, i, o, u, y removed."""
    return "".join(c for c in mot if c not in "aeiouy")
pas_de_voyelle("bonjour"), pas_de_voyelle("au revoir")
mat = [[0,1,0],[0,0,1]]
mat_dict = { }
for i,line in enumerate(mat) :
for j,c in enumerate(line) :
if c != 0 :
mat_dict[i,j] = c
mat_dict
sum ( 1/i for i in range(1,11) )
from math import log
s = 0
N = 100
while N > 1 :
for i in range(1, N):
s += log(i)
N //= 2
print(s)
def calcul(N):
    """Count how many log additions the halving double loop performs (~2N total)."""
    running_sum = 0.0
    additions = 0
    current = N
    while current > 1:
        for k in range(1, current):
            running_sum += log(k)
            additions += 1
        # Halve the problem size: total work is N + N/2 + N/4 + ... < 2N.
        current //= 2
    return additions
for i in range(10000,100000, 10000) :
print( i, calcul(i), i * 2 )
def strange(mot):
    """Interleave characters taken from both ends: first, last, second, second-to-last, ..."""
    half = len(mot) // 2
    pieces = [mot[i] + mot[-(i + 1)] for i in range(half)]
    if len(mot) % 2:
        # Odd length: the middle character has no partner, append it once.
        pieces.append(mot[half])
    return "".join(pieces)
dictionnaire_depart = { "cle1":"valeur1", "cle2":"valeur2" }
dictionnaire_retourne = { }
for k,v in dictionnaire_depart.items():
dictionnaire_retourne[v] = k
dictionnaire_retourne
dictionnaire_depart = { "cle1":"valeur1", "cle2":"valeur2" }
print ( dictionnaire_depart.items() )
print ( list ( dictionnaire_depart.items() ) )
dictionnaire_depart.items() [0]
def ensemble(a, b):
    """Return the list of values a, a+1, a+2, ... strictly below b (eager counterpart of iterateur)."""
    values = []
    current = a
    # A while loop (rather than range) preserves behavior for non-integer a.
    while current < b:
        values.append(current)
        current += 1
    return values
def iterateur(a, b):
    """Generator yielding a, a+1, a+2, ... while strictly below b (lazy counterpart of ensemble)."""
    current = a
    while current < b:
        yield current
        current += 1
print( iterateur(0,10) )
print( ensemble(0,10) )
iterateur(0,10) [0]
for x in iterateur(0,10):
print(x)
from math import exp
1/1000 * sum ( exp ( i / 1000 ) for i in range(1,1001) )
from math import log
s = 0
ii = 1
N = 7
for i in range(1,N):
ii *= 2
for k in range(1,ii):
s += log(k)
print(s)
from math import log
def calcul(N):
    """Count the log additions of the doubling double loop: sum of (2^i - 1) for i = 1..N-1."""
    running_sum = 0.0
    power = 1
    additions = 0
    for _ in range(1, N):
        power *= 2
        for k in range(1, power):
            running_sum += log(k)
            additions += 1
    # Total is (2^N - 2) - (N - 1), i.e. O(2^N) operations.
    return additions
for N in range(10,20):
print(calcul(N), 2**N)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enoncé 1
Step2: Cette réponse n'est qu'une réponse parmi d'autres. Certains utilisaient la méthode replace, d'autres un test c == "a" or c == "e" ....
Step3: Pour cette question, le code écrit doit fonctionner pour n'importe quelle matrice.
Step4: Q4
Step5: La première boucle s'exécute pour les valeurs $N$, $N/2$, $N/4$, ... jusqu'à ce que $N \leqslant 1$. La boucle imbriquée fait la somme des $log$ de 1 à $N$. Le nombre des opérations est en $O(N + N/2 + N/4 + ...)$, soit quelque chose comme $N \sum_{i=1}^{\ln_2 N} \frac{1}{2^i} \leqslant N \sum_{i=1}^{\infty} \frac{1}{2^i} \leqslant 2N$ (c'est une somme géométrique). On vérifie avec le code suivant qui compte le nombre de fois où on ajoute un logarithme.
Step6: Enoncé 2
Step7: Q2
Step8: La méthode items retourne un itérateur et non une liste. Un itérateur n'est pas un ensemble mais une façon de parcourir tous les éléments d'un ensemble.
Step9: Le python est un langage paresseux car très lent. Il faut lui demander de façon explicite de construire un ensemble ou de copier un ensemble. Par défaut, il ne copie jamais un dictionnaire ou une liste et il préfère retourner un itérateur plutôt qu'une copie d'un ensemble. La plupart du temps, on ne s'en aperçoit pas à moins de vouloir accéder à un élément précis de l'ensemble
Step10: La fonction ensemble suivante retourne une liste d'éléments, la fonction iterateur retourne une façon de parcourir un ensemble. On appelle ce type de fonction un générateur.
Step11: On ne peut accéder aux éléments d'un générateur car cela n'a pas de sens
Step12: Mais on peut parcourir les éléments générés
Step13: Q3
Step14: Q4
Step15: A chaque itération $i$, on calcule $2^i$ logarithmes. On fait $N$ itérations soit $1 + 2 + 4 + ... + 2^N$ calculs, c'est-à-dire environ $O(1 + 2^1 + 2^2 + 2^3 + ... + 2^N) = O(2^{N+1}) = O(2^N)$ (c'est une somme géométrique).
|
6,944
|
<ASSISTANT_TASK:>
Python Code:
import os
import matplotlib.pyplot as plt
import torch
import pyro
import pyro.contrib.gp as gp
import pyro.distributions as dist
smoke_test = ('CI' in os.environ) # ignore; used to check code integrity in the Pyro repo
assert pyro.__version__.startswith('1.7.0')
pyro.set_rng_seed(0)
# note that this helper function does three different things:
# (i) plots the observed data;
# (ii) plots the predictions from the learned GP after conditioning on data;
# (iii) plots samples from the GP prior (with no conditioning on observed data)
def plot(plot_observed_data=False, plot_predictions=False, n_prior_samples=0,
         model=None, kernel=None, n_test=500):
    """Plot observed data, GP posterior predictions, and/or GP prior samples.

    Relies on the module-level training tensors X and y.
    plot_observed_data: scatter the training points (X, y).
    plot_predictions: plot the posterior mean +/- 2 sd of `model` on a test grid.
    n_prior_samples: number of functions sampled from the prior of `kernel`.
    n_test: number of evenly spaced test inputs in [-0.5, 5.5].
    """
    plt.figure(figsize=(12, 6))
    if plot_observed_data:
        plt.plot(X.numpy(), y.numpy(), 'kx')
    if plot_predictions:
        Xtest = torch.linspace(-0.5, 5.5, n_test)  # test inputs
        # compute predictive mean and variance
        with torch.no_grad():
            if type(model) == gp.models.VariationalSparseGP:
                mean, cov = model(Xtest, full_cov=True)
            else:
                mean, cov = model(Xtest, full_cov=True, noiseless=False)
        sd = cov.diag().sqrt()  # standard deviation at each input point x
        plt.plot(Xtest.numpy(), mean.numpy(), 'r', lw=2)  # plot the mean
        plt.fill_between(Xtest.numpy(),  # plot the two-sigma uncertainty about the mean
                         (mean - 2.0 * sd).numpy(),
                         (mean + 2.0 * sd).numpy(),
                         color='C0', alpha=0.3)
    if n_prior_samples > 0:  # plot samples from the GP prior
        Xtest = torch.linspace(-0.5, 5.5, n_test)  # test inputs
        # VariationalSparseGP keeps the observation noise on its likelihood;
        # the other GP models expose it directly as model.noise.
        noise = (model.noise if type(model) != gp.models.VariationalSparseGP
                 else model.likelihood.variance)
        cov = kernel.forward(Xtest) + noise.expand(n_test).diag()
        samples = dist.MultivariateNormal(torch.zeros(n_test), covariance_matrix=cov)\
            .sample(sample_shape=(n_prior_samples,))
        plt.plot(Xtest.numpy(), samples.numpy().T, lw=2, alpha=0.4)
    plt.xlim(-0.5, 5.5)
N = 20
X = dist.Uniform(0.0, 5.0).sample(sample_shape=(N,))
y = 0.5 * torch.sin(3*X) + dist.Normal(0.0, 0.2).sample(sample_shape=(N,))
plot(plot_observed_data=True) # let's plot the observed data
kernel = gp.kernels.RBF(input_dim=1, variance=torch.tensor(5.),
lengthscale=torch.tensor(10.))
gpr = gp.models.GPRegression(X, y, kernel, noise=torch.tensor(1.))
plot(model=gpr, kernel=kernel, n_prior_samples=2)
kernel2 = gp.kernels.RBF(input_dim=1, variance=torch.tensor(0.1),
lengthscale=torch.tensor(10.))
gpr2 = gp.models.GPRegression(X, y, kernel2, noise=torch.tensor(0.1))
plot(model=gpr2, kernel=kernel2, n_prior_samples=2)
optimizer = torch.optim.Adam(gpr.parameters(), lr=0.005)
loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
losses = []
num_steps = 2500 if not smoke_test else 2
for i in range(num_steps):
optimizer.zero_grad()
loss = loss_fn(gpr.model, gpr.guide)
loss.backward()
optimizer.step()
losses.append(loss.item())
# let's plot the loss curve after 2500 steps of training
plt.plot(losses);
plot(model=gpr, plot_observed_data=True, plot_predictions=True)
gpr.kernel.variance.item()
gpr.kernel.lengthscale.item()
gpr.noise.item()
# Define the same model as before.
pyro.clear_param_store()
kernel = gp.kernels.RBF(input_dim=1, variance=torch.tensor(5.),
lengthscale=torch.tensor(10.))
gpr = gp.models.GPRegression(X, y, kernel, noise=torch.tensor(1.))
# note that our priors have support on the positive reals
gpr.kernel.lengthscale = pyro.nn.PyroSample(dist.LogNormal(0.0, 1.0))
gpr.kernel.variance = pyro.nn.PyroSample(dist.LogNormal(0.0, 1.0))
optimizer = torch.optim.Adam(gpr.parameters(), lr=0.005)
loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
losses = []
num_steps = 2500 if not smoke_test else 2
for i in range(num_steps):
optimizer.zero_grad()
loss = loss_fn(gpr.model, gpr.guide)
loss.backward()
optimizer.step()
losses.append(loss.item())
plt.plot(losses);
plot(model=gpr, plot_observed_data=True, plot_predictions=True)
# tell gpr that we want to get samples from guides
gpr.set_mode('guide')
print('variance = {}'.format(gpr.kernel.variance))
print('lengthscale = {}'.format(gpr.kernel.lengthscale))
print('noise = {}'.format(gpr.noise))
N = 1000
X = dist.Uniform(0.0, 5.0).sample(sample_shape=(N,))
y = 0.5 * torch.sin(3*X) + dist.Normal(0.0, 0.2).sample(sample_shape=(N,))
plot(plot_observed_data=True)
# initialize the inducing inputs
Xu = torch.arange(20.) / 4.0
# initialize the kernel and model
pyro.clear_param_store()
kernel = gp.kernels.RBF(input_dim=1)
# we increase the jitter for better numerical stability
sgpr = gp.models.SparseGPRegression(X, y, kernel, Xu=Xu, jitter=1.0e-5)
# the way we setup inference is similar to above
optimizer = torch.optim.Adam(sgpr.parameters(), lr=0.005)
loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
losses = []
num_steps = 2500 if not smoke_test else 2
for i in range(num_steps):
optimizer.zero_grad()
loss = loss_fn(sgpr.model, sgpr.guide)
loss.backward()
optimizer.step()
losses.append(loss.item())
plt.plot(losses);
# let's look at the inducing points we've learned
print("inducing points:\n{}".format(sgpr.Xu.data.numpy()))
# and plot the predictions from the sparse GP
plot(model=sgpr, plot_observed_data=True, plot_predictions=True)
# initialize the inducing inputs
Xu = torch.arange(10.) / 2.0
# initialize the kernel, likelihood, and model
pyro.clear_param_store()
kernel = gp.kernels.RBF(input_dim=1)
likelihood = gp.likelihoods.Gaussian()
# turn on "whiten" flag for more stable optimization
vsgp = gp.models.VariationalSparseGP(X, y, kernel, Xu=Xu, likelihood=likelihood, whiten=True)
# instead of defining our own training loop, we will
# use the built-in support provided by the GP module
num_steps = 1500 if not smoke_test else 2
losses = gp.util.train(vsgp, num_steps=num_steps)
plt.plot(losses);
plot(model=vsgp, plot_observed_data=True, plot_predictions=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Throughout the tutorial we'll want to visualize GPs. So we define a helper function for plotting
Step2: Data
Step3: Define model
Step4: Let's see what samples from this GP function prior look like. Note that this is before we've conditioned on the data. The shape these functions take—their smoothness, their vertical scale, etc.—is controlled by the GP kernel.
Step5: For example, if we make variance and noise smaller we will see function samples with smaller vertical amplitude
Step6: Inference
Step7: Let's see if we've learned anything reasonable
Step8: Here the thick red curve is the mean prediction and the blue band represents the 2-sigma uncertainty around the mean. It seems we learned reasonable kernel hyperparameters, as both the mean and uncertainty give a reasonable fit to the data. (Note that learning could have easily gone wrong if we e.g. chose too large of a learning rate or chose bad initial hyperparameters.)
Step9: The period of the sinusoid that generated the data is $T = 2\pi/3 \approx 2.09$ so learning a lengthscale that's approximately equal to a quarter period makes sense.
Step10: Let's inspect the hyperparameters we've learned
Step11: Note that the MAP values are different from the MLE values due to the prior.
Step12: Using the sparse GP is very similar to using the basic GP used above. We just need to add an extra parameter $X_u$ (the inducing points).
Step13: We can see that the model learns a reasonable fit to the data. There are three different sparse approximations that are currently implemented in Pyro
|
6,945
|
<ASSISTANT_TASK:>
Python Code:
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style("white")
# Load the burrito-ratings spreadsheet export.
filename = "burrito_current.csv"
df = pd.read_csv(filename)
N = df.shape[0]  # number of rated burritos

# Rating dimensions to report extremes for.
m_best = ['Volume', 'Cost', 'Tortilla', 'Temp', 'Meat', 'Fillings', 'Meat:filling',
          'Uniformity', 'Salsa', 'Synergy', 'Wrap', 'overall', 'Google', 'Yelp']

# For each dimension, print the location (and score) of the best and worst
# burrito.  idxmax/idxmin are computed once per metric instead of twice.
for m in m_best:
    print(m)
    hi = df[m].idxmax()
    lo = df[m].idxmin()
    print('High,', df.Location[hi], df[m][hi])
    print('Low,', df.Location[lo], df[m][lo])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Find the best location for each dimension
|
6,946
|
<ASSISTANT_TASK:>
Python Code:
# Problem 1: squaring strings raises TypeError; catch it per element.
for i in ['a', 'b', 'c']:
    try:
        result = i ** 2
    except TypeError:
        print("Type error attempting to run on {i}".format(i=i))
    else:
        print(result)

# Problem 2: division by zero, with a finally-clause that always runs.
x = 5
y = 0
try:
    z = x / y
except ZeroDivisionError:
    print("Cannot divide by zero")
finally:
    print('all done')

# Problem 3: keep prompting until the user enters a valid integer, then
# print its square.
def ask():
    """Repeatedly prompt for an integer and print its square.

    Invalid input is reported and the prompt repeats.  As in the original,
    the loop never exits on its own — it keeps asking after each success.
    """
    while True:
        try:
            # `value` instead of `input` — the original shadowed the builtin.
            value = int(input("Enter an integer: "))
        except ValueError:
            # Narrowed from a bare `except:` — only conversion failures
            # should be swallowed here.
            print('Could not make conversion. Try again')
        else:
            print(value ** 2)

ask()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 2
Step2: Problem 3
|
6,947
|
<ASSISTANT_TASK:>
Python Code:
from yargy import Parser, rule, and_
from yargy.predicates import gram, is_capitalized, dictionary

# Grammar for simple geographic names: a capitalized adjective, optionally
# followed by more adjectives, ending in "федерация"/"республика"
# ("federation"/"republic").
GEO = rule(
    and_(
        gram('ADJF'), # 'ADJF' is the pymorphy2 tag for an adjective; the full
        # grammeme list: http://pymorphy2.readthedocs.io/en/latest/user/grammemes.html
        is_capitalized()
    ),
    gram('ADJF').optional().repeatable(),
    dictionary({
        'федерация',
        'республика'
    })
)
parser = Parser(GEO)
text = '''
В Чеченской республике на день рождения ...
Донецкая народная республика провозгласила ...
Башня Федерация — одна из самых высоких ...
'''
# Print the token values of every match found in the sample text.
for match in parser.findall(text):
    print([_.value for _ in match.tokens])
from yargy import Parser
from yargy.predicates import gram
from yargy.pipelines import morph_pipeline
from yargy.interpretation import fact
from IPython.display import display

# Fact types produced by interpretation: a Person is a position plus a Name;
# a Name is a first/last pair.
Person = fact(
    'Person',
    ['position', 'name']
)
Name = fact(
    'Name',
    ['first', 'last']
)
# Gazetteer of position titles, matched in any morphological form
# ("премьер министр" = "prime minister", "президент" = "president").
POSITION = morph_pipeline([
    'премьер министр',
    'президент'
])
# First name ('Name' grammeme) followed by surname ('Surn'); .inflected()
# normalizes each component to its dictionary form.
NAME = rule(
    gram('Name').interpretation(
        Name.first.inflected()
    ),
    gram('Surn').interpretation(
        Name.last.inflected()
    )
).interpretation(
    Name
)
# Full person mention: a position title followed by a name.
PERSON = rule(
    POSITION.interpretation(
        Person.position.inflected()
    ),
    NAME.interpretation(
        Person.name
    )
).interpretation(
    Person
)
parser = Parser(PERSON)
text = '''
12 марта по приказу президента Владимира Путина ...
'''
# Display the interpreted Person fact for every match.
for match in parser.findall(text):
    display(match.fact)
from yargy.tokenizer import MorphTokenizer
tokenizer = MorphTokenizer()
text = '''Ростов-на-Дону
Длительностью 18ч. 10мин.
Яндекс.Такси
π ≈ 3.1415
1 500 000$
http://vk.com
'''
for line in text.splitlines():
print([_.value for _ in tokenizer(line)])
tokenizer = MorphTokenizer()
list(tokenizer('марки стали'))
from yargy import and_, not_
from yargy.tokenizer import MorphTokenizer
from yargy.predicates import is_capitalized, eq
tokenizer = MorphTokenizer()
token = next(tokenizer('Стали'))
predicate = is_capitalized()
assert predicate(token) == True
predicate = and_(
is_capitalized(),
not_(eq('марки'))
)
assert predicate(token) == True
from pymorphy2.shapes import is_roman_number
from yargy.parser import Context
from yargy.tokenizer import Tokenizer
from yargy.predicates import custom
tokenizer = Tokenizer()
token = next(tokenizer('XL'))
predicate = custom(is_roman_number, types='LATIN')
predicate = predicate.activate(Context(tokenizer)) # проверяется, что tokenizer поддерживает тип 'LATIN'
assert predicate(token) == True
token = next(tokenizer('XS'))
assert predicate(token) == False
from yargy import or_, rule
from yargy.predicates import normalized
RULE = or_(
rule(normalized('dvd'), '-', normalized('диск')),
rule(normalized('видео'), normalized('файл'))
)
from yargy import Parser
from yargy.pipelines import morph_pipeline
RULE = morph_pipeline([
'dvd-диск',
'видео файл',
'видеофильм',
'газета',
'электронный дневник',
'эссе',
])
parser = Parser(RULE)
text = 'Видео файл на dvd-диске'
for match in parser.findall(text):
print([_.value for _ in match.tokens])
from yargy import rule, or_
KEY = or_(
rule('р', '.'),
rule('размер')
).named('KEY')
VALUE = or_(
rule('S'),
rule('M'),
rule('L'),
).named('VALUE')
SIZE = rule(
KEY,
VALUE
).named('SIZE')
SIZE.normalized.as_bnf
from yargy.predicates import in_
VALUE = rule(
in_('SML')
).named('VALUE')
SIZE = rule(
KEY,
VALUE
).named('SIZE')
SIZE.normalized.as_bnf
from yargy import forward
EXPR = forward()
EXPR.define(or_(
rule('a'),
rule('(', EXPR, '+', EXPR, ')')
).named('EXPR'))
EXPR.normalized.as_bnf
from yargy import not_
from yargy.predicates import eq
WORD = not_(eq('»'))
TEXT = forward()
TEXT.define(or_(
rule(WORD),
rule(WORD, TEXT)
))
TITLE = rule(
'«',
TEXT,
'»'
).named('TITLE')
TITLE.normalized.as_bnf
TITLE = rule(
'«',
not_(eq('»')).repeatable(),
'»'
).named('TITLE')
TITLE.normalized.as_bnf
parser = Parser(
or_(
PERSON,
TITLE
)
)
text = 'Президент Владимир Путин в фильме «Интервью с Путиным» ..'
for match in parser.findall(text):
print([_.value for _ in match.tokens])
match = parser.match('Президент Владимир Путин')
print([_.value for _ in match.tokens])
match = parser.match('Президент Владимир Путин 25 мая')
print(match)
from IPython.display import display
from yargy.predicates import (
lte,
gte,
dictionary
)
# Month names as dictionary (normal) forms.  The `dictionary` predicate built
# from this set matches a token whose *normalized* form is in the set, so
# every entry must be a nominative normal form.  The original listed the
# genitive 'мая', which is never a normal form, so dates in May could never
# match; it is corrected to 'май' here.
MONTHS = {
    'январь',
    'февраль',
    'март',
    'апрель',
    'май',
    'июнь',
    'июль',
    'август',
    'сентябрь',
    'октябрь',
    'ноябрь',
    'декабрь'
}
MONTH_NAME = dictionary(MONTHS)
MONTH = and_(
gte(1),
lte(12)
)
DAY = and_(
gte(1),
lte(31)
)
YEAR = and_(
gte(1900),
lte(2100)
)
DATE = or_(
rule(DAY, MONTH_NAME, YEAR),
rule(YEAR, '-', MONTH, '-', DAY),
rule(YEAR, 'г', '.')
).named('DATE')
parser = Parser(DATE)
text = '''2015г.
18 июля 2016
2016-01-02
'''
for line in text.splitlines():
match = parser.match(line)
display(match.tree.as_dot)
from yargy.interpretation import fact
Date = fact(
'Date',
['year', 'month', 'day']
)
DATE = or_(
rule(
DAY.interpretation(
Date.day
),
MONTH_NAME.interpretation(
Date.month
),
YEAR.interpretation(
Date.year
)
),
rule(
YEAR.interpretation(
Date.year
),
'-',
MONTH.interpretation(
Date.month
),
'-',
DAY.interpretation(
Date.day
)
),
rule(
YEAR.interpretation(
Date.year
),
'г', '.'
)
).interpretation(
Date
).named('DATE')
parser = Parser(DATE)
for line in text.splitlines():
match = parser.match(line)
display(match.tree.as_dot)
for line in text.splitlines():
match = parser.match(line)
display(match.fact)
# Map month normal forms to month numbers.  Keys must be nominative normal
# forms because lookup happens via Date.month.normalized().custom(MONTHS.get):
# the original keyed May as the genitive 'мая', so .get('май') returned None
# and May dates interpreted to month=None.  Corrected to 'май'.
MONTHS = {
    'январь': 1,
    'февраль': 2,
    'март': 3,
    'апрель': 4,
    'май': 5,
    'июнь': 6,
    'июль': 7,
    'август': 8,
    'сентябрь': 9,
    'октябрь': 10,
    'ноябрь': 11,
    'декабрь': 12
}
DATE = rule(
DAY.interpretation(
Date.day.custom(int)
),
MONTH_NAME.interpretation(
Date.month.normalized().custom(MONTHS.get)
),
YEAR.interpretation(
Date.year.custom(int)
)
).interpretation(
Date
)
parser = Parser(DATE)
match = parser.match('18 июня 2016')
match.fact
NAME = rule(
gram('Name').interpretation(
Name.first.inflected()
),
gram('Surn').interpretation(
Name.last.inflected()
)
).interpretation(
Name
)
parser = Parser(NAME)
for match in parser.findall('Илье Ивановым, Павлом Семенов'):
print([_.value for _ in match.tokens])
parser = Parser(NAME)
for match in parser.findall('Сашу Иванову, Саше Иванову'):
display(match.fact)
from yargy.relations import gnc_relation
gnc = gnc_relation()
NAME = rule(
gram('Name').interpretation(
Name.first.inflected()
).match(gnc),
gram('Surn').interpretation(
Name.last.inflected()
).match(gnc)
).interpretation(
Name
)
parser = Parser(NAME)
for match in parser.findall('Илье Ивановым, Павлом Семенов, Саша Быков'):
print([_.value for _ in match.tokens])
parser = Parser(NAME)
for match in parser.findall('Сашу Иванову, Саше Иванову'):
display(match.fact)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Настоящие сложные грамматики для топонимов собраны в репозитории <a href="https
Step2: Грамматики для имён собраны в репозитории Natasha
Step3: Для каждого токена pymorphy2 возвращает набор граммем. Например, "NOUN, sing, femn" — "существительное в единственном числе женского рода". Полный список — в <a href="http://pymorphy2.readthedocs.io/en/latest/user/grammemes.html">документации pymorphy2</a>.
Step4: Токенизатор работает на правилах. В <a href="ref.ipynb#Токенизатор">справочнике</a> показано, как менять стандартные правила и добавлять новые.
Step5: <a href="ref.ipynb#predicates.custom">custom</a> создаёт предикат из произвольной функции. Например, предикат для римских цифр
Step6: Газеттир
Step7: удобно использовать morph_pipeline
Step8: Список газеттиров в <a href="ref.ipynb#Газеттир">справочнике</a>.
Step9: В Yargy терминал грамматики — предикат. Используем встроенный предикат in_, сократим запись VALUE
Step10: Как быть, когда правая часть правила ссылается на левую? Например
Step11: Рекурсивные правила описывают последовательности токенов произвольной длины. Грамматика для текста в кавычках
Step12: Для удобства в Yargy есть метод repeatable с ним запись короче. Библиотека автоматически добавит forward
Step13: Парсер
Step14: match — пытается разобрать весь текст целиком
Step15: Интерпретация
Step16: Интерпретация — процесс преобразования дерева разбора в объект с набором полей. Для даты, например, нужно получить структуры вида Date(year=2016, month=1, day=2). Пользователь размечает дерево на вершины-атрибуты и вершины-конструкторы методом interpretation
Step17: Из размеченного дерева библиотека собирает объект
Step18: Подробнее об интерпретации в <a href="#ref.ipynb#Интерпретация">справочнике</a>.
Step19: Подробнее в <a href="ref.ipynb#Нормализация">справочнике</a>.
Step20: У неё есть две проблемы. Она срабатывает на словосочетаниях, где имя и фамилия в разных падежах
Step21: Имя и фамилия приводятся к нормальной форме независимо, получается женщина "Иванов"
Step22: В Yargy связь между словами и словосочетаниями устанавливается методом match. Для согласования по числу в match передаём number_relation, для согласования по падежу, роду и числу — gnc_relation
|
6,948
|
<ASSISTANT_TASK:>
Python Code:
def _export_separable_conv2d_case(key, seed, data_in_shape, filters, kernel_size,
                                  strides, padding, depth_multiplier, activation,
                                  use_bias):
    """Generate one SeparableConv2D test fixture.

    Builds the layer, assigns seeded random weights in [-1, 1), runs one
    random input through the model, prints every tensor, and records the
    fixture in the module-level DATA dict under *key*.

    This replaces seven copy-pasted notebook cells that differed only in
    the seed and layer hyperparameters.
    """
    conv = SeparableConv2D(filters, kernel_size, strides=strides,
                           padding=padding, data_format='channels_last',
                           depth_multiplier=depth_multiplier,
                           activation=activation, use_bias=use_bias)
    layer_0 = Input(shape=data_in_shape)
    layer_1 = conv(layer_0)
    model = Model(inputs=layer_0, outputs=layer_1)

    # Set weights to random values; the seed is re-applied before each
    # tensor so every weight tensor is reproducible on its own, exactly as
    # the original per-case cells did.
    weights = []
    for w in model.get_weights():
        np.random.seed(seed)
        weights.append(2 * np.random.random(w.shape) - 1)
    model.set_weights(weights)

    # With use_bias=False the model has only two weight tensors, so zip
    # naturally skips the 'b' label (the original commented those prints out).
    weight_names = ['depthwise_kernel', 'pointwise_kernel', 'b']
    for name, w in zip(weight_names, weights):
        print(name + ' shape:', w.shape)
        print(name + ':', format_decimal(w.ravel().tolist()))

    data_in = 2 * np.random.random(data_in_shape) - 1
    result = model.predict(np.array([data_in]))
    data_out_shape = result[0].shape
    data_in_formatted = format_decimal(data_in.ravel().tolist())
    data_out_formatted = format_decimal(result[0].ravel().tolist())
    print('')
    print('in shape:', data_in_shape)
    print('in:', data_in_formatted)
    print('out shape:', data_out_shape)
    print('out:', data_out_formatted)

    DATA[key] = {
        'input': {'data': data_in_formatted, 'shape': data_in_shape},
        'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape}
                    for w in weights],
        'expected': {'data': data_out_formatted, 'shape': data_out_shape}
    }


# One call per fixture; parameters mirror the original seven notebook cells
# (key, seed, input shape, filters, kernel, strides, padding, depth
# multiplier, activation, use_bias).
_export_separable_conv2d_case('convolutional.SeparableConv2D.0', 160, (5, 5, 2),
                              4, (3, 3), (1, 1), 'valid', 1, 'linear', True)
_export_separable_conv2d_case('convolutional.SeparableConv2D.1', 161, (5, 5, 2),
                              4, (3, 3), (1, 1), 'valid', 2, 'relu', True)
_export_separable_conv2d_case('convolutional.SeparableConv2D.2', 162, (5, 5, 4),
                              16, (3, 3), (1, 1), 'valid', 3, 'relu', True)
_export_separable_conv2d_case('convolutional.SeparableConv2D.3', 163, (5, 5, 2),
                              4, (3, 3), (2, 2), 'valid', 1, 'relu', True)
_export_separable_conv2d_case('convolutional.SeparableConv2D.4', 164, (5, 5, 2),
                              4, (3, 3), (1, 1), 'same', 1, 'relu', True)
_export_separable_conv2d_case('convolutional.SeparableConv2D.5', 165, (5, 5, 2),
                              4, (3, 3), (1, 1), 'same', 2, 'relu', False)
_export_separable_conv2d_case('convolutional.SeparableConv2D.6', 166, (5, 5, 2),
                              4, (3, 3), (2, 2), 'same', 2, 'relu', True)

import os

# Export all fixtures for the Keras.js test suite.
filename = '../../../test/data/layers/convolutional/SeparableConv2D.json'
if not os.path.exists(os.path.dirname(filename)):
    os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
    json.dump(DATA, f)

print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [convolutional.SeparableConv2D.1] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='valid', data_format='channels_last', depth_multiplier=2, activation='relu', use_bias=True
Step2: [convolutional.SeparableConv2D.2] 16 3x3 filters on 5x5x4 input, strides=(1,1), padding='valid', data_format='channels_last', depth_multiplier=3, activation='relu', use_bias=True
Step3: [convolutional.SeparableConv2D.3] 4 3x3 filters on 5x5x2 input, strides=(2,2), padding='valid', data_format='channels_last', depth_multiplier=1, activation='relu', use_bias=True
Step4: [convolutional.SeparableConv2D.4] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='same', data_format='channels_last', depth_multiplier=1, activation='relu', use_bias=True
Step5: [convolutional.SeparableConv2D.5] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='same', data_format='channels_last', depth_multiplier=2, activation='relu', use_bias=False
Step6: [convolutional.SeparableConv2D.6] 4 3x3 filters on 5x5x2 input, strides=(2,2), padding='same', data_format='channels_last', depth_multiplier=2, activation='relu', use_bias=True
Step7: export for Keras.js tests
|
6,949
|
<ASSISTANT_TASK:>
Python Code:
%reload_ext rpy2.ipython
import pandas as pd
%%R
# help()
# help(function)
# help(package='package-name)
%%R
# install
# install.packages('package-name')
# already installed with conda
#install.packages("foreign")
#install.packages("Rcmdr", dependencies = TRUE)
# new installs
install.packages("nortest", repos="http://cran.rstudio.com/")
install.packages("sas7bdat", repos="http://cran.rstudio.com/")
install.packages("Hmisc", repos="http://cran.rstudio.com/")
install.packages("pastecs", repos="http://cran.rstudio.com/")
%%R
# import
# library('package-name')
library(foreign)
library(nortest)
library(sas7bdat)
library(Hmisc)
library(pastecs)
# jupyter "magic : integration with shell
%pwd
%%R
# import excel : via txt tab separated
fichierTexte <- read.table("data/labo-2/SR_Data.txt", header = TRUE)
head(fichierTexte)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Aide
Step2: Autres ressources
Step3: Espace de travail
Step4: Import fichier externe
|
6,950
|
<ASSISTANT_TASK:>
Python Code:
from dolfin import *
from rbnics import *
from problems import *
from reduction_methods import *
@OnlineStabilization()
class AdvectionDominated(EllipticCoerciveProblem):
    """Advection-dominated elliptic problem with optional online stabilization.

    mu[0] controls the diffusion coefficient 10**(-mu[0]); the advection
    field beta and the forcing f are fixed.  The stabilization terms (a2, a3,
    f1) are switched on/off through the theta coefficients depending on
    self.stabilized, which is managed by the @OnlineStabilization decorator.
    """

    # Default initialization of members
    def __init__(self, V, **kwargs):
        # Call the standard initialization
        EllipticCoerciveProblem.__init__(self, V, **kwargs)
        # ... and also store FEniCS data structures for assembly
        assert "subdomains" in kwargs
        assert "boundaries" in kwargs
        self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
        self.u = TrialFunction(V)
        self.v = TestFunction(V)
        # Use the mesh functions passed by the caller.  The original read the
        # module-level globals `subdomains`/`boundaries` here, which only
        # worked because the driver script happened to define them.
        self.dx = Measure("dx")(subdomain_data=self.subdomains)
        self.ds = Measure("ds")(subdomain_data=self.boundaries)
        # Store advection and forcing expressions
        self.beta = Constant((1.0, 1.0))
        self.f = Constant(1.0)
        # Store terms related to stabilization
        self.delta = 0.5          # stabilization coefficient
        self.h = CellDiameter(V.mesh())

    # Return custom problem name
    def name(self):
        return "AdvectionDominated1POD"

    # Return theta multiplicative terms of the affine expansion of the problem.
    def compute_theta(self, term):
        mu = self.mu
        if term == "a":
            theta_a0 = 10.0**(- mu[0])   # diffusion
            theta_a1 = 1.0               # advection
            if self.stabilized:
                delta = self.delta
                theta_a2 = - delta * 10.0**(- mu[0])
                theta_a3 = delta
            else:
                # Stabilization disabled: zero out the extra terms.
                theta_a2 = 0.0
                theta_a3 = 0.0
            return (theta_a0, theta_a1, theta_a2, theta_a3)
        elif term == "f":
            theta_f0 = 1.0
            if self.stabilized:
                delta = self.delta
                theta_f1 = delta
            else:
                theta_f1 = 0.0
            return (theta_f0, theta_f1)
        else:
            raise ValueError("Invalid term for compute_theta().")

    # Return forms resulting from the discretization of the affine expansion of the problem operators.
    def assemble_operator(self, term):
        v = self.v
        dx = self.dx
        if term == "a":
            u = self.u
            beta = self.beta
            h = self.h
            a0 = inner(grad(u), grad(v)) * dx                              # diffusion
            a1 = inner(beta, grad(u)) * v * dx                             # advection
            a2 = inner(div(grad(u)), h * inner(beta, grad(v))) * dx        # stabilization (diffusive part)
            a3 = inner(inner(beta, grad(u)), h * inner(beta, grad(v))) * dx  # stabilization (advective part)
            return (a0, a1, a2, a3)
        elif term == "f":
            f = self.f
            beta = self.beta
            h = self.h
            f0 = f * v * dx
            f1 = inner(f, h * inner(beta, grad(v))) * dx                   # stabilized forcing
            return (f0, f1)
        elif term == "k":
            u = self.u
            k0 = inner(grad(u), grad(v)) * dx
            return (k0,)
        elif term == "m":
            u = self.u
            m0 = inner(u, v) * dx
            return (m0,)
        elif term == "dirichlet_bc":
            # Homogeneous Dirichlet conditions on boundary markers 1 and 2.
            bc0 = [DirichletBC(self.V, Constant(0.0), self.boundaries, 1),
                   DirichletBC(self.V, Constant(0.0), self.boundaries, 2)]
            return (bc0,)
        elif term == "inner_product":
            u = self.u
            x0 = inner(grad(u), grad(v)) * dx
            return (x0,)
        else:
            raise ValueError("Invalid term for assemble_operator().")
mesh = Mesh("data/square.xml")
subdomains = MeshFunction("size_t", mesh, "data/square_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/square_facet_region.xml")
V = FunctionSpace(mesh, "Lagrange", 2)
problem = AdvectionDominated(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [(0.0, 6.0)]
problem.set_mu_range(mu_range)
reduction_method = PODGalerkin(problem)
reduction_method.set_Nmax(15)
reduction_method.initialize_training_set(100)
reduced_problem = reduction_method.offline()
online_mu = (6.0, )
reduced_problem.set_mu(online_mu)
reduced_problem.solve(online_stabilization=True)
reduced_problem.export_solution(filename="online_solution_with_stabilization")
reduced_problem.export_error(filename="online_error_with_stabilization")
reduced_problem.solve(online_stabilization=False)
reduced_problem.export_solution(filename="online_solution_without_stabilization")
reduced_problem.export_error(filename="online_error_without_stabilization")
reduction_method.initialize_testing_set(100)
reduction_method.error_analysis(online_stabilization=True, filename="error_analysis_with_stabilization")
reduction_method.error_analysis(online_stabilization=False, filename="error_analysis_without_stabilization")
reduction_method.speedup_analysis(online_stabilization=True, filename="speedup_analysis_with_stabilization")
reduction_method.speedup_analysis(online_stabilization=False, filename="speedup_analysis_without_stabilization")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element space (Lagrange P2)
Step4: 4.3. Allocate an object of the AdvectionDominated class
Step5: 4.4. Prepare reduction with a POD-Galerkin method
Step6: 4.5. Perform the offline phase
Step7: 4.6. Perform an online solve
Step8: 4.7. Perform an error analysis
Step9: 4.8. Perform a speedup analysis
|
6,951
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import h5py
from sklearn import svm, cross_validation
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# First we load the file
file_location = '../results_database/text_wall_street_big.hdf5'
f = h5py.File(file_location, 'r')
# Now we need to get the letters and align them
text_directory = '../data/wall_street_letters.npy'
letters_sequence = np.load(text_directory)
Nletters = len(letters_sequence)
symbols = set(letters_sequence)
# Load the particular example
Nspatial_clusters = 8
Ntime_clusters = 40
Nembedding = 3
parameters_string = '/' + str(Nspatial_clusters)
parameters_string += '-' + str(Ntime_clusters)
parameters_string += '-' + str(Nembedding)
# Set the parameters for the simulation
maximal_lags = np.arange(8, 21, 3)
# Run the delay analysis
N = 50000
delays = np.arange(0, 25, 1)
accuracy_matrix = np.zeros((maximal_lags.size, delays.size))
for maximal_lag_index, maximal_lag in enumerate(maximal_lags):
# Extract the appropriate database
run_name = '/low-resolution' + str(maximal_lag)
nexa = f[run_name + parameters_string]
# Now we load the time and the code vectors
time = nexa['time']
code_vectors = nexa['code-vectors']
code_vectors_distance = nexa['code-vectors-distance']
code_vectors_softmax = nexa['code-vectors-softmax']
code_vectors_winner = nexa['code-vectors-winner']
for delay_index, delay in enumerate(delays):
X = code_vectors_softmax[:(N - delay)]
y = letters_sequence[delay:N]
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)
clf = svm.SVC(C=1.0, cache_size=200, kernel='linear')
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test) * 100.0
accuracy_matrix[maximal_lag_index, delay_index] = score
print('delay_index', delay_index)
print('maximal_lag_index', maximal_lag_index)
print('maximal_lag', maximal_lag)
print('delay', delay)
print('score', score)
print('-------------')
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
for maximal_lag_index in range(maximal_lags.size):
ax.plot(delays, accuracy_matrix[maximal_lag_index, :], 'o-', lw=2, markersize=10,
label=str(maximal_lags[maximal_lag_index]))
ax.set_xlabel('Delays')
ax.set_ylabel('Accuracy')
ax.set_ylim([0, 105])
ax.set_title('Latency analysis for different lags')
ax.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load all the data
Step2: Latency analysis
Step3: Plot it
|
6,952
|
<ASSISTANT_TASK:>
Python Code:
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
%matplotlib inline
np.random.seed(1) # set a seed so that the results are consistent
X, Y = load_planar_dataset()
# Visualize the data:
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
### START CODE HERE ### (≈ 3 lines of code)
shape_X = X.shape
shape_Y = Y.shape
# Training set size = number of example columns.  The original used
# Y.flatten().shape, which is a 1-tuple, not an int (it only printed
# correctly because %-formatting treats a tuple as the argument list).
m = shape_X[1]
### END CODE HERE ###
print('The shape of X is: ' + str(shape_X))
print('The shape of Y is: ' + str(shape_Y))
print('I have m = %d training examples!' % m)
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y.T);
# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
# Print accuracy
LR_predictions = clf.predict(X.T)
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
    """Determine the layer sizes of the 2-layer network.

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error.)

    Arguments:
    X -- input dataset of shape (input size, number of examples)
    Y -- labels of shape (output size, number of examples)

    Returns:
    n_x -- the size of the input layer
    n_h -- the size of the hidden layer (hard-coded to 4 for this exercise)
    n_y -- the size of the output layer
    """
    ### START CODE HERE ### (≈ 3 lines of code)
    n_x = X.shape[0]  # size of input layer
    n_h = 4
    n_y = Y.shape[0]  # size of output layer
    ### END CODE HERE ###
    return (n_x, n_h, n_y)
# Sanity-check layer_sizes() against the course-provided fixture.
X_assess, Y_assess = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """Initialize the network's weights and biases.

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error.)

    Argument:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    params -- python dictionary containing your parameters:
              W1 -- weight matrix of shape (n_h, n_x)
              b1 -- bias vector of shape (n_h, 1)
              W2 -- weight matrix of shape (n_y, n_h)
              b2 -- bias vector of shape (n_y, 1)
    """
    np.random.seed(2)  # we set up a seed so that your output matches ours although the initialization is random.

    ### START CODE HERE ### (≈ 4 lines of code)
    # Small random weights break symmetry; biases can safely start at zero.
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))
    ### END CODE HERE ###

    assert (W1.shape == (n_h, n_x))
    assert (b1.shape == (n_h, 1))
    assert (W2.shape == (n_y, n_h))
    assert (b2.shape == (n_y, 1))

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}

    return parameters
# Sanity-check initialize_parameters() against the course-provided fixture.
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """Compute the forward pass of the 2-layer network.

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error.)

    Argument:
    X -- input data of size (n_x, m)
    parameters -- python dictionary containing your parameters (output of initialization function)

    Returns:
    A2 -- The sigmoid output of the second activation
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
    """
    # Retrieve each parameter from the dictionary "parameters"
    ### START CODE HERE ### (≈ 4 lines of code)
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    ### END CODE HERE ###

    # Implement Forward Propagation to calculate A2 (probabilities)
    ### START CODE HERE ### (≈ 4 lines of code)
    Z1 = np.dot(W1, X) + b1   # hidden-layer linear step
    A1 = np.tanh(Z1)          # hidden-layer tanh activation
    Z2 = np.dot(W2, A1) + b2  # output-layer linear step
    A2 = sigmoid(Z2)          # sigmoid (from planar_utils) -> probabilities
    ### END CODE HERE ###

    assert(A2.shape == (1, X.shape[1]))

    cache = {"Z1": Z1,
             "A1": A1,
             "Z2": Z2,
             "A2": A2}

    return A2, cache
# Sanity-check forward_propagation() against the course-provided fixture.
X_assess, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(X_assess, parameters)
# Note: we use the mean here just to make sure that your output matches ours.
print(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
    """Computes the cross-entropy cost given in equation (13).

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error.)

    Arguments:
    A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)
    parameters -- python dictionary containing your parameters W1, b1, W2 and b2

    Returns:
    cost -- cross-entropy cost given equation (13)
    """
    m = Y.shape[1]  # number of examples

    # Retrieve W1 and W2 from parameters (kept for the graded exercise;
    # they are not used in the unregularized cost below).
    ### START CODE HERE ### (≈ 2 lines of code)
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    ### END CODE HERE ###

    # Compute the cross-entropy cost: -(1/m) * sum(y*log(a) + (1-y)*log(1-a))
    ### START CODE HERE ### (≈ 2 lines of code)
    logprobs = np.multiply(Y, np.log(A2)) + np.multiply(np.log(1 - A2), 1 - Y)
    cost = - 1 / m * np.sum(logprobs)
    ### END CODE HERE ###

    cost = np.squeeze(cost)  # makes sure cost is the dimension we expect.
                             # E.g., turns [[17]] into 17
    assert(isinstance(cost, float))

    return cost
# Sanity-check compute_cost() against the course-provided fixture.
A2, Y_assess, parameters = compute_cost_test_case()
print("cost = " + str(compute_cost(A2, Y_assess, parameters)))
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
    """Implement the backward propagation using the instructions above.

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error.)

    Arguments:
    parameters -- python dictionary containing our parameters
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
    X -- input data of shape (2, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)

    Returns:
    grads -- python dictionary containing your gradients with respect to different parameters
    """
    m = X.shape[1]

    # First, retrieve W1 and W2 from the dictionary "parameters".
    ### START CODE HERE ### (≈ 2 lines of code)
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    ### END CODE HERE ###

    # Retrieve also A1 and A2 from dictionary "cache".
    ### START CODE HERE ### (≈ 2 lines of code)
    A1 = cache["A1"]
    A2 = cache["A2"]
    ### END CODE HERE ###

    # Backward propagation: calculate dW1, db1, dW2, db2.
    ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)
    dZ2 = A2 - Y  # gradient of cross-entropy loss w.r.t. Z2 (sigmoid output)
    dW2 = 1 / m * np.dot(dZ2, A1.T)
    db2 = 1 / m * np.sum(dZ2, axis=1, keepdims=True)
    dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))  # tanh'(z) = 1 - tanh(z)^2
    dW1 = 1 / m * np.dot(dZ1, X.T)
    db1 = 1 / m * np.sum(dZ1, axis=1, keepdims=True)
    ### END CODE HERE ###

    grads = {"dW1": dW1,
             "db1": db1,
             "dW2": dW2,
             "db2": db2}

    return grads
# Sanity-check backward_propagation() against the course-provided fixture.
parameters, cache, X_assess, Y_assess = backward_propagation_test_case()

grads = backward_propagation(parameters, cache, X_assess, Y_assess)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("db2 = "+ str(grads["db2"]))
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate = 1.2):
    """Updates parameters using the gradient descent update rule given above.

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error; also
    repaired a malformed `## END CODE HERE` marker.)

    Arguments:
    parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients
    learning_rate -- gradient descent step size

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    # Retrieve each parameter from the dictionary "parameters"
    ### START CODE HERE ### (≈ 4 lines of code)
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    ### END CODE HERE ###

    # Retrieve each gradient from the dictionary "grads"
    ### START CODE HERE ### (≈ 4 lines of code)
    dW1 = grads["dW1"]
    db1 = grads["db1"]
    dW2 = grads["dW2"]
    db2 = grads["db2"]
    ### END CODE HERE ###

    # Update rule for each parameter: theta := theta - learning_rate * dtheta
    ### START CODE HERE ### (≈ 4 lines of code)
    W1 = W1 - learning_rate * dW1
    b1 = b1 - learning_rate * db1
    W2 = W2 - learning_rate * dW2
    b2 = b2 - learning_rate * db2
    ### END CODE HERE ###

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}

    return parameters
# Sanity-check update_parameters() against the course-provided fixture.
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)

print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
    """Train the 2-layer neural network with full-batch gradient descent.

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error.)

    Arguments:
    X -- dataset of shape (2, number of examples)
    Y -- labels of shape (1, number of examples)
    n_h -- size of the hidden layer
    num_iterations -- Number of iterations in gradient descent loop
    print_cost -- if True, print the cost every 1000 iterations

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    np.random.seed(3)
    n_x = layer_sizes(X, Y)[0]
    n_y = layer_sizes(X, Y)[2]

    # Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: "n_x, n_h, n_y". Outputs = "W1, b1, W2, b2, parameters".
    ### START CODE HERE ### (≈ 5 lines of code)
    parameters = initialize_parameters(n_x, n_h, n_y)
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    ### END CODE HERE ###

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        ### START CODE HERE ### (≈ 4 lines of code)
        # Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
        A2, cache = forward_propagation(X, parameters)
        # Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
        cost = compute_cost(A2, Y, parameters)
        # Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
        grads = backward_propagation(parameters, cache, X, Y)
        # Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
        parameters = update_parameters(parameters, grads)
        ### END CODE HERE ###

        # Print the cost every 1000 iterations
        if print_cost and i % 1000 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))

    return parameters
# Sanity-check nn_model() against the course-provided fixture.
X_assess, Y_assess = nn_model_test_case()
parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=False)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: predict
def predict(parameters, X):
    """Using the learned parameters, predicts a class for each example in X.

    (Fix: the original docstring text had lost its triple-quote delimiters
    during cleaning, leaving bare statements that are a syntax error.)

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (n_x, m)

    Returns:
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Computes probabilities using forward propagation, and classifies
    # to 0/1 using 0.5 as the threshold.
    ### START CODE HERE ### (≈ 2 lines of code)
    A2, cache = forward_propagation(X, parameters)
    predictions = (A2 > 0.5)
    ### END CODE HERE ###

    return predictions
# Sanity-check predict() against the course-provided fixture.
parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("predictions mean = " + str(np.mean(predictions)))
# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)

# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))

# Print accuracy: (true positives + true negatives) / total, as a percentage.
predictions = predict(parameters, X)
print ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')
# This may take about 2 minutes to run
# Sweep hidden-layer sizes and compare decision boundaries and accuracies.
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):
    plt.subplot(5, 2, i+1)
    plt.title('Hidden Layer of size %d' % n_h)
    parameters = nn_model(X, Y, n_h, num_iterations = 5000)
    plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
    predictions = predict(parameters, X)
    accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)
    print ("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
# Datasets: alternative synthetic datasets provided by planar_utils
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()

datasets = {"noisy_circles": noisy_circles,
            "noisy_moons": noisy_moons,
            "blobs": blobs,
            "gaussian_quantiles": gaussian_quantiles}

### START CODE HERE ### (choose your dataset)
dataset = "gaussian_quantiles"
### END CODE HERE ###

# Transpose to (features, examples) and make Y a (1, m) row vector.
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])

# make blobs binary (its labels are folded to {0, 1} via mod 2)
if dataset == "blobs":
    Y = Y%2

# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 - Dataset
Step2: Visualize the dataset using matplotlib. The data looks like a "flower" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data.
Step3: You have
Step4: Expected Output
Step5: You can now plot the decision boundary of these models. Run the code below.
Step7: Expected Output
Step9: Expected Output (these are not the sizes you will use for your network, they are just used to assess the function you've just coded).
Step11: Expected Output
Step13: Expected Output
Step15: Expected Output
Step17: Expected output
Step19: Expected Output
Step21: Expected Output
Step22: Expected Output
Step23: Expected Output
Step24: Expected Output
Step25: Interpretation
|
6,953
|
<ASSISTANT_TASK:>
Python Code:
# Standard
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Dimensionality reduction and Clustering
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn import manifold, datasets
from sklearn import preprocessing
from itertools import cycle
# Plotting tools and classifiers
from matplotlib.colors import ListedColormap
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn import cross_validation
from sklearn.cross_validation import LeaveOneOut
# Let's read the data in and clean it
def get_NaNs(df):
    """Return the positional indices of rows in *df* that contain any NaN.

    Arguments:
    df -- a pandas DataFrame

    Returns:
    list of int -- 0-based positions of rows with at least one missing value
    """
    # (Fix: dropped an unused `columns` local that also relied on the
    # long-deprecated Index.get_values() API.)
    # Count missing values per row; any positive count marks the row.
    row_na_counts = df.isnull().sum(axis=1)
    return [i for i, count in enumerate(row_na_counts) if count > 0]
def remove_NaNs(df):
    """Return a copy of *df* with every row containing a NaN dropped."""
    # Translate the positional indices from get_NaNs into index labels,
    # which is what DataFrame.drop expects.
    na_labels = df.index[get_NaNs(df)]
    return df.drop(na_labels, inplace=False)
# Read the data in and drop every row with missing values.
initial_data = pd.DataFrame.from_csv('Data_Adults_1_reduced_2.csv')
cleansed_df = remove_NaNs(initial_data)

# Let's also get rid of nominal data: keep only numeric columns.
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
X = cleansed_df.select_dtypes(include=numerics)
print X.shape

# Extract ADHD and Bipolar patients as mutually exclusive groups:
# ADHD-only (ADHD==1, Bipolar==0) and Bipolar-only (Bipolar==1, ADHD==0).
ADHD = X.loc[X['ADHD'] == 1]
ADHD = ADHD.loc[ADHD['Bipolar'] == 0]

BP = X.loc[X['Bipolar'] == 1]
BP = BP.loc[BP['ADHD'] == 0]

print ADHD.shape
print BP.shape

# Keeping a backup of the data frame object because numpy arrays don't play
# well with certain scikit functions. Drop identifier/label columns so only
# feature columns remain.
ADHD = pd.DataFrame(ADHD.drop(['Patient_ID', 'Age', 'ADHD', 'Bipolar'], axis = 1, inplace = False))
BP = pd.DataFrame(BP.drop(['Patient_ID', 'Age', 'ADHD', 'Bipolar'], axis = 1, inplace = False))

print ADHD.shape
print BP.shape
# Cluster the combined ADHD-only + Bipolar-only feature matrix with k-means
# (k=2) to see whether the two diagnostic groups separate unsupervised.
ADHD_clust = pd.DataFrame(ADHD)
BP_clust = pd.DataFrame(BP)

# This is a consequence of how we dropped columns, I apologize for the hacky code
data = pd.concat([ADHD_clust, BP_clust])

kmeans = KMeans(n_clusters=2)
kmeans.fit(data.get_values())
labels = kmeans.labels_
cluster_centers = kmeans.cluster_centers_
print('Estimated number of clusters: %d' % len(cluster_centers))
print data.shape

# Scatter the members of each cluster (first two feature columns only)
# and mark the corresponding cluster center.
for label in [0, 1]:
    ds = data.get_values()[np.where(labels == label)]
    plt.plot(ds[:,0], ds[:,1], '.')
    lines = plt.plot(cluster_centers[label,0], cluster_centers[label,1], 'o')
# Build a supervised dataset: label ADHD-only rows 1 and Bipolar-only rows 0,
# then separate the class labels from the feature matrix.
ADHD_iso = pd.DataFrame(ADHD_clust)
BP_iso = pd.DataFrame(BP_clust)
BP_iso['ADHD-Bipolar'] = 0
ADHD_iso['ADHD-Bipolar'] = 1
print BP_iso.columns

data = pd.DataFrame(pd.concat([ADHD_iso, BP_iso]))
class_labels = data['ADHD-Bipolar']
data = data.drop(['ADHD-Bipolar'], axis = 1, inplace = False)
print data.shape
# Convert to a plain numpy array for the scikit-learn classifiers below.
data = data.get_values()
# Leave one Out cross validation
def leave_one_out(classifier, values, labels):
    """Leave-one-out cross-validate *classifier* on (values, labels).

    Returns a (mean accuracy, standard deviation) pair over the n folds.
    """
    cv = LeaveOneOut(len(values))
    scores = cross_validation.cross_val_score(classifier, values, labels, cv=cv)
    return scores.mean(), scores.std()
# Instantiate the candidate classifiers.
svc = SVC(gamma = 2, C = 1)
bc = BaggingClassifier(n_estimators = 22)
gb = GradientBoostingClassifier()
dt = DecisionTreeClassifier(max_depth = 22)
qda = QDA()
gnb = GaussianNB()
vc = VotingClassifier(estimators=[('gb', gb), ('bc', bc), ('gnb', gnb)],voting='hard')

# Evaluate each classifier with leave-one-out cross-validation and report
# mean accuracy +/- standard deviation.
classifier_accuracy_list = []
classifiers = [(gnb, "Gaussian NB"), (qda, "QDA"), (svc, "SVM"), (bc, "Bagging Classifier"), (vc, "Voting Classifier"),
               (dt, "Decision Trees")]
for classifier, name in classifiers:
    accuracy, deviation = leave_one_out(classifier, data, class_labels)
    print '%s accuracy is %0.4f (+/- %0.3f)' % (name, accuracy, deviation)
    classifier_accuracy_list.append((name, accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We have now dropped the remaining unexplained discrete numeric columns and removed the child patients from the dataset
Step2: Clustering and other grouping experiments
Step3: K-Means clustering
Step4: Classification Experiments
|
6,954
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput

# DO NOT EDIT !
# Initialise the CMIP6/ES-DOC output document for the NCC NorESM2-LM
# model, atmosphere realm.
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-lm', 'atmos')

# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)

# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)

# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
6,955
|
<ASSISTANT_TASK:>
Python Code:
energy = 6
applicator = 10
ssd = 100
x = [0.99, -0.14, -1.0, -1.73, -2.56, -3.17, -3.49, -3.57, -3.17, -2.52, -1.76,
-1.04, -0.17, 0.77, 1.63, 2.36, 2.79, 2.91, 3.04, 3.22, 3.34, 3.37, 3.08, 2.54,
1.88, 1.02, 0.99]
y = [5.05, 4.98, 4.42, 3.24, 1.68, 0.6, -0.64, -1.48, -2.38, -3.77, -4.81,
-5.26, -5.51, -5.58, -5.23, -4.64, -3.77, -2.77, -1.68, -0.29, 1.23, 2.68, 3.8,
4.6, 5.01, 5.08, 5.05]
width, length, poi = parameterise_single_insert(x, y)
print("Width = {0:0.2f} cm\nLength = {1:0.2f} cm".format(width, length))
display_parameterisation(x, y, width, length, poi)
data_filelist = glob('*_data.csv')
assert len(data_filelist) <= 1, "There appears to be more than 1 data.csv file, please delete the old version(s)"
assert len(data_filelist) == 1, "I need at least one data file"
data_filename = data_filelist[0]
data = pd.read_csv(data_filename)
reference = (
(data['Energy (MeV)'] == energy) &
(data['Applicator (cm)'] == applicator) &
(data['SSD (cm)'] == ssd)
)
input_dataframe = data[reference]
label = np.array(input_dataframe['Label']).astype(str)
width_data = np.array(
input_dataframe['Width (cm @ 100SSD)']).astype(float)
length_data = np.array(
input_dataframe['Length (cm @ 100SSD)']).astype(float)
factor_data = np.array(
input_dataframe['Insert factor (dose insert / dose open)']).astype(float)
input_dataframe
number_of_data = len(input_dataframe)
if number_of_data >= 8:
ratio_perim_area_data = convert2_ratio_perim_area(width_data, length_data)
ratio_perim_area = convert2_ratio_perim_area(width, length)
factor = float(spline_model_with_deformability(width, ratio_perim_area,
width_data, ratio_perim_area_data, factor_data))
else:
factor = np.nan
print("Width = {0:0.2f} cm\nLength = {1:0.2f} cm\nFactor = {2:0.4f}".format(
width, length, factor))
if number_of_data >= 8:
ratio_perim_area_data = convert2_ratio_perim_area(width_data, length_data)
figure = interactive(
width_data, length_data, ratio_perim_area_data, factor_data, label)
else:
figure = fallback_scatter(width_data, length_data, factor_data, label)
show(figure)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load model data
Step2: Only use the data for the specified energy, applicator, and ssd
Step3: Calculate the factor
Step4: Display the model
|
6,956
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'snu', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
6,957
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
weight_decay = 0.0001
batch_size = 128
num_epochs = 50
dropout_rate = 0.2
image_size = 64 # We'll resize input images to this size.
patch_size = 8 # Size of the patches to be extracted from the input images.
num_patches = (image_size // patch_size) ** 2 # Size of the data array.
embedding_dim = 256 # Number of hidden units.
num_blocks = 4 # Number of blocks.
print(f"Image size: {image_size} X {image_size} = {image_size ** 2}")
print(f"Patch size: {patch_size} X {patch_size} = {patch_size ** 2} ")
print(f"Patches per image: {num_patches}")
print(f"Elements per patch (3 channels): {(patch_size ** 2) * 3}")
def build_classifier(blocks, positional_encoding=False):
inputs = layers.Input(shape=input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Create patches.
patches = Patches(patch_size, num_patches)(augmented)
# Encode patches to generate a [batch_size, num_patches, embedding_dim] tensor.
x = layers.Dense(units=embedding_dim)(patches)
if positional_encoding:
positions = tf.range(start=0, limit=num_patches, delta=1)
position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=embedding_dim
)(positions)
x = x + position_embedding
# Process x using the module blocks.
x = blocks(x)
# Apply global average pooling to generate a [batch_size, embedding_dim] representation tensor.
representation = layers.GlobalAveragePooling1D()(x)
# Apply dropout.
representation = layers.Dropout(rate=dropout_rate)(representation)
# Compute logits outputs.
logits = layers.Dense(num_classes)(representation)
# Create the Keras model.
return keras.Model(inputs=inputs, outputs=logits)
def run_experiment(model):
# Create Adam optimizer with weight decay.
optimizer = tfa.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay,
)
# Compile the model.
model.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top5-acc"),
],
)
# Create a learning rate scheduler callback.
reduce_lr = keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=5
)
# Create an early stopping callback.
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
# Fit the model.
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[early_stopping, reduce_lr],
)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
# Return history to plot learning curves.
return history
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.Resizing(image_size, image_size),
layers.RandomFlip("horizontal"),
layers.RandomZoom(
height_factor=0.2, width_factor=0.2
),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
class Patches(layers.Layer):
def __init__(self, patch_size, num_patches):
super(Patches, self).__init__()
self.patch_size = patch_size
self.num_patches = num_patches
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=[1, self.patch_size, self.patch_size, 1],
strides=[1, self.patch_size, self.patch_size, 1],
rates=[1, 1, 1, 1],
padding="VALID",
)
patch_dims = patches.shape[-1]
patches = tf.reshape(patches, [batch_size, self.num_patches, patch_dims])
return patches
class MLPMixerLayer(layers.Layer):
def __init__(self, num_patches, hidden_units, dropout_rate, *args, **kwargs):
super(MLPMixerLayer, self).__init__(*args, **kwargs)
self.mlp1 = keras.Sequential(
[
layers.Dense(units=num_patches),
tfa.layers.GELU(),
layers.Dense(units=num_patches),
layers.Dropout(rate=dropout_rate),
]
)
self.mlp2 = keras.Sequential(
[
layers.Dense(units=num_patches),
tfa.layers.GELU(),
layers.Dense(units=embedding_dim),
layers.Dropout(rate=dropout_rate),
]
)
self.normalize = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs):
# Apply layer normalization.
x = self.normalize(inputs)
# Transpose inputs from [num_batches, num_patches, hidden_units] to [num_batches, hidden_units, num_patches].
x_channels = tf.linalg.matrix_transpose(x)
# Apply mlp1 on each channel independently.
mlp1_outputs = self.mlp1(x_channels)
# Transpose mlp1_outputs from [num_batches, hidden_dim, num_patches] to [num_batches, num_patches, hidden_units].
mlp1_outputs = tf.linalg.matrix_transpose(mlp1_outputs)
# Add skip connection.
x = mlp1_outputs + inputs
# Apply layer normalization.
x_patches = self.normalize(x)
# Apply mlp2 on each patch independtenly.
mlp2_outputs = self.mlp2(x_patches)
# Add skip connection.
x = x + mlp2_outputs
return x
mlpmixer_blocks = keras.Sequential(
[MLPMixerLayer(num_patches, embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.005
mlpmixer_classifier = build_classifier(mlpmixer_blocks)
history = run_experiment(mlpmixer_classifier)
class FNetLayer(layers.Layer):
def __init__(self, num_patches, embedding_dim, dropout_rate, *args, **kwargs):
super(FNetLayer, self).__init__(*args, **kwargs)
self.ffn = keras.Sequential(
[
layers.Dense(units=embedding_dim),
tfa.layers.GELU(),
layers.Dropout(rate=dropout_rate),
layers.Dense(units=embedding_dim),
]
)
self.normalize1 = layers.LayerNormalization(epsilon=1e-6)
self.normalize2 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs):
# Apply fourier transformations.
x = tf.cast(
tf.signal.fft2d(tf.cast(inputs, dtype=tf.dtypes.complex64)),
dtype=tf.dtypes.float32,
)
# Add skip connection.
x = x + inputs
# Apply layer normalization.
x = self.normalize1(x)
# Apply Feedfowrad network.
x_ffn = self.ffn(x)
# Add skip connection.
x = x + x_ffn
# Apply layer normalization.
return self.normalize2(x)
fnet_blocks = keras.Sequential(
[FNetLayer(num_patches, embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.001
fnet_classifier = build_classifier(fnet_blocks, positional_encoding=True)
history = run_experiment(fnet_classifier)
class gMLPLayer(layers.Layer):
def __init__(self, num_patches, embedding_dim, dropout_rate, *args, **kwargs):
super(gMLPLayer, self).__init__(*args, **kwargs)
self.channel_projection1 = keras.Sequential(
[
layers.Dense(units=embedding_dim * 2),
tfa.layers.GELU(),
layers.Dropout(rate=dropout_rate),
]
)
self.channel_projection2 = layers.Dense(units=embedding_dim)
self.spatial_projection = layers.Dense(
units=num_patches, bias_initializer="Ones"
)
self.normalize1 = layers.LayerNormalization(epsilon=1e-6)
self.normalize2 = layers.LayerNormalization(epsilon=1e-6)
def spatial_gating_unit(self, x):
# Split x along the channel dimensions.
# Tensors u and v will in th shape of [batch_size, num_patchs, embedding_dim].
u, v = tf.split(x, num_or_size_splits=2, axis=2)
# Apply layer normalization.
v = self.normalize2(v)
# Apply spatial projection.
v_channels = tf.linalg.matrix_transpose(v)
v_projected = self.spatial_projection(v_channels)
v_projected = tf.linalg.matrix_transpose(v_projected)
# Apply element-wise multiplication.
return u * v_projected
def call(self, inputs):
# Apply layer normalization.
x = self.normalize1(inputs)
# Apply the first channel projection. x_projected shape: [batch_size, num_patches, embedding_dim * 2].
x_projected = self.channel_projection1(x)
# Apply the spatial gating unit. x_spatial shape: [batch_size, num_patches, embedding_dim].
x_spatial = self.spatial_gating_unit(x_projected)
# Apply the second channel projection. x_projected shape: [batch_size, num_patches, embedding_dim].
x_projected = self.channel_projection2(x_spatial)
# Add skip connection.
return x + x_projected
gmlp_blocks = keras.Sequential(
[gMLPLayer(num_patches, embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.003
gmlp_classifier = build_classifier(gmlp_blocks)
history = run_experiment(gmlp_classifier)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the data
Step2: Configure the hyperparameters
Step3: Build a classification model
Step4: Define an experiment
Step5: Use data augmentation
Step6: Implement patch extraction as a layer
Step7: The MLP-Mixer model
Step8: Build, train, and evaluate the MLP-Mixer model
Step9: The MLP-Mixer model tends to have much less number of parameters compared
Step10: Build, train, and evaluate the FNet model
Step11: As shown in the FNet paper,
Step12: Build, train, and evaluate the gMLP model
|
6,958
|
<ASSISTANT_TASK:>
Python Code:
import warnings
import scipy as sp
import numpy as np
import porespy as ps
import openpnm as op
import matplotlib.pyplot as plt
ws = op.Workspace()
ws.settings["loglevel"] = 40
warnings.filterwarnings('ignore')
%matplotlib inline
np.random.seed(10)
# NBVAL_IGNORE_OUTPUT
im = ps.generators.overlapping_spheres(shape=[200, 200, 200], r=10, porosity=0.5, maxiter=0)
plt.imshow(im[:, :, 50]);
eps = ps.metrics.porosity(im)
print(f"Porosity: {eps*100:.1f}%")
# NBVAL_IGNORE_OUTPUT
im_3d = ps.visualization.show_3D(im)
plt.imshow(im_3d, cmap=plt.cm.magma);
# NBVAL_IGNORE_OUTPUT
snow = ps.networks.snow2(im, boundary_width=[[0, 3], 0, 0])
# NBVAL_IGNORE_OUTPUT
proj = op.io.PoreSpy.import_data(snow.network)
print(proj)
# NBVAL_IGNORE_OUTPUT
net = proj[0]
geo = proj[1]
print(net)
# NBVAL_IGNORE_OUTPUT
print(geo)
# NBVAL_IGNORE_OUTPUT
fig, ax = plt.subplots(1, 1, figsize=[8, 8])
ax = op.topotools.plot_connections(network=net, alpha=0.8, color='grey', ax=ax)
ax = op.topotools.plot_coordinates(network=net, ax=ax, color='b', markersize=50)
ax = op.topotools.plot_coordinates(network=net, pores=net.pores('xmax'), ax=ax, color='r', markersize=50)
m = np.meshgrid(range(50, 195, 10), range(50, 195, 10))
m = np.vstack([-10*np.ones_like(m[0].flatten()), m[0].flatten(), m[1].flatten()]).T
op.topotools.find_surface_pores(network=net, markers=m, label='left')
op.topotools.clone_pores(network=net, pores=net.pores('left'), labels='left_boundary')
net['pore.coords'][net.pores('left_boundary')] *= [0, 1, 1]
# NBVAL_IGNORE_OUTPUT
Ps = net.pores('xmax')
net.set_label('right_boundary', pores=Ps)
Ts = net.find_neighbor_throats(pores=net.pores('right_boundary'), mode='or')
net.set_label('right_boundary', throats=Ts)
fig, ax = plt.subplots(1, 1, figsize=[5, 5])
ax = op.topotools.plot_coordinates(network=net, color='g', alpha=0.2, ax=ax)
ax = op.topotools.plot_connections(network=net, throats=net.throats('right_boundary'), color='r', ax=ax)
ax = op.topotools.plot_connections(network=net, throats=net.throats('left_boundary'), color='b', ax=ax)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's check out the porosity of the generated image!
Step2: Let's visualize the image using porespy's 3D visualizer
Step3: OpenPNM has an IO class specifically for importing the output from PoreSpy. The import_data method can either accept a handle to a dictionary (as output from the snow algorithm above), or it can accept a filename to a saved dctionary (saved using Python's pickle library). All IO methods in OpenPNM return a project which is a list, in this case containing a network and a geometry object.
Step4: We can unpack the network and geometry objects from the project using the indices in the list as follows
Step5: It is important to note that the net object only has topological information and labels. The geo object was created by the openpnm.io.PoreSpy import class to extract all geometric information from the supplied snow dict and put in on a geometry object. We can print geo to confirm
Step6: Now let's plot things to see what we have
Step7: This looks pretty good, but it only has boundary pores on the right face, indicated by the red dots. When we ran the snow algorithm we specifically told it to only put boundary pores the "right". We could have added them to all faces during the extraction, but for the sake of demonstration we can add them after the fact, although the result is slightly different, as you'll see.
Step8: Now we pass these points in as markers to the find_surface_pores function
Step9: Lastly we want to "clone" these pores and translate them to domain edge
Step10: Now let's inspect the result using the quick plotting tools in the topotools module. First we'll add a new label called 'right_boundary' to match the 'left_boundary' we added above, then we'll plot the throats that connect to ther 'right_boundary' or 'left_boundary'
|
6,959
|
<ASSISTANT_TASK:>
Python Code:
# Import the MetPy unit registry
from metpy.units import units
length = 10.4 * units.inches
width = 20 * units.meters
print(length, width)
area = length * width
print(area)
area.to('m^2')
# Your code goes here
# %load solutions/distance.py
10 * units.degC - 5 * units.degC
25 * units.degC + 5 * units.delta_degF
273 * units.kelvin + 10 * units.kelvin
273 * units.kelvin - 10 * units.kelvin
# 12 UTC temperature
temp_initial = 20 * units.degC
temp_initial
# New 18 UTC temperature
temp_new = temp_initial + 5 * units.delta_degC
temp_new
# Your code goes here
# %load solutions/temperature_change.py
import metpy.constants as mpconst
mpconst.earth_avg_radius
mpconst.dry_air_molecular_weight
mpconst.Re
mpconst.Md
import metpy.calc as mpcalc
import numpy as np
# Make some fake data for us to work with
np.random.seed(19990503) # So we all have the same data
u = np.random.randint(0, 15, 10) * units('m/s')
v = np.random.randint(0, 15, 10) * units('m/s')
print(u)
print(v)
direction = mpcalc.wind_direction(u, v)
print(direction)
# Your code goes here
# %load solutions/wind_speed.py
mpcalc.dewpoint_rh(25 * units.degC, 75 * units.percent)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Don't forget that you can use tab completion to see what units are available! Just about every imaginable quantity is there, but if you find one that isn't, we're happy to talk about adding it.
Step2: That's great, now we have an area, but it is not in a very useful unit still. Units can be converted using the .to() method. While you won't see m$^2$ in the units list, we can parse complex/compound units as strings
Step3: Exercise
Step4: Solution
Step5: Temperature
Step6: We can add a delta to an offset unit as well
Step7: Absolute temperature scales like Kelvin and Rankine do not have an offset and therefore can be used in addition/subtraction without the need for a delta verion of the unit.
Step8: Example
Step9: Maybe the surface temperature increased by 5 degrees Celsius so far today - is this a temperature of 5 degC, or a temperature change of 5 degC? We subconsciously know that its a delta of 5 degC, but often write it as just adding two temperatures together, when it really is
Step10: Exercise
Step11: Solution
Step12: <a href="#top">Top</a>
Step13: You may also notice in the table that most constants have a short name as well that can be used
Step14: <a href="#top">Top</a>
Step15: Let's use the wind_direction function from MetPy to calculate wind direction from these values. Remember you can look at the docstring or the website for help.
Step16: Exercise
Step17: Solution
Step18: As one final demonstration, we will calculation the dewpoint given the temperature and relative humidity
|
6,960
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import matplotlib as mpl
mpl.rcParams['font.size'] = 8
figsize =(8,4)
mpl.rcParams['figure.figsize'] = figsize
import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from utils import riemann_tools
from ipywidgets import interact
from ipywidgets import widgets
from clawpack import riemann
from exact_solvers import nonlinear_elasticity
# %load exact_solvers/nonlinear_elasticity.py
def dsigma(eps, K1, K2):
"Derivative of stress w.r.t. strain."
return K1 + 2*K2*eps
def lambda1(q, xi, aux):
eps = q[0]
rho, K1, K2 = aux
return -np.sqrt(dsigma(eps, K1, K2)/rho)
def lambda2(q, xi, aux):
return -lambda1(q,xi,aux)
def make_plot_function(q_l, q_r, aux_l, aux_r):
states, speeds, reval, wave_types = \
nonlinear_elasticity.exact_riemann_solution(q_l,q_r,aux_l,aux_r)
def plot_function(t,which_char):
ax = riemann_tools.plot_riemann(states,speeds,reval,wave_types,
t=t,t_pointer=0,
extra_axes=True,
variable_names=['Strain','Momentum'])
if which_char == 1:
riemann_tools.plot_characteristics(reval,lambda1,(aux_l,aux_r),ax[0])
elif which_char == 2:
riemann_tools.plot_characteristics(reval,lambda2,(aux_l,aux_r),ax[0])
nonlinear_elasticity.phase_plane_plot(q_l, q_r, aux_l, aux_r, ax[3])
plt.show()
return plot_function
def plot_riemann_nonlinear_elasticity(rho_l,rho_r,v_l,v_r):
plot_function = make_plot_function(rho_l,rho_r,v_l,v_r)
interact(plot_function, t=widgets.FloatSlider(value=0.,min=0,max=1.,step=0.1),
which_char=widgets.Dropdown(options=[None,1,2],
description='Show characteristics'));
aux_l = np.array((1., 5., 1.))
aux_r = np.array((1., 2., 1.))
q_l = np.array([2.1, 0.])
q_r = np.array([0.0, 0.])
plot_riemann_nonlinear_elasticity(q_l, q_r, aux_l, aux_r)
solver = riemann.nonlinear_elasticity_1D_py.nonlinear_elasticity_1D
problem_data = {'stress_relation' : 'quadratic'}
fw_states, fw_speeds, fw_reval = \
riemann_tools.riemann_solution(solver,q_l,q_r,aux_l,aux_r,
problem_data=problem_data,
verbose=False,
stationary_wave=True,
fwave=True)
plot_function = \
riemann_tools.make_plot_function(fw_states,fw_speeds, fw_reval,
layout='vertical',
variable_names=('Strain','Momentum'))
interact(plot_function, t=widgets.FloatSlider(value=0.4,min=0,max=.9,step=.1));
ex_states, ex_speeds, ex_reval, wave_types = \
nonlinear_elasticity.exact_riemann_solution(q_l,q_r,aux_l,aux_r)
varnames = nonlinear_elasticity.conserved_variables
plot_function = riemann_tools.make_plot_function([ex_states,fw_states],
[ex_speeds,fw_speeds],
[ex_reval,fw_reval],
[wave_types,['contact']*3],
['Exact','$f$-wave'],
layout='vertical',
variable_names=varnames)
interact(plot_function, t=widgets.FloatSlider(value=0.4,min=0, max=0.9, step=0.1));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this chapter we investigate a nonlinear model of elastic strain in heterogeneous materials. This system is equivalent to the $p$-system of gas dynamics, although the stress-strain relation we will use here is very different from the pressure-density relation typically used in gas dynamics. The equations we consider are
Step2: Approximate solution of the Riemann problem using $f$-waves
Step3: Comparison of exact and approximate solutions
|
6,961
|
<ASSISTANT_TASK:>
Python Code:
import datetime as dt
import graphlab as gl
# Load the raw global-earthquake catalog and open the interactive summary view.
sf = gl.SFrame.read_csv('raw_data/global_earthquakes.csv', verbose=False)
sf.show()
# Keep only the columns used downstream.
useful_columns = ['time', 'latitude', 'longitude', 'mag', 'type', 'location']
sf = sf[useful_columns]
# Peek at the non-earthquake events (e.g. nuclear explosions) before dropping them.
mask = sf['type'] == 'nuclear explosion'
sf[mask]
# Restrict the dataset to genuine earthquakes; 'type' is then constant and dropped.
mask = sf['type'] == 'earthquake'
sf = sf[mask]
sf = sf.remove_column('type')
print "Number of earthquake events:", sf.num_rows()
# Parse ISO-8601 timestamps and strip the timezone so all times are naive.
sf['time'] = sf['time'].str_to_datetime(str_format='%Y-%m-%dT%H:%M:%s%ZP')
sf['time'] = sf['time'].apply(lambda x: x.replace(tzinfo=None))
# Promote the SFrame to a TimeSeries indexed on the event time.
quakes = gl.TimeSeries(sf, index='time')
quakes.print_rows(3)
# Row and column slicing work just as on an SFrame.
quakes[4:7].print_rows()
quakes[['latitude', 'longitude']].print_rows(3)
# Time-based slicing: all events on 2014-05-01.
start = dt.datetime(2014, 5, 1)
end = dt.datetime(2014, 5, 2)
quakes.slice(start, end).print_rows(3)
# TimeSeries-specific metadata.
print "Earliest timestamp:", quakes.min_time
print "Latest timestamp:", quakes.max_time
print "Timestamp range:", quakes.range
print "Index column:", quakes.index_col_name
print "Value columns:", quakes.value_col_names
print quakes.index[:3]
# Largest-magnitude event in the catalog.
big_one = quakes.argmax('mag')
quakes[big_one]
# A TimeSeries converts back to a plain SFrame on demand.
sf2 = quakes.to_sframe()
print type(sf2)
# Load a more recent extract and apply the same cleaning pipeline as above.
sf_recent = gl.SFrame.read_csv('raw_data/global_earthquakes_recent.csv', verbose=False)
# Trim away the columns we're not interested in.
sf_recent = sf_recent[useful_columns]
# Remove any non-earthquake events.
mask = sf_recent['type'] == 'earthquake'
sf_recent = sf_recent[mask]
sf_recent = sf_recent.remove_column('type')
# Convert the timestamp to a `datetime` type.
sf_recent['time'] = sf_recent['time'].str_to_datetime(str_format='%Y-%m-%dT%H:%M:%s%ZP')
sf_recent['time'] = sf_recent['time'].apply(lambda x: x.replace(tzinfo=None))
# Convert to a `TimeSeries` object.
recent_quakes = gl.TimeSeries(sf_recent, index='time')
recent_quakes.print_rows(3)
# Append the recent events to the historical series.
all_quakes = quakes.union(recent_quakes)
print all_quakes.min_time
print all_quakes.max_time
# Group events by location label; group_info() summarizes the group sizes.
grp = quakes.group('location')
print grp
grp.group_info().topk('group_size', k=8)
oklahoma_quakes = grp.get_group('Oklahoma')
oklahoma_quakes.print_rows(3)
# Group by hour-of-day component of the timestamp.
grp = quakes.group(quakes.date_part.HOUR)
hour_counts = grp.group_info()
hour_counts.print_rows(5)
# Bar chart of event counts per hour of the day.
import matplotlib.pyplot as plt
%matplotlib notebook
plt.style.use('ggplot')
fig, ax = plt.subplots()
ax.bar(hour_counts['time.hour'], hour_counts['group_size'], color='dodgerblue')
ax.set_xlabel('Hour of the day')
ax.set_ylabel('Number of earthquakes')
fig.show()
# Daily resampling: per-day event count, mean magnitude and max magnitude;
# days with no events get count 0 instead of None.
import graphlab.aggregate as agg
daily_stats = quakes.resample(period=dt.timedelta(days=1),
                              upsample_method='none',
                              downsample_method={'count': agg.COUNT('latitude'),
                                                 'avg_mag': agg.MEAN('mag'),
                                                 'max_mag': agg.MAX('mag')})
daily_stats['count'] = daily_stats['count'].fillna(0)
daily_stats.print_rows(5)
def compute_daily_stats(data):
    """Resample a quake TimeSeries to daily count / mean-mag / max-mag stats.

    Days with no events are filled with a count of 0 (resample leaves them
    as missing otherwise).
    """
    daily = data.resample(period=dt.timedelta(days=1),
                          upsample_method='none',
                          downsample_method={'count': agg.COUNT('latitude'),
                                             'avg_mag': agg.MEAN('mag'),
                                             'max_mag': agg.MAX('mag')})
    daily['count'] = daily['count'].fillna(0)
    return daily
# Persist the daily statistics for the later notebooks in this series.
# Save the daily counts and recent daily counts.
daily_stats.save('working_data/global_daily_stats.ts')
compute_daily_stats(recent_quakes).save('working_data/global_daily_update.ts')
# Filter just the Oklahoma data from the recent events.
grp = recent_quakes.group('location')
recent_oklahoma_quakes = grp.get_group('Oklahoma')
# Compute daily stats for the Oklahoma quake events.
compute_daily_stats(oklahoma_quakes).save('working_data/ok_daily_stats.ts')
compute_daily_stats(recent_oklahoma_quakes).save('working_data/ok_daily_update.ts')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Inspect the data visually
Step2: A small bit of data cleaning
Step3: 2. Convert to a TimeSeries object
Step4: Convert from SFrame to TimeSeries
Step5: 3. Basic TimeSeries operations
Step6: Some operations are little different
Step7: Some operations are unique to TimeSeries
Step8: Working with the time index
Step9: We can always go back to an SFrame
Step10: 4. Appending more data
Step11: Get the union of the two datasets
Step12: 5. Grouping observations by value
Step13: The group_info SFrame tells us what the group names are and how many observations are in each group.
Step14: The get_group method lets us isolate just the observations for any group.
Step15: 6. Grouping observations by time component
Step16: 7. Resampling
Step17: 8. Setting up the next notebooks
|
6,962
|
<ASSISTANT_TASK:>
Python Code:
# Load the alanine dipeptide trajectory and inspect its topology as DataFrames.
traj = md.load('ala2.h5')
atoms, bonds = traj.topology.to_dataframe()
atoms
# Atom indices defining the psi and phi backbone dihedrals (located manually;
# the ACE/NME caps make this system nonstandard for compute_phi/compute_psi).
psi_indices, phi_indices = [6, 8, 14, 16], [4, 6, 8, 14]
angles = md.compute_dihedrals(traj, [phi_indices, psi_indices])
# Ramachandran-style scatter of (phi, psi), colored by simulation time.
from pylab import *
from math import pi
figure()
title('Dihedral Map: Alanine dipeptide')
scatter(angles[:, 0], angles[:, 1], marker='x', c=traj.time)
cbar = colorbar()
cbar.set_label('Time [ps]')
xlabel(r'$\Phi$ Angle [radians]')
xlim(-pi, pi)
ylabel(r'$\Psi$ Angle [radians]')
ylim(-pi, pi)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Because alanine dipeptide is a little nonstandard in the sense that it's basically dominated by the ACE and NME capping residues, we need to find the indicies of the atoms involved in the phi and psi angles somewhat manually. For standard cases, see compute_phi() and compute_psi() for easier solutions that don't require you to manually find the indices of each dihedral angle.
Step2: Lets plot our dihedral angles in a scatter plot using matplotlib. What conformational states of Alanine dipeptide did we sample?
|
6,963
|
<ASSISTANT_TASK:>
Python Code:
# Importar Librerías
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
# Fix the NumPy RNG seed so the data generation below is reproducible.
seed = 21
np.random.seed(seed)
def generate_data(n):
    """Generate n labeled points for the 2-D XOR problem.

    Points are drawn uniformly from [-1, 1)^2.  Quadrants 1 and 3 (where the
    coordinates share a sign) are labeled 0; quadrants 2 and 4 are labeled 1.

    Returns:
        (features, labels): an (n, 2) float ndarray and a list of n 0/1 ints.
    """
    features = 2 * np.random.random_sample((n, 2)) - 1
    # Label from the sign of x*y instead of a strict-inequality if/elif
    # quadrant chain: the original chain silently skipped points lying
    # exactly on an axis, leaving len(labels) < n and misaligning features
    # with labels.  Axis points (x*y == 0) are assigned to class 0 together
    # with quadrants 1 and 3.
    labels = [1 if x * y < 0 else 0 for x, y in features]
    return features, labels
# Training and test sets for the XOR problem (1000 points each).
(x_training, y_training) = generate_data(1000)
(x_test, y_test) = generate_data(1000)
# Scatter plot of the training data: class 0 (quadrants 1 and 3) as red dots,
# class 1 (quadrants 2 and 4) as blue crosses.
%matplotlib inline
plt.figure(figsize=(7,7))
set1 = x_training[np.logical_and(x_training[:,0] < 0, x_training[:,1] < 0)]
set2 = x_training[np.logical_and(x_training[:,0] < 0, x_training[:,1] > 0)]
set3 = x_training[np.logical_and(x_training[:,0] > 0, x_training[:,1] > 0)]
set4 = x_training[np.logical_and(x_training[:,0] > 0, x_training[:,1] < 0)]
set1 = np.concatenate((set1, set3), axis=0)
set2 = np.concatenate((set2, set4), axis=0)
plt.plot(set1[:,0], set1[:,1], 'r.')
plt.plot(set2[:,0], set2[:,1], 'bx')
plt.show()
# A single neuron (logistic regression): input dim 2, output dim 1, sigmoid
# activation, trained with SGD on mean squared error.
model = Sequential()
model.add(Dense(output_dim=1, input_dim=2, init="normal"))
model.add(Activation("sigmoid"))
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
print "Neurona inicializada"
# Train the neuron.
model.fit(x_training, y_training, nb_epoch=1000,verbose=0)
print "Neurona entrenada"
# Evaluate on held-out data; a lone neuron cannot separate XOR (the classes
# are not linearly separable), so accuracy stays near chance.
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=1000)
print "Loss: "
print loss_and_metrics[0]
print "Accuracy: "
print loss_and_metrics[1]*100
# Predictions at the four quadrant corners hover around 0.5.
print round(model.predict(np.array([-1,-1]).reshape(1,2))[0][0],4)
print round(model.predict(np.array([1,1]).reshape(1,2))[0][0],4)
print round(model.predict(np.array([-1,1]).reshape(1,2))[0][0],4)
print round(model.predict(np.array([1,-1]).reshape(1,2))[0][0],4)
from keras.models import Sequential
from keras.layers.core import Dense, Activation
# Multilayer perceptron for XOR: one hidden layer of 8 ReLU units followed by
# a single sigmoid output, trained with SGD on mean squared error.
xor = Sequential()
xor.add(Dense(8, input_dim = 2, activation = "relu"))
xor.add(Dense(1, activation = "sigmoid"))
xor.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
print "Red inicializada"
# Train the network.
xor.fit(x_training, y_training, nb_epoch=1000, verbose=0)
print "Red entrenada"
# Evaluate the network on the test set.
evaluacion = xor.evaluate(x_test, y_test, batch_size=1000)
print "Loss: "
print evaluacion[0]
print "Accuracy: "
print evaluacion[1]*100
# Class predictions at the four quadrant corners now follow the XOR pattern.
print xor.predict_classes(np.array([-1,-1]).reshape(1,2))[0][0]
print xor.predict_classes(np.array([1,1]).reshape(1,2))[0][0]
print xor.predict_classes(np.array([-1,1]).reshape(1,2))[0][0]
print xor.predict_classes(np.array([1,-1]).reshape(1,2))[0][0]
import pandas as pd
# Boston housing dataset: 13 features plus the MEDV target (median home value).
url = 'http://mldata.org/repository/data/download/csv/regression-datasets-housing/'
df = pd.read_csv(url, sep=',',header=None, names=['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX',
                 'RM', 'AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV'])
from sklearn.cross_validation import train_test_split
# Hold out 25% of the rows for testing.
df_train, df_test = train_test_split(df,test_size=0.25, random_state=0)
df.shape
df.info()
df.describe()
from sklearn.preprocessing import StandardScaler
# Standardize using statistics of the training split only, then separate the
# MEDV target column from the features with pop().
scaler = StandardScaler().fit(df_train)
X_train_scaled = pd.DataFrame(scaler.transform(df_train),columns=df_train.columns)
X_test_scaled = pd.DataFrame(scaler.transform(df_test),columns=df_test.columns)
y_train_scaled = X_train_scaled.pop('MEDV')
y_test_scaled = X_test_scaled.pop('MEDV')
def generate_model(optimizer, activation):
    """Build a 3-layer regression net compiled against mean squared error.

    Architecture: 200 hidden units with the given activation, then one
    linear output unit.  The input dimension is taken from the global
    X_train_scaled defined above.
    """
    model = Sequential()
    model.add(Dense(200, input_dim=X_train_scaled.shape[1], init='uniform'))
    model.add(Activation(activation))
    model.add(Dense(1, init='uniform'))
    model.add(Activation('linear'))
    model.compile(optimizer=optimizer,loss='mean_squared_error')
    return model
# Train with sigmoid activation (SGD, lr=0.02, 300 epochs) and record the
# training-loss history.
sgd = SGD(lr=0.02)
model_d = generate_model(sgd,"sigmoid")
hist = model_d.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                   nb_epoch=300, verbose=0,
                   validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
# Plot MSE vs. epoch for the sigmoid model.
%matplotlib inline
epochs = np.arange(300)
plt.figure(figsize=(6,6))
plt.plot(epochs, hist.history['loss'], 'b-')
plt.title(u"Función de activación sigmoidal")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.show()
# Same experiment with ReLU activation.
sgd = SGD(lr=0.02)
model2 = generate_model(sgd, "relu")
hist2 = model2.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                   nb_epoch=300, verbose=0,
                   validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
%matplotlib inline
epochs = np.arange(300)
plt.plot(epochs, hist2.history['loss'], 'r-')
plt.title(u"Función de activación ReLu")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.show()
# Sweep 20 learning rates in [0, 0.02] with the sigmoid model and overlay the
# resulting training-loss curves.
n_lr = 20
lear_rate = np.linspace(0,0.02,n_lr)
%matplotlib inline
epochs = np.arange(300)
plt.figure(figsize=(15,7))
for n, i in enumerate(lear_rate):
    sgd = SGD(lr=i)
    model = generate_model(sgd, "sigmoid")
    hist = model.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                     nb_epoch=300, verbose=0,
                     validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
    plt.plot(epochs, hist.history['loss'], label="Lear_rate:"+str(i))
plt.title(u"Variación de learning rate con activación Sigmoid")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.ylim([0, 2])
plt.legend()
plt.show()
# Repeat with only 5 learning rates for a cleaner visualization.
n_lr = 5
lear_rate = np.linspace(0,0.02,n_lr)
%matplotlib inline
epochs = np.arange(300)
plt.figure(figsize=(15,7))
for n, i in enumerate(lear_rate):
    sgd = SGD(lr=i)
    model = generate_model(sgd, "sigmoid")
    hist = model.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                     nb_epoch=300, verbose=0,
                     validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
    plt.plot(epochs, hist.history['loss'], label="Lear_rate:"+str(i))
plt.title(u"Variación de learning rate con activación Sigmoid")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.ylim([0, 2])
plt.legend()
plt.show()
# K-fold cross-validation (5 and 10 folds) comparing sigmoid vs. relu models
# on the training split; the mean validation MSE is collected per fold count.
from sklearn import cross_validation
Xm = X_train_scaled.as_matrix()
ym = y_train_scaled.as_matrix()
mse_cvs1 = []
mse_cvs2 = []
for nfold in [5, 10]:
    kfold = cross_validation.KFold(len(Xm), nfold)
    cvscores1 = []
    cvscores2 = []
    for i, (train, val) in enumerate(kfold):
        # create models
        sgd = SGD(lr=0.02)
        model1 = generate_model(sgd, 'sigmoid')
        model2 = generate_model(sgd, 'relu')
        # Fit the models
        model1.fit(Xm[train], ym[train], nb_epoch=300, verbose=0)
        model2.fit(Xm[train], ym[train], nb_epoch=300, verbose=0)
        # evaluate the models
        # sigmoid score
        scores1 = model1.evaluate(Xm[val], ym[val])
        # relu score
        scores2 = model2.evaluate(Xm[val], ym[val])
        # Store values
        cvscores1.append(scores1)
        cvscores2.append(scores2)
    mse_cvs1.append(np.mean(cvscores1))
    mse_cvs2.append(np.mean(cvscores2))
print "Results for sigmoid and CV"
print mse_cvs1
print "Results for relu and CV"
print mse_cvs2
# Final models trained on the full training split and scored on the test set.
model1 = generate_model(SGD(lr=0.02), "sigmoid")
model2 = generate_model(SGD(lr=0.02), "relu")
model1.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(), nb_epoch=300, verbose=0)
model2.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(), nb_epoch=300, verbose=0)
score_sigmoid = model1.evaluate(X_test_scaled.as_matrix(), y_test_scaled.as_matrix())
score_relu = model2.evaluate(X_test_scaled.as_matrix(), y_test_scaled.as_matrix())
print "Result for sigmoid in testing set"
print score_sigmoid
print "Result for relu in testing set"
print score_relu
# Sweep the SGD learning-rate decay over five log-spaced values in [1e-6, 1]
# and overlay the resulting loss curves.
n_decay = 5
lear_decay = np.logspace(-6,0,n_decay)
%matplotlib inline
epochs = np.arange(300)
plt.figure(figsize=(15,7))
for i in lear_decay:
    sgd = SGD(lr=0.02, decay=i)
    model = generate_model(sgd, "sigmoid")
    hist = model.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                     nb_epoch=300, verbose=0,
                     validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
    plt.plot(epochs, hist.history['loss'], label="decay = "+str(i))
plt.title(u"Variación de learning decay")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.legend()
plt.ylim([0,2])
plt.show()
# Sweep SGD momentum over 21 values in [0, 1].
n_decay = 21
momentum = np.linspace(0,1,n_decay)
%matplotlib inline
epochs = np.arange(300)
plt.figure(figsize=(15,7))
for i in momentum:
    sgd = SGD(lr=0.02,momentum=i)
    model = generate_model(sgd, "sigmoid")
    hist = model.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                     nb_epoch=300, verbose=0,
                     validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
    plt.plot(epochs, hist.history['loss'],label="Momentum:"+str(i))
plt.title(u"Variación de momentum")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.legend()
plt.ylim([0,2])
plt.show()
# Sweep the batch size from 1 (pure SGD, red) to the full training-set size
# (batch gradient descent, blue) with the sigmoid model; mini-batches in green.
n_batches = 21
batch_sizes = np.round(np.linspace(1,X_train_scaled.shape[0],n_batches))
%matplotlib inline
epochs = np.arange(300)
for i in batch_sizes:
    sgd = SGD(lr=0.02)
    model = generate_model(sgd, "sigmoid")
    hist = model.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                     batch_size=i, nb_epoch=300, verbose=0,
                     validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
    if i == 1:
        plt.plot(epochs, hist.history['loss'], 'r-', label="SGD batch=1")
    elif i == X_train_scaled.shape[0]:
        plt.plot(epochs, hist.history['loss'], 'b-', label="Batch")
    else:
        plt.plot(epochs, hist.history['loss'], 'g-')
plt.title(u"Variación de batch sizes en d)")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.legend()
plt.show()
# Same batch-size sweep with the ReLU model.
%matplotlib inline
epochs = np.arange(300)
for i in batch_sizes:
    sgd = SGD(lr=0.02)
    model = generate_model(sgd, "relu")
    hist = model.fit(X_train_scaled.as_matrix(), y_train_scaled.as_matrix(),
                     batch_size=i, nb_epoch=300, verbose=0,
                     validation_data=(X_test_scaled.as_matrix(), y_test_scaled.as_matrix()))
    # SGD
    if i == 1:
        plt.plot(epochs, hist.history['loss'], 'r-', label="SGD batch=1")
    # BATCH
    elif i == X_train_scaled.shape[0]:
        plt.plot(epochs, hist.history['loss'], 'b-', label="Batch")
    # MINIBATCH
    else:
        plt.plot(epochs, hist.history['loss'], 'g-')
plt.title(u"Variación de batch sizes en e)")
plt.xlabel("# epochs")
plt.ylabel("MSE")
plt.legend()
plt.show()
import cPickle as pickle
import os
from scipy.misc import imread
# Seed the RNG so the random validation split below is reproducible.
np.random.seed(20)
# Carga de un archivo de CIFAR
def load_CIFAR_one(filename):
    """Read one pickled CIFAR batch file and return (images, labels).

    The labels list is converted to a NumPy array; the image matrix is
    returned exactly as stored under the batch dict's 'data' key.
    """
    with open(filename, 'rb') as batch_file:
        batch = pickle.load(batch_file)
    return batch['data'], np.array(batch['labels'])
# Carga todos los archivos CIFAR y generar Training set, Testing set y Validation set
def load_CIFAR10(PATH, n_files=6):
    """Load the CIFAR-10 batches under PATH.

    Concatenates training batches 1..n_files-1, carves a random validation
    subset (between 1000 and 5000 sampled rows) out of the training data,
    and loads 'test_batch' as the test set.

    Returns:
        Xtr, Ytr, Xte, Yte, Xv, Yv
    """
    xs = []
    ys = []
    # Concatenate all the training batches.
    for b in range(1, n_files):
        f = os.path.join(PATH, 'data_batch_%d' % (b, ))
        X, Y = load_CIFAR_one(f)
        xs.append(X)
        ys.append(Y)
    Xtr = np.concatenate(xs)
    Ytr = np.concatenate(ys)
    del X, Y
    # Carve a validation subset out of the training data via a boolean mask.
    # NOTE(review): np.random.choice samples with replacement here, so the
    # validation set may end up with fewer than v_size distinct rows.
    v_size = np.random.randint(1000, 5000)
    indices = np.random.choice(np.arange(Xtr.shape[0]), v_size)
    mask_tr = np.ones(Xtr.shape[0], dtype=bool)
    mask_tr[indices] = False
    mask_v = np.invert(mask_tr)
    # Validation split.
    Xv = Xtr[mask_v]
    Yv = Ytr[mask_v]
    # Remaining rows stay in the training split.
    Xtr = Xtr[mask_tr]
    Ytr = Ytr[mask_tr]
    # Test split.
    Xte, Yte = load_CIFAR_one(os.path.join(PATH, 'test_batch'))
    return Xtr, Ytr, Xte, Yte, Xv, Yv
# Load CIFAR-10 from the local "data" folder and name the ten classes.
Xtr, Ytr, Xte, Yte, Xv, Yv = load_CIFAR10("data")
label_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
from sklearn.preprocessing import StandardScaler
# Centrar dataset y escalar segun preferencia
def preprocess(X, with_mean=True, with_std=True):
    """Standardize X, optionally centering (with_mean) and/or scaling (with_std).

    Fits a StandardScaler on X itself and returns the transformed array.
    """
    # Pass the flags by keyword: StandardScaler's first positional parameter
    # is `copy`, so the original StandardScaler(with_mean, with_std) silently
    # bound with_mean -> copy and with_std -> with_mean (and newer
    # scikit-learn versions reject positional arguments outright).
    scaler = StandardScaler(with_mean=with_mean, with_std=with_std).fit(X)
    return scaler.transform(X)
# Center only:
#Xtr_c = preprocess(Xtr, with_mean=True, with_std=False)
# Scale only:
#Xtr_s = preprocess(Xtr, with_mean=False, with_std=True)
# Center and scale the training, validation and test sets.
Xtr_cs = preprocess(Xtr)
Xv_cs = preprocess(Xv)
Xte_cs = preprocess(Xte)
# Example dimensionality: vectors of 3072 features (32x32x3 pixels).
input_dim = Xtr.shape[1]
from keras.utils.np_utils import to_categorical
# One-hot encode the labels for the 10 CIFAR classes.
Ytr_conv = to_categorical(Ytr,10)
Yv_conv = to_categorical(Yv,10)
Yte_conv = to_categorical(Yte,10)
# Modelo 1: single hidden layer of 50 ReLU units, softmax output,
# SGD with decay and momentum, MSE loss.
MLPmodel = Sequential()
MLPmodel.add(Dense(50, input_dim=input_dim, init='uniform'))
MLPmodel.add(Activation('relu'))
MLPmodel.add(Dense(10))
MLPmodel.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=False)
MLPmodel.compile(loss='mse',
              optimizer=sgd,
              metrics=['accuracy'])
MLPmodel.fit(Xtr_cs, Ytr_conv, nb_epoch=20, batch_size=16, verbose=0)
score = MLPmodel.evaluate(Xv_cs, Yv_conv,verbose=0, batch_size=16)
print "Loss:",score[0], "Accuracy:",score[1]
# Modelo 2: two hidden tanh layers of 50 units, SGD with Nesterov momentum.
model2 = Sequential()
model2.add(Dense(50, input_dim=input_dim, init='uniform'))
model2.add(Activation('tanh'))
model2.add(Dense(50, init='uniform'))
model2.add(Activation('tanh'))
model2.add(Dense(10, init='uniform'))
model2.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.8, nesterov=True)
model2.compile(loss='mse',
              optimizer=sgd,
              metrics=['accuracy'])
model2.fit(Xtr_cs, Ytr_conv, nb_epoch=20, batch_size=32, verbose=0)
score = model2.evaluate(Xv_cs, Yv_conv, batch_size=32, verbose=0)
print "Loss:",score[0], "Accuracy:",score[1]
# Modelo 3: two hidden sigmoid layers of 256 units trained with categorical
# cross-entropy instead of MSE.
model3 = Sequential()
model3.add(Dense(256, input_dim=input_dim, init='uniform'))
model3.add(Activation('sigmoid'))
model3.add(Dense(256, init='normal'))
model3.add(Activation('sigmoid'))
model3.add(Dense(10, init='normal'))
model3.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.8, nesterov=True)
model3.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
model3.fit(Xtr_cs, Ytr_conv, nb_epoch=20, batch_size=32, verbose=0)
score = model3.evaluate(Xv_cs, Yv_conv, batch_size=32, verbose=0)
print "Loss:",score[0], "Accuracy:",score[1]
from top_level_features import hog_features
from top_level_features import color_histogram_hsv
from top_level_features import extract_features
# Reload the raw CIFAR data and replace raw pixels with hand-crafted features.
Xtr, Ytr, Xte, Yte, Xv, Yv = load_CIFAR10("data")
#features = extract_features(Xtr,[hog_features])  # HOG features only
#features = extract_features(Xtr,[color_histogram_hsv])  # color histograms only
# Extract HOG (gradient) features plus an HSV color histogram for each split.
features = extract_features(Xtr,[hog_features, color_histogram_hsv])
features_v = extract_features(Xv,[hog_features, color_histogram_hsv])
features_te = extract_features(Xte,[hog_features, color_histogram_hsv])
print Xtr.shape
# Dimensionality drops from 3072 raw pixels to 154 features.
print features.shape
print features_v.shape
print features_te.shape
# Re-train the best architecture (model 3) on the combined features.
model3 = Sequential()
model3.add(Dense(256, input_dim=features.shape[1], init='uniform'))
model3.add(Activation('sigmoid'))
model3.add(Dense(256, init='normal'))
model3.add(Activation('sigmoid'))
model3.add(Dense(10, init='normal'))
model3.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.8, nesterov=True)
model3.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
Ytr_conv = to_categorical(Ytr, 10)
Yv_conv = to_categorical(Yv, 10)
Yte_conv = to_categorical(Yte, 10)
model3.fit(features, Ytr_conv, nb_epoch=20, batch_size=32, verbose=0)
print model3.evaluate(features_v, Yv_conv, batch_size=32, verbose=0)
print model3.evaluate(features_te, Yte_conv, batch_size=32, verbose=0)
# Same architecture on HOG features alone.
features = extract_features(Xtr,[hog_features])  # HOG features
features_te = extract_features(Xte,[hog_features])  # HOG features
model3 = Sequential()
model3.add(Dense(256, input_dim=features.shape[1], init='uniform'))
model3.add(Activation('sigmoid'))
model3.add(Dense(256, init='normal'))
model3.add(Activation('sigmoid'))
model3.add(Dense(10, init='normal'))
model3.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.8, nesterov=True)
model3.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
model3.fit(features, Ytr_conv, nb_epoch=20, batch_size=32, verbose=0)
model3.evaluate(features_te, Yte_conv, batch_size=32, verbose=0)
print features.shape
print features_te.shape
# Same architecture on the HSV color histogram alone.
features = extract_features(Xtr,[color_histogram_hsv])  # HSV color histograms
features_te = extract_features(Xte,[color_histogram_hsv])  # HSV color histograms
model3 = Sequential()
model3.add(Dense(256, input_dim=features.shape[1], init='uniform'))
model3.add(Activation('sigmoid'))
model3.add(Dense(256, init='normal'))
model3.add(Activation('sigmoid'))
model3.add(Dense(10, init='normal'))
model3.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.8, nesterov=True)
model3.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
model3.fit(features, Ytr_conv, nb_epoch=20, batch_size=32, verbose=0)
model3.evaluate(features_te, Yte_conv, batch_size=32, verbose=0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: El problema se considera XOR, o or exclusivo debido a que [(-),(-)] y [(+),(+)] son etiquetados con circulos y [(-),(+)] y [(+),(-)] son etiquetados con cruces, podemos compararlo con el or exclusivo que obtiene 0 para [1,1] y [0,0] y obtiene 1 para [1,0] y [0,1]
Step2: Como se puede apreciar, la neurona a pesar de la cantidad de datos de entrenamiento utilizados, no es capaz de aprender la función XOR arrojando resultados inconsistentes, siendo incapaz de clasificar de forma determinante los ejemplos de testing en alguna clase. Esto es debido a que las clases del set no son linealmente separable.
Step3: Se puede observar que con un perceptrón multicapa se obtiene un accuracy del 93% pudiendo clasificar correctamente los datos.
Step4: En las lineas 5 a 7 se puede observar que este set se divide para generar el training set y el testing set. Del total de datos, el 25% se deja aparte para pruebas y el restante 75% permanece para entrenamiento.
Step5: c) Normalización de datos. Este procedimiento es necesario para evitar cualquier clase de problemas con la convergencia de nuestra función de optimización, pues es posible que debido a los diversos rangos de datos la convergencia favorezca a ciertos valores, deteniéndose el algoritmo en un punto donde se ha aprendido mal sobre el training set y no se tenga capacidad de generalización.
Step6: d) Gráfico de MSE versus número de epochs utilizados para entrenar para red FF de 3 capas, 200 unidades ocultas y activación sigmoidal entrenada con SGD con parámetros $\eta$=0.2 y 300 epochs de entrenamiento.
Step7: El error obtenido en un comienzo es muy alto pero al aumentar el número de epochs éste disminuye considerablemente convergiendo a un número muy bajo pero a su vez la convergencia posee comportamiento oscilatorio.
Step8: Podemos observar que con la función de activación ReLu se llega a un menor error cuadrático que con Sigmoid, además la convergencia es más estable.
Step9: Se puede observar que a medida se aumenta el learning rate la convergencia va cambiando, para lear_rate = 0 no aprende, pero mientras va aumentando se acerca de a poco a un error más pequeño debido a que aprende de forma lenta pero estable, a medida aumenta la el learning rate pasa a ser más oscilatoria, como se observa en lear_rate = 0.02 donde el resultado es muy oscilatorio.
Step10: Los resultados para el MSE utilizando función de activación sigmoid muestran errores en cross validation de 0.36 para 5 folds y 0.28 para 10 folds, mientras que los errores utilizando función relu bajan a aproximadamente 0.13 en ambos casos. La estimación con función de activación relu es más confiable y predijo de buena forma el comportamiento del testing set.
Step11: Se observa que un decay igual a 1.0 no tiene un efecto en la disminución del MSE. A medida que el factor de decaimiento disminuye el MSE tiende a descender de forma más rápida y converger a valores más bajos. Valores extremadamente bajos de decay poseen un comportamiento demasiado inestable, el MSE puede llegar a oscilar entre el mínimo encontrado (cualitativamente en MSE=0.25) y MSE = 1.0
Step12: j) Entrenar el modelo en d) y e) cambiando el tamaño del batch. Comparar sgd, batch y mini-batch
Step13: Podemos observar en los gráficos anteriores que al variar el tamaño del batch desde 1 hasta el tamaño total, tenemos 3 resultados distinguibles en cada gráfico
Step14: b) Escalamiento y centrado de datos. Aparte de las ventajas mencionadas anteriormente, los resultados experimentales con datos no centrados y no escalados ofrecen resultados peores que si la data se normaliza.
Step15: c) Creación de red neuronal para problema CIFAR. En primer lugar se adaptan las etiquetas a una representación manejable por la red.
Step16: (d) Extracción de features y experimentación.
|
6,964
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
init_printing()
# Numerical value of pi to 10 significant figures.
pi.evalf(10)
# Symbolic scalars.
alpha, beta, gamma, x, y = symbols('alpha beta gamma x y')
alpha, beta
# An undefined function symbol.
f= Function('f')
# Mixed second partial derivative d^2/(dx dy) of sin(x+1)*cos(y).
diff(sin(x+1)*cos(y), x, y)
test = diff(f(x)+1,x)
test
# An applied undefined function M_d(x).
Md = Function('M_d')(x)
Md
# A symbolic 3-vector and the Jacobian of a vector field with respect to it.
q1, q2, q3 = symbols('q_1 q_2 q_3')
q = Matrix([q1, q2, q3])
q
acol = Matrix([q1**2+sin(q2), exp(q3), q2-q1])
J = acol.jacobian(q)
J
# Abstract matrix symbols with symbolic (integer) dimensions.
n, m = symbols('n m', integer=True)
M = MatrixSymbol('M', n, m)
b = MatrixSymbol('b', m, 1)
M*b
(M*b).T
(M*b).T
Transpose(M*b).doit()
# Indexed symbols: a concrete element A[2] and a symbolic one A[i].
A = IndexedBase('A')
i = Idx('i')
A[2], A[i]
# Code generation: emit C code for |sin(x^2)|.
x = symbols('x')
expr = abs(sin(x**2))
expr
ccode(expr)
# Rewrite tanh in terms of exponentials.
tanh(x).rewrite(exp)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Code printing
|
6,965
|
<ASSISTANT_TASK:>
Python Code:
print("Exemplo 9.1")
import numpy as np
# v(t) = 12 cos(50t + 10 deg): read off amplitude, phase and angular
# frequency, then derive period T = 2*pi/omega and frequency f = 1/T.
Vm = 12
phi = 10
omega = 50
T = 2*np.pi/omega
f = 1/T
print("Amplitude:",Vm,"V")
print("Fase:",phi,"º")
print("Frequência angular:",omega,"rad/s")
print("Período:",T,"s")
print("Frequência:",f,"Hz")
print("Problema Prático 9.1")
Vm = 30
# sin-to-cos identity: 30sin(4*pi*t - 75º) = 30cos(4*pi*t + 165º)
phi = -75
omega = 4*np.pi
T = 2*np.pi/omega
f = 1/T
print("Amplitude:",Vm,"V")
print("Fase:",phi,"º")
print("Frequência angular:",omega,"rad/s")
print("Período:",T,"s")
print("Frequência:",f,"Hz")
print("Exemplo 9.2")
# Phase comparison: rewrite both sinusoids as cosines and subtract phases.
#v1 = -10cos(wt + 50º) = 10cos(wt + 50 - 180) = 10cos(wt - 130º)
#v2 = 12sen(wt - 10º) = 12cos(wt - 100º)
#-130 - (-100) = -30
phi = 30
print("v2 esta avancada em {}º em relação a v1".format(phi))
print("Problema Prático 9.2")
#i1 = -4sen(377t + 55) = 4sen(377t + 55 + 180) = 4sen(377t + 235) = 4cos(377t + 145)
#i2 = 5cos(377t - 65)
phi = 145 - (-65)
print("i1 esta avancada em {}º em relação a i2".format(phi))
print("Exemplo 9.4")
# Phasor representation: magnitude[angle] read from the cosine form.
#6cos(50t - 40)
#r = 6
#phi = -40
#-4sen(30t + 50) = 4sen(30t + 50 + 180) = 4cos(30t + 140)
#r = 4
#phi = 140
print("I: 6[-40º]")
print("V: 4[140º]")
print("Problema Prático 9.4")
#7cos(2t + 40)
#r = 7
#phi = 40
#-4sen(10t + 10) = 4sen(10t + 10 + 180) = 4cos(10t + 100)
#r = 4
#phi = 100
print("V: 7[40º]")
print("I: 4[100º]")
print("Exemplo 9.5")
import numpy as np
# Rectangular -> polar for -3 + j4; arctan needs the +180º correction
# because the point lies in the second quadrant.
r = np.sqrt((-3)**2 + 4**2)
phi = np.arctan(4/(-3))*180/np.pi + 180
print("I: {}[{}º]".format(r,phi))
# Multiplying a phasor by j adds 90º to its angle.
#j = 1[90º]
#V = 8e^(-j20) = 8[-20º]
#jV = 1*8 [90 -20] = 8[70º]
print("V: 8[70º]")
print("Problema Prático 9.5")
print("v(t) = 25cos(wt + 220)")
#j(12 - j5) = 5 + 12j
r = np.sqrt(5**2 + 12**2)
phi = np.arctan(12/5)*180/np.pi
print("I: {}[{}º]".format(r,phi))
print("Exemplo 9.6")
# Phasor addition: convert each term to rectangular form, add the
# components, then convert the sum back to polar.
#4cos(wt + 30) = 4[30]
#5sen(wt - 20) = 5cos(wt - 110) = 5[-110]
x = 4*np.cos(30*np.pi/180) + 5*np.cos(-110*np.pi/180)
y = 4*np.sin(30*np.pi/180) + 5*np.sin(-110*np.pi/180)
print("i1 + i2: {} + j{}".format(x,y))
r = np.sqrt(x**2 + y**2)
phi = np.arctan(y/x)*180/np.pi
print("I: {}[{}]".format(r,phi))
print("i(t): {}cos(wt + {})".format(r,phi))
print("Problema Prático 9.6")
#-10sen(wt - 30) = 10sen(wt + 150) = 10cos(wt + 60) = 10[60]
#20cos(wt + 45) = 20[45]
x = 10*np.cos(60*np.pi/180) + 20*np.cos(45*np.pi/180)
y = 10*np.sin(60*np.pi/180) + 20*np.sin(45*np.pi/180)
print("v1 + v2: {} + j{}".format(x,y))
r = np.sqrt(x**2 + y**2)
phi = np.arctan(y/x)*180/np.pi
print("V: {}[{}]".format(r,phi))
print("v(t): {}cos(wt + {})".format(r,phi))
print("Exemplo 9.7")
# Solve 4I + 8I/(jw) - 3jwI = 50[75º] for the phasor I: the impedance
# collapses to 4 - j10, so divide magnitudes and subtract angles.
#4I + 8I/jw - 3jwI = 50[75]
#4I -4jI - 6jI = 50[75]
#I = 50[75] / (4 - j10)
r = np.sqrt(4**2 + (-10)**2)
phi = np.arctan((-10)/4)*180/np.pi
R = 50/r
Phi = 75 - phi
print("Fasor I: {}[{}]".format(R,Phi))
print("i(t) = {}cos(wt + {}º)".format(R,Phi))
print("Problema Prático 9.7")
#2Vjw + 5V + 10v/jw = 50[-30]
#5V -2jV + 10jV = 50[-30]
#V = 50[-30] / (5 + j8)
r = np.sqrt(5**2 + 8**2)
phi = np.arctan(8/5)*180/np.pi
R = 50/r
Phi = -30 - phi
print("Fasor I: {}[{}]".format(R,Phi))
print("i(t) = {}cos(wt + {}º)".format(R,Phi))
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problema Prático 9.1
Step2: Exemplo 9.2
Step3: Problema Prático 9.2
Step4: Fasores
Step5: Problema Prático 9.4
Step6: Exemplo 9.5
Step7: Problema Prático 9.5
Step8: Exemplo 9.6
Step9: Problema Prático 9.6
Step10: Exemplo 9.7
Step11: Problema prático 9.7
|
6,966
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import json
import os
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
import scipy.sparse
import seaborn as sns
sns.set(context="paper", font_scale=1.5, rc={"lines.linewidth": 2}, font='DejaVu Serif')
# Root directory of the MovieLens-20M dataset on local disk.
DATA_DIR = '/hdd2/dawen/data/ml-20m/'
def timestamp_to_date(timestamp):
    """Render a Unix timestamp as a local-time 'YYYY-MM-DD HH:MM:SS' string."""
    moment = datetime.datetime.fromtimestamp(timestamp)
    return moment.strftime('%Y-%m-%d %H:%M:%S')
raw_data = pd.read_csv(os.path.join(DATA_DIR, 'ratings.csv'), header=0)
# binarize the data (only keep ratings >= 4)
raw_data = raw_data[raw_data['rating'] > 3.5]
# sort the raw data accorindg to timestamp
raw_data = raw_data.sort_index(by=['timestamp'])
raw_data
tstamp = np.array(raw_data['timestamp'])
print("Time span of the dataset: From %s to %s" %
(timestamp_to_date(np.min(tstamp)), timestamp_to_date(np.max(tstamp))))
# apparently the timestamps are ordered, check to make sure
for i in xrange(tstamp.size - 1):
if tstamp[i] > tstamp[i + 1]:
print("not ordered")
plt.hist(tstamp, bins=50)
xticks = np.linspace(tstamp[0], tstamp[-1], 10)
plt.xticks(xticks, map(lambda x: timestamp_to_date(x)[:7], xticks), rotation=90)
pass
start_t = time.mktime(datetime.datetime.strptime("1995-01-01", "%Y-%m-%d").timetuple())
raw_data = raw_data[raw_data['timestamp'] >= start_t]
tr_vd_raw_data = raw_data[:int(0.8 * raw_data.shape[0])]
def get_count(tp, id):
    """Return a Series of interaction counts per unique value of column `id`.

    The result is indexed by the id values themselves; callers rely on
    `count.index` holding the ids. `tp.groupby(id).size()` keeps that
    contract on every pandas version, whereas the former
    `tp[[id]].groupby(id, as_index=False).size()` returns a DataFrame with
    a RangeIndex on modern pandas, silently breaking the callers.
    """
    # NOTE: the parameter name `id` shadows the builtin, but it is kept
    # for backward compatibility with existing call sites.
    count = tp.groupby(id).size()
    return count
def filter_triplets(tp, min_uc=5, min_sc=0):
    """Filter a ratings frame to active users and (optionally) popular items.

    Parameters
    ----------
    tp : DataFrame with at least 'userId' and 'movieId' columns.
    min_uc : keep only users with at least this many interactions.
    min_sc : keep only items with at least this many interactions (0 = keep all).

    Returns (filtered frame, per-user counts, per-item counts). The count
    Series are indexed by the ids themselves, which is what callers expect.
    """
    # Only keep the triplets for items which were watched by at least min_sc users.
    if min_sc > 0:
        songcount = tp.groupby('movieId').size()
        tp = tp[tp['movieId'].isin(songcount.index[songcount >= min_sc])]
    # Only keep the triplets for users who watched at least min_uc items.
    # After doing this, some items may again fall below min_sc users, but
    # that should only be a small proportion.
    if min_uc > 0:
        usercount = tp.groupby('userId').size()
        tp = tp[tp['userId'].isin(usercount.index[usercount >= min_uc])]
    # Update both counts after filtering. groupby(...).size() keeps the id
    # values as the index on every pandas version (unlike
    # groupby(..., as_index=False).size(), which returns a DataFrame on
    # modern pandas and loses the id index).
    usercount, songcount = tp.groupby('userId').size(), tp.groupby('movieId').size()
    return tp, usercount, songcount
tr_vd_raw_data, user_activity, item_popularity = filter_triplets(tr_vd_raw_data)
sparsity = 1. * tr_vd_raw_data.shape[0] / (user_activity.shape[0] * item_popularity.shape[0])
print("After filtering, there are %d watching events from %d users and %d movies (sparsity: %.3f%%)" %
(tr_vd_raw_data.shape[0], user_activity.shape[0], item_popularity.shape[0], sparsity * 100))
unique_uid = user_activity.index
unique_sid = item_popularity.index
song2id = dict((sid, i) for (i, sid) in enumerate(unique_sid))
user2id = dict((uid, i) for (i, uid) in enumerate(unique_uid))
with open(os.path.join(DATA_DIR, 'pro', 'unique_uid.txt'), 'w') as f:
for uid in unique_uid:
f.write('%s\n' % uid)
with open(os.path.join(DATA_DIR, 'pro', 'unique_sid.txt'), 'w') as f:
for sid in unique_sid:
f.write('%s\n' % sid)
np.random.seed(13579)
n_ratings = tr_vd_raw_data.shape[0]
vad = np.random.choice(n_ratings, size=int(0.125 * n_ratings), replace=False)
vad_idx = np.zeros(n_ratings, dtype=bool)
vad_idx[vad] = True
vad_raw_data = tr_vd_raw_data[vad_idx]
train_raw_data = tr_vd_raw_data[~vad_idx]
print "There are total of %d unique users in the training set and %d unique users in the entire dataset" % \
(len(pd.unique(train_raw_data['userId'])), len(unique_uid))
print "There are total of %d unique items in the training set and %d unique items in the entire dataset" % \
(len(pd.unique(train_raw_data['movieId'])), len(unique_sid))
train_sid = set(pd.unique(train_raw_data['movieId']))
left_sid = list()
for i, sid in enumerate(unique_sid):
if sid not in train_sid:
left_sid.append(sid)
move_idx = vad_raw_data['movieId'].isin(left_sid)
train_raw_data = train_raw_data.append(vad_raw_data[move_idx])
vad_raw_data = vad_raw_data[~move_idx]
print "There are total of %d unique items in the training set and %d unique items in the entire dataset" % \
(len(pd.unique(train_raw_data['movieId'])), len(unique_sid))
test_raw_data = raw_data[int(0.8 * len(raw_data)):]
test_raw_data = test_raw_data[test_raw_data['movieId'].isin(unique_sid)]
test_raw_data = test_raw_data[test_raw_data['userId'].isin(unique_uid)]
print len(train_raw_data), len(vad_raw_data), len(test_raw_data)
train_timestamp = np.asarray(tr_vd_raw_data['timestamp'])
print("train: from %s to %s" % (timestamp_to_date(train_timestamp[0]),
timestamp_to_date(train_timestamp[-1])))
test_timestamp = np.asarray(test_raw_data['timestamp'])
print("test: from %s to %s" % (timestamp_to_date(test_timestamp[0]),
timestamp_to_date(test_timestamp[-1])))
def numerize(tp, u2i=None, s2i=None):
    """Map raw userId/movieId values to dense integer indices.

    Parameters
    ----------
    tp  : DataFrame with 'timestamp', 'userId' and 'movieId' columns.
          Mutated in place (gains 'uid'/'sid' columns), as before.
    u2i : optional userId -> index mapping; defaults to the module-level
          `user2id` dict (the original behaviour).
    s2i : optional movieId -> index mapping; defaults to the module-level
          `song2id` dict.

    Returns the frame restricted to ['timestamp', 'uid', 'sid'].
    Raises KeyError for ids missing from the mappings (same as before).
    """
    if u2i is None:
        u2i = user2id
    if s2i is None:
        s2i = song2id
    # List comprehensions behave identically on Python 2 and 3; the former
    # bare map() only produced a list on Python 2.
    tp['uid'] = [u2i[x] for x in tp['userId']]
    tp['sid'] = [s2i[x] for x in tp['movieId']]
    return tp[['timestamp', 'uid', 'sid']]
train_data = numerize(train_raw_data)
train_data.to_csv(os.path.join(DATA_DIR, 'pro', 'train.csv'), index=False)
vad_data = numerize(vad_raw_data)
vad_data.to_csv(os.path.join(DATA_DIR, 'pro', 'validation.csv'), index=False)
test_data = numerize(test_raw_data)
test_data.to_csv(os.path.join(DATA_DIR, 'pro', 'test.csv'), index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Confirmed the timestamps are ordered
Step2: Now we select the data from 1995-01-01 to the last day as the dataset (i.e., all the dataset)
Step3: Take the first 80% of the data as train and validation set
Step4: Split 12.5% (10% of the total ratings) as validation set
Step5: Make sure there is no empty users/items
Step6: For test data, only keep the users and items that appear in the training/validation sets
Step7: Basic data information
Step8: Numerize the data into (timestamp, user_index, item_index) format
|
6,967
|
<ASSISTANT_TASK:>
Python Code:
def pretty_print_review_and_label(i):
    # Print the i-th label and the first 80 characters of the i-th review,
    # tab-separated. Relies on the module-level `reviews` and `labels` lists
    # populated from reviews.txt / labels.txt below.
    print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
from collections import Counter
import numpy as np
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
positive_counts.most_common()
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
list(vocab)
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
from IPython.display import Image
Image(filename='sentiment_network.png')
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
    # Fill the global `layer_0` buffer (shape 1 x vocab_size) with the
    # bag-of-words counts for `review`.
    global layer_0
    # clear out previous state, reset the layer to be all 0s
    layer_0 *= 0
    for word in review.split(" "):
        # word2index is the module-level word -> column-index mapping;
        # an out-of-vocabulary word raises KeyError here.
        layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
    """Map a sentiment label string to its numeric training target."""
    # 'POSITIVE' -> 1; anything else (i.e. 'NEGATIVE') -> 0.
    return 1 if label == 'POSITIVE' else 0
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
    """A minimal 2-layer neural network for binary sentiment classification.

    Architecture: bag-of-words input layer -> linear hidden layer (no
    nonlinearity) -> single sigmoid output unit. Trained one review at a
    time with plain stochastic gradient descent.
    """
    def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
        # set our random number generator
        np.random.seed(1)
        # Build the vocabularies/indices from the data, then size the
        # network so the input layer has one unit per vocabulary word.
        self.pre_process_data(reviews, labels)
        self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
    def pre_process_data(self, reviews, labels):
        """Build word/label vocabularies and their index mappings."""
        # Collect the set of all words seen across every review.
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)
        self.review_vocab = list(review_vocab)
        # Collect the set of distinct labels (POSITIVE / NEGATIVE).
        label_vocab = set()
        for label in labels:
            label_vocab.add(label)
        self.label_vocab = list(label_vocab)
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)
        # word -> input-layer column index
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i
        # label -> index (built for symmetry; not used elsewhere in the class)
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i
    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate weight matrices and the reusable input buffer."""
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Initialize weights: zeros for input->hidden, small random
        # normals for hidden->output.
        self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))
        self.learning_rate = learning_rate
        # Reusable (1 x vocab) buffer holding the current review's word counts.
        self.layer_0 = np.zeros((1,input_nodes))
    def update_input_layer(self,review):
        """Fill self.layer_0 with bag-of-words counts for `review`."""
        # clear out previous state, reset the layer to be all 0s
        self.layer_0 *= 0
        for word in review.split(" "):
            if(word in self.word2index.keys()):
                self.layer_0[0][self.word2index[word]] += 1
    def get_target_for_label(self,label):
        # Numeric training target: POSITIVE -> 1, anything else -> 0.
        if(label == 'POSITIVE'):
            return 1
        else:
            return 0
    def sigmoid(self,x):
        # Logistic activation applied to the output unit.
        return 1 / (1 + np.exp(-x))
    def sigmoid_output_2_derivative(self,output):
        # Derivative of the sigmoid, expressed in terms of its output value.
        return output * (1 - output)
    def train(self, training_reviews, training_labels):
        """Run one SGD pass over the training data, printing progress."""
        assert(len(training_reviews) == len(training_labels))
        correct_so_far = 0
        start = time.time()
        for i in range(len(training_reviews)):
            review = training_reviews[i]
            label = training_labels[i]
            #### Implement the forward pass here ####
            ### Forward pass ###
            # Input Layer
            self.update_input_layer(review)
            # Hidden layer
            layer_1 = self.layer_0.dot(self.weights_0_1)
            # Output layer
            layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
            #### Implement the backward pass here ####
            ### Backward pass ###
            # TODO: Output error
            layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
            # TODO: Backpropagated error
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
            layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
            # TODO: Update the weights
            self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
            self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
            # A prediction counts as correct when the error magnitude is
            # below 0.5, i.e. the output is on the right side of the target.
            if(np.abs(layer_2_error) < 0.5):
                correct_so_far += 1
            reviews_per_second = i / float(time.time() - start)
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")
    def test(self, testing_reviews, testing_labels):
        """Evaluate accuracy on held-out reviews, printing running stats."""
        correct = 0
        start = time.time()
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1
            reviews_per_second = i / float(time.time() - start)
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
    def run(self, review):
        """Classify a single review, returning 'POSITIVE' or 'NEGATIVE'."""
        # Input Layer
        self.update_input_layer(review.lower())
        # Hidden layer
        layer_1 = self.layer_0.dot(self.weights_0_1)
        # Output layer
        layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
        # Threshold the sigmoid output at 0.5.
        if(layer_2[0] > 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
    # Second copy of the helper (re-defined in a later notebook cell):
    # fill the global `layer_0` buffer with bag-of-words counts for `review`.
    global layer_0
    # clear out previous state, reset the layer to be all 0s
    layer_0 *= 0
    for word in review.split(" "):
        # Raises KeyError for words missing from the module-level word2index.
        layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lesson
Step2: Project 1
Step3: Transforming Text into Numbers
Step4: Project 2
Step5: Project 3
Step6: Understanding Neural Noise
|
6,968
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import glob
import os
import numpy as np
from time import time
import logging
import gensim
import bz2
import re
from stop_words import get_stop_words
def getTopicForQuery (question,stoplist,dictionary,lda):
    """Return the LDA topic probability distribution for a question.

    The question is lower-cased, tokenised, stripped of stoplist words,
    and converted to a bag-of-words with `dictionary` before being scored.

    @param question : The string from which we want to extract the topic
    @param stoplist : The list of common words for the language, that we want to exclude
    @param dictionary : The dictionary of all the words we find for a given lda model (associated to lda)
    @param lda : the model of lda (Latent Dirichlet Allocation) that we want to model the topics from.
    @return the topic probability distribution for the given question
    """
    # 1. Question -> Lower case -> Split -> Exclude common words
    # re.LOCALE cannot be combined with str patterns on Python 3, so only
    # re.UNICODE (already the default for str) is used here.
    temp = question.lower()
    words = re.findall(r'\w+', temp, flags = re.UNICODE)
    # Materialise as a list so it works identically on Python 2 and 3.
    important_words = [w for w in words if w not in stoplist]
    # 2. Find matches in the dictionary of words and get the topics
    ques_vec = dictionary.doc2bow(important_words)
    # BUG FIX: use the `lda` argument instead of the module-level `ldamodel`,
    # so the function honours whichever model the caller passes in (all
    # existing call sites pass `ldamodel`, so their behaviour is unchanged).
    return lda.get_document_topics(ques_vec,minimum_probability=0)
stop_words_de = get_stop_words('de')
with open ("../datas/stop_dictionaries/French_stop_words_changed.txt", "r") as myfile:
stop_words=myfile.read()
stop_words = stop_words.split(',')
stop_words = stop_words_de+stop_words
ldamodel = gensim.models.LdaModel.load('../datas/lda/ldamodelFR.model', mmap='r')
dataset_tmp = []
path = '../datas/scrap/Voting'
allFiles = glob.glob(os.path.join(path, 'Session*.csv'))
for file_ in allFiles:
print(file_)
data_tmp = pd.read_csv(file_)
dataset_tmp += [data_tmp]
data_frame = pd.concat(dataset_tmp)
parl = data_frame.ParlGroupCode.unique().tolist()
#for group in parl :
# data_frame.loc[data_frame.ParlGroupCode==group,'ParlGroupCode']= parl.index(group)
data_frame.head()
votation_frame = data_frame#[['BillTitle','BusinessTitle','FirstName','LastName','Decision','ParlGroupCode','VoteEnd']]
votation_frame = votation_frame.fillna(value='')
votation_frame['text'] = votation_frame['BillTitle']+' '+votation_frame['BusinessTitle']
text_dict = {'text': votation_frame.text.unique()}
topic_learning_frame = pd.DataFrame(text_dict)
topic_learning_frame.head()
def insert_topic(data_frame) :
    # Build a dict mapping each named LDA topic to the list of per-document
    # probabilities over the rows of `data_frame` (plus the raw texts under
    # the 'text' key). Relies on module-level globals: stop_words, ldamodel
    # and getTopicForQuery, and reads model artefacts from disk.
    dict_ = {}
    dict_['text'] =data_frame['text'].values
    # topics.txt holds lines of the form "<topic_id>:<topic_name>".
    with open ("../datas/lda/topics.txt", "r") as myfile:
        s=myfile.read()
    topics = s.split('\n')
    topics_dic = {}
    for topic in topics :
        if len(topic)>1 :
            name = topic.split(':')
            topics_dic[name[0]] = name[1]
    dictionary = gensim.corpora.Dictionary.load('../datas/lda/ldaDictionaryFR.dict')
    for index, text in zip(data_frame.index,data_frame['text'].values) :
        if index%1000 == 0 :
            # Progress indicator for long runs.
            print(index)
        for topic in getTopicForQuery(text,stop_words,dictionary,ldamodel) :
            # `topic` is a (topic_id, probability) pair; append the
            # probability under the topic's human-readable name.
            if (topics_dic[str(topic[0])]) in dict_ :
                dict_[topics_dic[str(topic[0])]] +=[topic[1]]
            else :
                dict_[topics_dic[str(topic[0])]] =[topic[1]]
    return dict_
if not os.path.exists("../datas/nlp_results"):
os.makedirs("../datas/nlp_results")
from yandex_translate import YandexTranslate
translate = YandexTranslate('trnsl.1.1.20161208T132730Z.fe490b34d7db4e4f.0a4c7781a0273d520073a1550b6a6624c1c3fd0a')
text_eng = []
for text in topic_learning_frame.text:
s = translate.translate(text, 'fr-en')
text_eng += [s['text'][0]]
topics_dict = insert_topic(topic_learning_frame)
topics_frame = pd.DataFrame(topics_dict)
#topics_frame['text_eng'] = text_eng
topics_frame.head(5)
#topics_frame.to_csv('../datas/nlp_results/voting_with_topics_unique.csv',index=False)
(pd.merge(votation_frame,topics_frame)).to_csv('../datas/nlp_results/voting_with_topics.csv',index=False)
def insert_topic_unique(data_frame) :
    # For each row of `data_frame`, keep only the single most probable LDA
    # topic (stored by name under 'Topic'). Relies on module-level globals:
    # stop_words, ldamodel and getTopicForQuery.
    dict_ = {}
    dict_['text'] =data_frame['text'].values
    dict_['Topic'] = []
    # topics.txt holds lines of the form "<topic_id>:<topic_name>".
    with open ("../datas/lda/topics.txt", "r") as myfile:
        s=myfile.read()
    topics = s.split('\n')
    topics_dic = {}
    for topic in topics :
        if len(topic)>1 :
            name = topic.split(':')
            topics_dic[name[0]] = name[1]
    # NOTE(review): this loads '../datas/lda/ldaDictionary' while
    # insert_topic loads '../datas/lda/ldaDictionaryFR.dict' — confirm
    # which path is intended.
    dictionary = gensim.corpora.Dictionary.load('../datas/lda/ldaDictionary')
    for index, text in zip(data_frame.index,data_frame['text'].values) :
        if index%1000 == 0 :
            # Progress indicator for long runs.
            print(index)
        max_ = 0
        # Track the id of the highest-probability topic for this document.
        # NOTE(review): if every topic probability were 0, topic_id would be
        # unbound below — relies on at least one positive probability.
        for topic in getTopicForQuery(text,stop_words,dictionary,ldamodel) :
            max_old = max_
            max_ = max(max_,topic[1])
            if max_old != max_ :
                topic_id = topic[0]
        dict_['Topic'] += [topics_dic[str(topic_id)]]
    return dict_
topics_s = insert_topic_unique(topic_learning_frame)
topics_ss = pd.DataFrame(topics_s)
topics_ss.head()
topics_ss.to_csv("../datas/nlp_results/voting_single_topic.csv")
data_complete = (pd.merge(votation_frame,topics_ss))
data_complete.head(2)
data_complete.VoteEnd = [x[:10] for x in data_complete.VoteEnd]
data_complete.head(2)
not_used = ['Unnamed: 0','BusinessNumber','BusinessShortNumber','CantonID','ID','IdLegislativePeriod',
'IdSession','IdVote','PersonNumber','RegistrationNumber','BillTitle','BusinessTitle','DecisionText',
'Language','MeaningNo','MeaningYes','ParlGroupColour','ParlGroupCode','ParlGroupNameAbbreviation',
'Subject','text']
data_used = data_complete.drop(not_used ,axis=1)
#data_used.head()
data_mean = data_used.set_index(['VoteEnd','Topic','FirstName'])
data_mean.head(1000)
data_used.to_csv("../datas/nlp_results/voting_single_topic_not_unique.csv")
for year in data_mean.index.get_level_values('VoteEnd').unique() :
for Topic in data_mean.loc[year].index.get_level_values('Topic').unique() :
print("the {0} the subject : {1} had a result of {2}".format(
year,Topic,data_mean.loc[year,Topic].Decision.mean()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1. initialisation of function for topic determination
Step3: Now we load the lda model we use along with the stop words, in order to have them available for the time we will use them, avoiding to reload them every time we call getTopicForQuery. We also load our lda model for once.
Step4: 2. Creation of the Voting DataFrame
Step5: 2.1 slection of interesting fields in data_frame
Step6: We create a smaller DataFrame which contains only the subjects that are voted, we do not repeat the text each time for each person who votes. Hence we will perform the NLP once on each unique entry.
Step7: 3 Topic Clustering
Step8: We now create a frame using the topics found using insert_topic
Step9: Finally merging the topics with the original frame containing the name and decision of parlementeer ?
|
6,969
|
<ASSISTANT_TASK:>
Python Code:
def zero_args():
    """Example signature: a function that takes no arguments."""
    # code goes here
    pass
def one_arg(a):
    """Example signature: a function with a single required argument."""
    # code goes here
    pass
def two_args(a, b):
    """Example signature: a function with two required arguments."""
    # code goes here
    pass
def optional_arg(a, b=0): # <--- please note, optional arguments are listed LAST
    """Example signature: one required argument plus one defaulted argument."""
    # code goes here
    pass
def two_options(a=True, b=False):
    """Example signature: two optional boolean (keyword-style) arguments."""
    # code goes here
    pass
# To refreash your memory, calling these functions look something like:
zero_args()
one_arg(10)
two_args(10, 20)
optional_arg(10)
optional_arg(10, b=20)
two_options()
two_options(a=True)
from math import sqrt # <-- importing the square root function from the math module.
def pythagoras (a, b):
    """Return the hypotenuse length of a right triangle with legs a and b."""
    # `sqrt` comes from the module-level `from math import sqrt` above.
    hypotenuse_squared = a ** 2 + b ** 2
    return sqrt(hypotenuse_squared)
# Lets call it!
round(pythagoras(10, 23), 3)
def is_prime(num):
    """Return True if num is prime, False otherwise."""
    if num <= 1:
        return False # numbers <= 1 are not prime
    # A composite number must have a factor no larger than its square root,
    # so trial division only needs to test 2..floor(sqrt(num)) instead of
    # the full 2..num-1 range (same results, O(sqrt(n)) instead of O(n)).
    for i in range(2, int(num ** 0.5) + 1):
        if (num % i) == 0: # num is divisible by i with no remainder -> composite
            return False
    return True
a = 10
def change_a():
    # Scoping demo: this rebinds a brand-new *local* name 'a'; the
    # module-level 'a' is untouched, which is why print(a) below shows 10.
    a = 20
change_a()
print(a)
a = 10
print(id(a))
def change_a():
    a = 20
    # Prints the id of the function-local 'a', demonstrating it is a
    # different binding from the module-level 'a' whose id printed above.
    print(id(a))
change_a()
def variables():
    # Scoping demo: x and y exist only inside this function's namespace,
    # so the print(x + y) in the cell below deliberately raises NameError.
    x = 10
    y = 10
print(x + y) # <--- note the error message; NameError
def variables():
    # Fixed version of the demo: return the locals so the caller can bind
    # and use them outside the function's scope.
    x = 10
    y = 10
    print(x + y)
    return x, y
a, b = variables() # this line maps x,y to a,b .
print(a + b)
a = [i for i in range(10)]
b = [i for i in range(20)]
c = [i for i in "abcde"]
print(a, b, c, sep="\n")
print(i)
# YOUR CODE HERE!
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple stuff right? Okay lets move on and look at the body and return statement. I’m going to create a function that calculates the hypotenuse of a triangle when given sides 'a' and 'b' (i.e. the Pythagorean Theorem).
Step3: So our pythagoras function takes the arguments 'a' and 'b'. The second line calculates 'c2' and once we have that we return the square root of 'c2'.
Step4: To give you a quick run down this code works out if a number, 'n', is prime by seeing if there is a 'q' that is a divisor of n. where
Step5: We print A, and number we get is 10. Why isn't it 20? We called the 'change a' function and the function quite clearly assigned A to 20. what gives?
Step6: Basically, the "A" inside the function is separate from the "A" outside of it. Here's another example
Step7: So Python is telling us X is not defined and that’s not a bug, X and Y are defined within the scope of the function, but not defined within the main program (e.g the place where we are calling it). There are a few fixes; we could move the print statement into the function by indenting it. Or, we could 'save' the function variables for use in the main program. For example
Step8: Why does Python do this?
Step9: In short, self-contained blocks of code (e.g. functions) have their own 'namespace', and that means if a variable is defined inside that block then that variable is said to be defined locally.
|
6,970
|
<ASSISTANT_TASK:>
Python Code:
# egrep.py
import sys, re
# sys.argv is the list of command-line arguments
# sys.argv[0] is the name of the program itself
# sys.argv[1] will be the regex specified at the command line
regex = sys.argv[1]
# for every line passed into the script
for line in sys.stdin:
# if it matches the regex, write it to stdout
if re.search(regex, line):
sys.stdout.write(line)
# line_count.py
import sys
count = 0
for line in sys.stdin:
count += 1
# print goes to sys.stdout
print(count)
!type SomeFile.txt | python egrep.py "[0-9]" | python line_count.py
# 'r' means read-only
file_for_reading = open('reading_file.txt', 'r')
# 'w' is write—will destroy the file if it already exists!
file_for_writing = open('writing_file.txt', 'w')
# 'a' is append—for adding to the end of the file
file_for_appending = open('appending_file.txt', 'a')
# don't forget to close your files when you're done
file_for_writing.close()
with open('SomeFile.txt', 'r') as f:
for line in f:
print(line.strip())
# After with block, file is closed
import csv
with open('stocks.csv', 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
date = row[0]
symbol = row[1]
closing_price = float(row[2])
print(date, symbol, closing_price)
with open('stocks-headers.csv', 'r') as f:
reader = csv.DictReader(f, delimiter=':')
for row in reader:
date = row['date']
symbol = row['symbol']
closing_price = float(row['closing_price'])
print(date, symbol, closing_price)
# Minimal HTML document used to exercise the BeautifulSoup parsing below.
# The triple-quote delimiters were stripped by a cleaning pass, leaving a
# syntax error; restored here as the multi-line string literal it must be.
some_html = """
<html>
  <head>
    <title>A web page</title>
  </head>
  <body>
    <p id="author">Joel Grus</p>
    <p id="subject" class="important">Data Science</p>
  </body>
</html>
"""
from bs4 import BeautifulSoup
import requests
html = requests.get('http://www.example.com').text
html = some_html
soup = BeautifulSoup(html, 'html5lib')
first_paragraph = soup.find('p')
first_paragraph
soup.p.text, soup.p.text.split()
soup.p['id']
soup.p.get('id')
soup.find_all('p')
[p for p in soup('p') if p.get('id')]
soup('p', {'class' : 'important'})
soup('p', 'important')
[p for p in soup('p') if 'important' in p.get('class', [])]
import json
# json.loads requires a *string*; the original book literal is a
# triple-quoted JSON document whose quotes were stripped by a cleaning
# pass, turning it into a dict (and making json.loads raise TypeError).
json_string = """{ "title" : "Data Science Book",
                   "author" : "Joel Grus",
                   "publicationYear" : 2014,
                   "topics" : [ "data", "science", "data science"] }"""
# parse the JSON into a Python Dictionary
# NOTE: the name `dict` shadows the builtin; kept for compatibility with
# the original notebook cell.
dict = json.loads(json_string)
if 'data science' in dict['topics']:
    print(dict)
endpoint = 'https://api.github.com/users/joelgrus/repos'
repos = json.loads(requests.get(endpoint).text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading Files
Step2: Use a with block to ensure that files are closed
Step3: Delimited Files
Step5: HTML And The Parsing Thereof
Step7: Using APIs
Step8: Using An Unauthenticated API
|
6,971
|
<ASSISTANT_TASK:>
Python Code:
!ls
import sha
# Our first commit
data1 = 'This is the start of my paper2.'
meta1 = 'date: 1/1/12'
hash1 = sha.sha(data1 + meta1).hexdigest()
print('Hash:', hash1)
# Our second commit, linked to the first
data2 = 'Some more text in my paper...'
meta2 = 'date: 1/2/12'
# Note we add the parent hash here!
hash2 = sha.sha(data2 + meta2 + hash1).hexdigest()
print('Hash:', hash2)
%%bash
git config --global user.name "John Doe"
git config --global user.email "johndoe@uw.edu"
%%bash
# Put here your preferred editor. If this is not set, git will honor
# the $EDITOR environment variable
git config --global core.editor /usr/bin/nano # my preferred editor
# On Windows Notepad will do in a pinch,
# I recommend Notepad++ as a free alternative
# On the mac, you can set nano or emacs as a basic option
%%bash
# And while we're at it, we also turn on the use of color, which is very useful
git config --global color.ui "auto"
%%bash
git config --global credential.helper cache
# Set the cache to timeout after 2 hours (setting is in seconds)
git config --global credential.helper 'cache --timeout=7200'
!cat ~/.gitconfig
!git
%%bash
rm -rf test
git init test
%%bash
cd test
ls
%%bash
cd test
ls -la
%%bash
cd test
ls -l .git
%%bash
cd test
echo "My first bit of text" > file1.txt
%%bash
cd test
ls -al
%%bash
cd test
git add file1.txt
%%bash
cd test
git status
%%bash
cd test
git commit -a -m "This is our first commit"
%%bash
cd test
git log
%%bash
cd test
echo "And now some more text..." >> file1.txt
%%bash
cd test
git diff
%%bash
cd test
git commit -a -m "I have made great progress on this critical matter."
%%bash
cd test
git log
%%bash
cd test
git log --oneline --topo-order --graph
%%bash
cd test
# We create our alias (this saves it in git's permanent configuration file):
git config --global alias.slog "log --oneline --topo-order --graph"
# And now we can use it
git slog
%%bash
cd test
git mv file1.txt file-newname.txt
git status
%%bash
cd test
git commit -a -m"I like this new name better"
echo "Let's look at the log again:"
git slog
%%bash
cd test
git status
ls
%%bash
cd test
git branch experiment
git checkout experiment
%%bash
cd test
echo "Some crazy idea" > experiment.txt
git add experiment.txt
git commit -a -m"Trying something new"
git slog
%%bash
cd test
git checkout master
git slog
%%bash
cd test
echo "All the while, more work goes on in master..." >> file-newname.txt
git commit -a -m"The mainline keeps moving"
git slog
%%bash
cd test
ls
%%bash
cd test
git merge experiment
git slog
%%bash
cd test
ls
echo "Let's see if we have any remote repositories here:"
git remote -v
%%bash
cd test
git remote add origin https://github.com/jakevdp/test.git
%%bash
cd test
git remote -v
%%bash
cd test
git push origin master
%%bash
# Here I clone my 'test' repo but with a different name, test2, to simulate a 2nd computer
git clone https://github.com/jakevdp/test.git test2
cd test2
pwd
git remote -v
%%bash
cd test2 # working on computer #2
echo "More new content on my experiment" >> experiment.txt
git commit -a -m"More work, on machine #2"
%%bash
cd test2
git push origin master
%%bash
cd test
git pull origin master
%%bash
cd test
git branch trouble
git checkout trouble
echo "This is going to be a problem..." >> experiment.txt
git commit -a -m"Changes in the trouble branch"
%%bash
cd test
git checkout master
echo "More work on the master branch..." >> experiment.txt
git commit -a -m"Mainline work"
%%bash
cd test
git merge trouble
%%bash
cd test
cat experiment.txt
%%bash
cd test
cat experiment.txt
%%bash
cd test
git commit -a -m"Completed merge of trouble, fixing conflicts along the way"
git slog
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A repository
Step2: And this is pretty much the essence of Git!
Step3: Other settings
Step4: Password memory
Step5: Github offers in its help pages instructions on how to configure the credentials helper for Mac OSX and Windows.
Step6: Stage 1
Step7: git init
Step8: Note
Step9: Now let's edit our first file in the test directory with a text editor... I'm doing it programatically here for automation purposes, but you'd normally be editing by hand
Step10: git add
Step11: We can now ask git about what happened with status
Step12: git commit
Step13: In the commit above, we used the -m flag to specify a message at the command line.
Step14: git diff
Step15: And now we can ask git what is different
Step16: The cycle of git virtue
Step17: git log revisited
Step18: Sometimes it's handy to see a very summarized version of the log
Step19: Defining an alias
Step20: git mv and rm
Step21: Note that these changes must be committed too, to become permanent! In git's world, until something hasn't been committed, it isn't permanently recorded anywhere.
Step22: And git rm works in a similar fashion.
Step23: We are now going to try two different routes of development
Step24: 3. Using remotes as a single user
Step25: Since the above cell didn't produce any output after the git remote -v call, it means we have no remote repositories configured.
Step26: Let's see the remote situation again
Step27: Pushing changes to a remote repository
Step28: We can now see this repository publicly on github.
Step29: Let's now make some changes in one 'computer' and synchronize them on the second.
Step30: Now we put this new work up on the github server so it's available from the internet
Step31: Now let's fetch that work from machine #1
Step32: An important aside
Step33: And now we go back to the master branch, where we change the same file
Step34: The conflict...
Step35: Let's see what git has put into our file
Step36: At this point, we go into the file with a text editor, decide which changes to keep, and make a new commit that records our decision. I've now made the edits, in this case I decided that both pieces of text were useful, but integrated them with some changes
Step37: Let's then make our new commit
|
6,972
|
<ASSISTANT_TASK:>
Python Code:
# Regular-expression support from the standard library.
import re

# Sample sentence to search.
text = 'The quick brown fox jumped over the lazy brown bear.'

# Match any whole word that is one of the three animal names;
# findall returns the captured group text for each match.
animal_pattern = re.compile(r'\b(fox|snake|bear)\b')
animal_pattern.findall(text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create some text
Step2: Apply regex
|
6,973
|
<ASSISTANT_TASK:>
Python Code:
# Cluster pre-computed review embeddings with k-means, visualise them with
# t-SNE, and ask a completion model to name each cluster's theme.
import pandas as pd
import numpy as np
# Embeddings of 1k food reviews; 'babbage_similarity' holds each embedding
# serialised as a string.
df = pd.read_csv('output/embedded_1k_reviews.csv')
# Parse the string-encoded embeddings into numpy arrays.
# NOTE(review): the parsed arrays are stored under a NEW column name while
# np.vstack below reads the ORIGINAL 'babbage_similarity' column -- it looks
# like the conversion was meant to overwrite in place; verify.
df['text-similarity-babbage-001'] = df.babbage_similarity.apply(eval).apply(np.array)
matrix = np.vstack(df.babbage_similarity.values)
matrix.shape
from sklearn.cluster import KMeans
# Partition the embeddings into 4 clusters (fixed seed for reproducibility).
n_clusters = 4
kmeans = KMeans(n_clusters = n_clusters,init='k-means++',random_state=42)
kmeans.fit(matrix)
labels = kmeans.labels_
df['Cluster'] = labels
# Mean review score per cluster, lowest first.
df.groupby('Cluster').Score.mean().sort_values()
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
# Project the high-dimensional embeddings down to 2-D for visualisation.
tsne = TSNE(n_components=2, perplexity=15, random_state=42, init='random', learning_rate=200)
vis_dims2 = tsne.fit_transform(matrix)
x = [x for x,y in vis_dims2]
y = [y for x,y in vis_dims2]
# One colour per cluster; the 'x' marker sits at each cluster's centroid.
for category, color in enumerate(['purple', 'green', 'red', 'blue']):
    xs = np.array(x)[df.Cluster==category]
    ys = np.array(y)[df.Cluster==category]
    plt.scatter(xs, ys, color=color, alpha=0.3)
    avg_x = xs.mean()
    avg_y = ys.mean()
    plt.scatter(avg_x, avg_y, marker='x', color=color, s=100)
plt.title("Clusters identified visualized in language 2d using t-SNE")
import openai
# Reading a review which belong to each group.
rev_per_cluster = 3
# For each cluster, sample a few reviews and ask the completion API what
# they have in common (requires OpenAI credentials and network access).
for i in range(n_clusters):
    print(f"Cluster {i} Theme:", end=" ")
    reviews = "\n".join(df[df.Cluster == i].combined.str.replace("Title: ", "").str.replace("\n\nContent: ", ": ").sample(rev_per_cluster, random_state=42).values)
    response = openai.Completion.create(
        engine="davinci-instruct-beta-v3",
        prompt=f"What do the following customer reviews have in common?\n\nCustomer reviews:\n\"\"\"\n{reviews}\n\"\"\"\n\nTheme:",
        temperature=0,
        max_tokens=64,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    print(response["choices"][0]["text"].replace('\n',''))
    # Show score, summary and a text snippet for each sampled review.
    sample_cluster_rows = df[df.Cluster == i].sample(rev_per_cluster, random_state=42)
    for j in range(rev_per_cluster):
        print(sample_cluster_rows.Score.values[j], end=", ")
        print(sample_cluster_rows.Summary.values[j], end=": ")
        print(sample_cluster_rows.Text.str[:70].values[j])
    print("-" * 100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Find the clusters using K-means
Step2: It looks like cluster 2 focused on negative reviews, while cluster 0 and 1 focused on positive reviews.
Step3: Visualization of clusters in a 2d projection. The red cluster clearly represents negative reviews. The blue cluster seems quite different from the others. Let's see a few samples from each cluster.
|
6,974
|
<ASSISTANT_TASK:>
Python Code:
# Notebook driver for the GatesMusicPet library: split a single CUE+WAV
# album image into per-track FLAC files via generated shell commands.
WORKING_DIR = u"/path/to/folder/to/music"
FILENAME_PREFIX = u"filename_without_ext"
FILENAME_EXTENSION = u"wav"
# Output path template; %(...)s placeholders are filled from track tags.
OUTPUT_PATTERN = u"/path/to/your/music/<%(prefix)s >%(album)s< (%(suffix)s)>/<<%(discnumber)s->%(tracknumber)s >%(title)s.flac"
PICTURE = u"Folder.jpg"
ANSI_ENCODING = "gbk"
FILES_TO_COPY = ["Artworks.tar"]
DELETE_TARGET_DIR = False # If clean the target folder at first
# NOTE(review): ANSI_ENCODING, DELETE_TARGET_DIR, INPUT_AUDIO, NOT_PARSED
# and NO_TRACK are defined but never used in this chunk -- verify.
INPUT_EXTRAINFO = u"%s.ini" % FILENAME_PREFIX
INPUT_CUE = u"%s.cue" % FILENAME_PREFIX
INPUT_AUDIO = u"%s.%s" % (FILENAME_PREFIX, FILENAME_EXTENSION)
import sys
sys.path.append(u"/path/to/your/GatesMusicPet/")
from music_pet.meta import *
from music_pet.utils import *
from music_pet.audio import FLAC, init_flacs
import subprocess
import os, sys
# IPython magic: change into the music folder.
cd $WORKING_DIR
global_report = []
NOT_PARSED = 1
NO_TRACK = 2
# Parse the CUE sheet and merge extra per-track metadata from the .ini file.
albumList = parse_cue(INPUT_CUE, encoding="U8")
extraMetas = parse_ini(INPUT_EXTRAINFO)
for album in albumList.values():
    for extraMeta in extraMetas:
        album.update_all_tracks(extraMeta)
albumList.fix_album_names()
# Build a FLAC job object per track, pointing at its source audio file.
flacs = []
for album in albumList.values():
    flacs = init_flacs(album, OUTPUT_PATTERN)
    for flac in flacs:
        flac.set_input_file(u"%s/%s" % (
            WORKING_DIR, filename_safe(flac.get_tag(u"original_file"))))
        flac.set_next_start_time_from_album(album)
        flac.cover_picture = PICTURE
    for l in album.detail():
        print(l)
# Assemble the shell command list: convert non-WAV input to a temp WAV once
# per source file (memoised in tmpified_files), encode, copy artwork, clean up.
commands = []
tmpified_files = {}
for flac in flacs:
    b_is_wav = flac.get_tag(u"@input_fullpath").endswith(u".wav")
    b_tempified = flac.get_tag(u"@input_fullpath") in tmpified_files
    if not b_is_wav and not b_tempified:
        commands.append(flac.command_build_tempwav(memoize=tmpified_files))
    commands.append(flac.command())
    commands.append(command_copy_to([PICTURE] + FILES_TO_COPY, parent_folder(flac.get_tag(u"@output_fullpath"))))
    if not b_is_wav and not b_tempified:
        commands.append(flac.command_clear_tempwav())
    flac.create_target_dir()
# Preview the generated commands before executing anything.
for cmd in commands:
    print(cmd)
    print(u"")
# IPython magic: ensure we run from the music folder.
cd $WORKING_DIR
# Execute each command, collecting failures into global_report.
for cmd in commands:
    print(u"Executing:\n%s\n\n" % cmd)
    try:
        p = subprocess.check_output(cmd,
                                    shell=True,
                                    )
    except subprocess.CalledProcessError as ex:
        p = u"Process received an error! code=%s, output=%s" % (ex.returncode, ex.output)
        global_report.append((3, u"Process Error, code=%s" % ex.returncode, cmd))
    print(p)
    print(u"\n\n")
# Final summary of any failed commands.
for error in global_report:
    print(u"%s\n%s\n\n" % (error[1], error[2]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Filename
Step2: Output Prefix
Step3: Others
Step4: Parse CUE
Step5: Convert Files
|
6,975
|
<ASSISTANT_TASK:>
Python Code:
# Build a uniform 2-D grid of evaluation points on [-2, 2] x [-2, 2]
# and show it with a scatter plot.
import numpy
N = 30 # number of points along each axis
X = numpy.linspace(-2, 2, N) # computes a 1D-array for x
Y = numpy.linspace(-2, 2, N) # computes a 1D-array for y
x, y = numpy.meshgrid(X, Y) # generates a mesh grid
from matplotlib import pyplot
%matplotlib inline
pyplot.scatter(x, y)
pyplot.xlabel('x')
pyplot.ylabel('y')
# velocity component functions
def get_u( x, y, S, gamma ):
    """Return the x-velocity induced by a flat vortex sheet.

    The sheet has strength gamma and spans x in [-S, S] on the x-axis;
    (x, y) may be scalars or numpy arrays.
    """
    angle_right = numpy.arctan((x-S)/y)  # angle subtended by the right edge
    angle_left = numpy.arctan((x+S)/y)   # angle subtended by the left edge
    return gamma/(2*numpy.pi)*(angle_right-angle_left)
def get_v( x, y, S, gamma ):
    """Return the y-velocity induced by a flat vortex sheet.

    The sheet has strength gamma and spans x in [-S, S] on the x-axis;
    (x, y) may be scalars or numpy arrays.
    """
    r2_left = (x+S)**2+y**2   # squared distance to the left edge
    r2_right = (x-S)**2+y**2  # squared distance to the right edge
    return gamma/(4*numpy.pi)*(numpy.log(r2_left/r2_right))
def plot_uv(u,v):
    """Plot the velocity magnitude as filled contours with the vector field
    overlaid, evaluated on the module-level grid arrays ``x`` and ``y``.
    """
    pyplot.figure(figsize=(8,11)) # set size
    pyplot.xlabel('x', fontsize=16) # label x
    pyplot.ylabel('y', fontsize=16) # label y
    m = numpy.sqrt(u**2+v**2) # compute velocity magnitude
    velocity = pyplot.contourf(x, y, m, vmin=0) # plot magnitude contours
    cbar = pyplot.colorbar(velocity, orientation='horizontal')
    cbar.set_label('Velocity magnitude', fontsize=16);
    pyplot.quiver(x, y, u, v) # plot vector field
    # pyplot.streamplot(x, y, u, v) # plots streamlines - this is slow!
# Evaluate and plot the sheet-induced velocity field on the grid.
# compute the velocity
gamma = -4 # sheet strength
S = 1 # sheet extents
u = get_u(x,y,S,gamma)
v = get_v(x,y,S,gamma)
# plot it
plot_uv(u,v)
pyplot.plot([-min(S,2),min(S,2)],[0,0],'k-',lw=2) # draw the vortex sheet
# Add a uniform free stream at angle alpha and superimpose it on the
# velocity induced by a shorter, same-strength sheet.
alpha = numpy.pi/10 # free-stream angle
U_inf = numpy.cos(alpha) # free-stream in x
V_inf = numpy.sin(alpha) # free-stream in y
# superimpose to get velocity
gamma = -4 # sheet strength
S = 0.5 # sheet extents
u = U_inf+get_u(x,y,S,gamma)
v = V_inf+get_v(x,y,S,gamma)
# plot it
plot_uv(u,v)
pyplot.plot([-min(S,2),min(S,2)],[0,0],'k-',lw=2) # draw the vortex sheet
# vortex panel class
class Panel:
    """A straight vortex panel of uniform strength gamma.

    The panel runs from (x0, y0) to (x1, y1).  Velocities are obtained by
    mapping the query point into panel-aligned coordinates, evaluating the
    flat-sheet solution there, and rotating the result back to the global
    frame.
    """
    def __init__( self, x0, y0, x1, y1, gamma ):
        """Store the end points and pre-compute the coordinate transform."""
        self.x = [x0,x1]            # end-point x-coordinates
        self.y = [y0,y1]            # end-point y-coordinates
        self.gamma = gamma          # sheet strength
        self.xc = 0.5*(x0+x1)       # panel x-center
        self.yc = 0.5*(y0+y1)       # panel y-center
        half_dx = x1-self.xc
        half_dy = y1-self.yc
        self.S = numpy.sqrt(half_dx**2+half_dy**2)  # panel half-width
        self.sx = half_dx/self.S    # unit tangent, x-component
        self.sy = half_dy/self.S    # unit tangent, y-component
    def velocity( self, x, y ):
        """Return [u, v] induced at (x, y) in global coordinates."""
        xp,yp = self.transform_xy( x, y )              # go to panel frame
        up = get_u( xp, yp, self.S, self.gamma )       # u in panel frame
        vp = get_v( xp, yp, self.S, self.gamma )       # v in panel frame
        return self.rotate_uv( up, vp )                # back to global frame
    def plot(self):
        """Draw the panel as a thick black line."""
        return pyplot.plot(self.x,self.y,'k-',lw=2)
    def transform_xy( self, x, y ):
        """Map global (x, y) into panel-aligned coordinates [xp, yp]."""
        dx = x-self.xc      # shift so the panel center is the origin
        dy = y-self.yc
        return [ dx*self.sx+dy*self.sy, dy*self.sx-dx*self.sy ]
    def rotate_uv( self, up, vp):
        """Rotate a panel-frame velocity [up, vp] back to global [u, v]."""
        return [ up*self.sx-vp*self.sy, vp*self.sx+up*self.sy ]
# Demonstrate the Panel class: create one tilted panel, evaluate its
# induced velocity on the grid, and plot both.
# define panel
my_panel = Panel(x0=-0.7,y0=0.5,x1=0.5,y1=-0.4,gamma=-2)
# compute velocity on grid
u,v = my_panel.velocity(x,y)
# plot it
plot_uv(u,v) # plot the flow on the grid
my_panel.plot() # plot the panel
# your code here
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS file and return it as an HTML object."""
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection (the original left it open).
    with open('../styles/custom.css', 'r') as f:
        styles = f.read()
    return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lets visualize the grid to see what we made. We need to import pyplot which has a large set of plotting functions similar to matlab, such as a scatter plot.
Step2: As expected, a grid of equally space points.
Step3: Not the prettiest equations, but nothing numpy can't handle.
Step4: Now we can compute the velocity on the grid and plot it
Step5: Quiz 1
Step6: The dark blue circle is a stagnation point, i.e. the fluid has stopped ($u=v=0$).
Step7: Now we can define a general panel and compute its velocity.
Step8: Quiz 5
Step9: Ignore the line below - it just loads the style sheet.
|
6,976
|
<ASSISTANT_TASK:>
Python Code:
# Author: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm.resolution_matrix import make_inverse_resolution_matrix
from mne.minimum_norm.spatial_resolution import resolution_metrics
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd_emeg = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_evo = data_path + '/MEG/sample/sample_audvis-ave.fif'
# read forward solution with EEG and MEG
forward_emeg = mne.read_forward_solution(fname_fwd_emeg)
# forward operator with fixed source orientations
forward_emeg = mne.convert_forward_solution(forward_emeg, surf_ori=True,
force_fixed=True)
# create a forward solution with MEG only
forward_meg = mne.pick_types_forward(forward_emeg, meg=True, eeg=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution for MEG and EEGMEG
inv_emeg = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward_emeg, noise_cov=noise_cov, loose=0.,
depth=None)
inv_meg = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward_meg, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
# Resolution matrix and point-spread-function (PSF) metrics for EEG+MEG;
# the matrix is deleted as soon as the metrics are extracted to save memory.
rm_emeg = make_inverse_resolution_matrix(forward_emeg, inv_emeg,
                                         method='MNE', lambda2=lambda2)
ple_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'],
                                  function='psf', metric='peak_err')
sd_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'],
                                 function='psf', metric='sd_ext')
del rm_emeg
# Same metrics for the MEG-only inverse.
rm_meg = make_inverse_resolution_matrix(forward_meg, inv_meg,
                                        method='MNE', lambda2=lambda2)
ple_psf_meg = resolution_metrics(rm_meg, inv_meg['src'],
                                 function='psf', metric='peak_err')
sd_psf_meg = resolution_metrics(rm_meg, inv_meg['src'],
                                function='psf', metric='sd_ext')
del rm_meg
# Peak localisation error (PLE) maps on the inflated left hemisphere.
brain_ple_emeg = ple_psf_emeg.plot('sample', 'inflated', 'lh',
                                   subjects_dir=subjects_dir, figure=1,
                                   clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_emeg.add_text(0.1, 0.9, 'PLE PSF EMEG', 'title', font_size=16)
brain_ple_meg = ple_psf_meg.plot('sample', 'inflated', 'lh',
                                 subjects_dir=subjects_dir, figure=2,
                                 clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_meg.add_text(0.1, 0.9, 'PLE PSF MEG', 'title', font_size=16)
# Subtract the two distributions and plot this difference
diff_ple = ple_psf_emeg - ple_psf_meg
brain_ple_diff = diff_ple.plot('sample', 'inflated', 'lh',
                               subjects_dir=subjects_dir, figure=3,
                               clim=dict(kind='value', pos_lims=(0., .5, 1.)),
                               smoothing_steps=20)
brain_ple_diff.add_text(0.1, 0.9, 'PLE EMEG-MEG', 'title', font_size=16)
# Spatial-extent (SD) maps, same layout as the PLE figures above.
brain_sd_emeg = sd_psf_emeg.plot('sample', 'inflated', 'lh',
                                 subjects_dir=subjects_dir, figure=4,
                                 clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_emeg.add_text(0.1, 0.9, 'SD PSF EMEG', 'title', font_size=16)
brain_sd_meg = sd_psf_meg.plot('sample', 'inflated', 'lh',
                               subjects_dir=subjects_dir, figure=5,
                               clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_meg.add_text(0.1, 0.9, 'SD PSF MEG', 'title', font_size=16)
# Subtract the two distributions and plot this difference
diff_sd = sd_psf_emeg - sd_psf_meg
brain_sd_diff = diff_sd.plot('sample', 'inflated', 'lh',
                             subjects_dir=subjects_dir, figure=6,
                             clim=dict(kind='value', pos_lims=(0., .5, 1.)),
                             smoothing_steps=20)
brain_sd_diff.add_text(0.1, 0.9, 'SD EMEG-MEG', 'title', font_size=16)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: EEGMEG
Step2: MEG
Step3: Visualization
Step4: These plots show that with respect to peak localization error, adding EEG to
|
6,977
|
<ASSISTANT_TASK:>
Python Code:
# MNIST convolutional network using the legacy TensorFlow 1.x graph API.
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Download/load MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
################## build a softmax regression model
# input data
x = tf.placeholder(tf.float32, shape = [None, 784])
# real label
y_ = tf.placeholder(tf.float32, shape=[None, 10])
def weight_variable(shape):
    """Create a weight Variable of the given shape, initialised from a
    truncated normal distribution with standard deviation 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable of the given shape, initialised to 0.1
    (slightly positive to help ReLU units start active)."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, zero ('SAME') padding,
    so the output keeps the input's spatial size."""
    unit_stride = [1, 1, 1, 1]  # one step in batch, height, width, channel
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2, halving each spatial dimension."""
    window = [1, 2, 2, 1]  # pool over 2x2 spatial patches, stride likewise
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# First convolutional layer: 5x5 kernels, 1 input channel -> 32 features.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# Reshape flat 784-pixel rows into 28x28 single-channel images
# (-1 lets the batch dimension be inferred).
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14
# Second convolutional layer: 32 -> 64 feature maps.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7
# Densely connected layer on the flattened 7x7x64 feature maps.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout: keep_prob is fed at run time (0.5 for training, 1.0 for eval).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Readout layer producing the 10 class logits.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# Softmax cross-entropy loss on the logits, optimised with Adam.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
#train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
# Accuracy: fraction of samples whose arg-max prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train for 20000 mini-batches of 50 images, reporting training accuracy
# every 100 steps, then evaluate once on the full test set.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i%100 == 0:
            # Evaluate with dropout disabled (keep_prob=1.0).
            train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    print("test accuracy %g"% accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build a Multilayer Convolutional Network
Step2: Convolution and Pooling
Step3: First Convolutional Layer
Step4: To apply the layer, we first reshape x to a 4d tensor, with the second and third dimensions corresponding to
Step5: We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool. The max_pool_2x2 method will reduce the image size to 14x14.
Step6: Second Convolutional Layer
Step7: Densely Connected Layer
Step8: Dropout
Step9: Readout Layer
Step10: Train and Evaluate the Model
|
6,978
|
<ASSISTANT_TASK:>
Python Code:
# Compute an EEG forward solution on the fsaverage template head model for
# one EEGBCI recording, and plot the EEG sensitivity map.
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Joan Massich <mailsik@gmail.com>
#
# License: BSD Style.
import os.path as op
import mne
from mne.datasets import eegbci
from mne.datasets import fetch_fsaverage
# Download fsaverage files
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
# The files live in:
subject = 'fsaverage'
trans = 'fsaverage' # MNE has a built-in fsaverage transformation
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
# Load run 6 for subject 1 of the EEGBCI dataset.
raw_fname, = eegbci.load_data(subject=1, runs=[6])
raw = mne.io.read_raw_edf(raw_fname, preload=True)
# Clean channel names to be able to use a standard 1005 montage
new_names = dict(
    (ch_name,
     ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp'))
    for ch_name in raw.ch_names)
raw.rename_channels(new_names)
# Read and set the EEG electrode locations
montage = mne.channels.make_standard_montage('standard_1005')
raw.set_montage(montage)
raw.set_eeg_reference(projection=True) # needed for inverse modeling
# Check that the locations of EEG electrodes is correct with respect to MRI
mne.viz.plot_alignment(
    raw.info, src=src, eeg=['original', 'projected'], trans=trans,
    show_axes=True, mri_fiducials=True, dig='fiducials')
# EEG-only forward operator on the fsaverage source space.
fwd = mne.make_forward_solution(raw.info, trans=trans, src=src,
                                bem=bem, eeg=True, mindist=5.0, n_jobs=1)
print(fwd)
# Use fwd to compute the sensitivity map for illustration purposes
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
brain = eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,
                     clim=dict(lims=[5, 50, 100]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Setup source space and compute forward
|
6,979
|
<ASSISTANT_TASK:>
Python Code:
# Load the facies training data and a blind well, split both into two
# groups, and separate features from the 'Facies' labels.
from libtools import *
training = pd.read_csv('data-test.csv')
training.head()
training.describe()
# Replace missing values with a large negative sentinel.
# NOTE(review): -99999 as a sentinel will distort tree splits if any NaNs
# are actually present -- verify this is intended.
training = training.fillna(-99999)
blind = pd.read_csv('blind.csv')
blind.head()
blind.describe()
# divisao_sh/divisao_lm come from libtools; presumably they split the rows
# into the SH and LM facies groups -- verify against libtools.
training_SH = divisao_sh(training)
training_LM = divisao_lm(training)
blind_SH = divisao_sh(blind)
blind_LM = divisao_lm(blind)
training_SH.head()
training_LM.head()
blind_SH.head()
blind_LM.head()
# Feature matrices X and label vectors y for each group.
X_SH = training_SH.drop(['Facies'],axis=1)
y_SH = training_SH['Facies']
X_LM = training_LM.drop(['Facies'],axis=1)
y_LM = training_LM['Facies']
X_SH_blind = blind_SH.drop(['Facies'],axis=1)
y_SH_blind = blind_SH['Facies']
X_LM_blind = blind_LM.drop(['Facies'],axis=1)
y_LM_blind = blind_LM['Facies']
# Train one ExtraTrees classifier per group, evaluate on a 10% hold-out,
# then score the blind well.
from sklearn.model_selection import train_test_split
X_train_SH, X_test_SH, y_train_SH, y_test_SH = train_test_split(X_SH, y_SH, test_size=0.1)
X_train_LM, X_test_LM, y_train_LM, y_test_LM = train_test_split(X_LM, y_LM, test_size=0.1)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import classification_report,confusion_matrix
# NOTE(review): only the SH model uses bootstrap=True -- confirm the
# asymmetry is intentional.
ETC_SH = ExtraTreesClassifier(n_estimators=500, bootstrap=True)
ETC_LM = ExtraTreesClassifier(n_estimators=500)
ETC_SH.fit(X_train_SH, y_train_SH)
ETC_LM.fit(X_train_LM, y_train_LM)
# Hold-out evaluation for each group.
pred_SH = ETC_SH.predict(X_test_SH)
print(confusion_matrix(y_test_SH,pred_SH))
print(classification_report(y_test_SH,pred_SH))
pred_LM = ETC_LM.predict(X_test_LM)
print(confusion_matrix(y_test_LM,pred_LM))
print(classification_report(y_test_LM,pred_LM))
# Blind-well evaluation for each group.
blind_pred_SH = ETC_SH.predict(X_SH_blind)
print(confusion_matrix(y_SH_blind, blind_pred_SH))
print(classification_report(y_SH_blind, blind_pred_SH))
blind_pred_LM = ETC_LM.predict(X_LM_blind)
print(confusion_matrix(y_LM_blind, blind_pred_LM))
print(classification_report(y_LM_blind, blind_pred_LM))
# Recombine the two groups' predictions in original row order and score
# the whole blind well at once.
blind_pred_SH = pd.DataFrame(blind_pred_SH, index=X_SH_blind.index)
blind_pred_LM = pd.DataFrame(blind_pred_LM, index=X_LM_blind.index)
pred_blind = pd.concat([blind_pred_SH,blind_pred_LM])
pred_blind = pred_blind.sort_index()
y_blind = blind['Facies']
print(confusion_matrix(y_blind, pred_blind))
print(classification_report(y_blind, pred_blind))
# Retrain on the complete training data and predict facies for the
# validation wells (which have no labels), writing the result to CSV.
training_data = pd.read_csv('training.csv')
training_data.head()
training_data.describe()
training_data_SH = divisao_sh(training_data)
training_data_LM = divisao_lm(training_data)
training_data_SH.describe()
training_data_LM.describe()
X_SH = training_data_SH.drop(['Facies'],axis=1)
y_SH = training_data_SH['Facies']
X_LM = training_data_LM.drop(['Facies'],axis=1)
y_LM = training_data_LM['Facies']
X_SH.describe()
X_LM.describe()
# NOTE(review): train_test_split was already imported above -- this repeat
# is harmless but redundant.
from sklearn.model_selection import train_test_split
X_train_SH, X_test_SH, y_train_SH, y_test_SH = train_test_split(X_SH, y_SH, test_size=0.1)
X_train_LM, X_test_LM, y_train_LM, y_test_LM = train_test_split(X_LM, y_LM, test_size=0.1)
ETC_SH = ExtraTreesClassifier(n_estimators=500, bootstrap=True)
ETC_LM = ExtraTreesClassifier(n_estimators=500)
ETC_SH.fit(X_train_SH, y_train_SH)
ETC_LM.fit(X_train_LM, y_train_LM)
pred_SH = ETC_SH.predict(X_test_SH)
print(confusion_matrix(y_test_SH,pred_SH))
print(classification_report(y_test_SH,pred_SH))
pred_LM = ETC_LM.predict(X_test_LM)
print(confusion_matrix(y_test_LM,pred_LM))
print(classification_report(y_test_LM,pred_LM))
# Unlabelled validation wells; label each row's formation group so the
# same SH/LM split can be applied.
validation = pd.read_csv('validation_data_nofacies.csv')
validation.head()
validation.describe()
validation['Label_Form_SH_LM'] = validation.Formation.apply((label_two_groups_formation))
validation.head()
validation_SH = divisao_sh(validation)
validation_LM = divisao_lm(validation)
validation_SH.head()
validation_LM.head()
# Drop non-feature columns before predicting.
X_val_SH = validation_SH.drop(['Formation','Well Name','Depth','NM_M'], axis=1)
X_val_LM = validation_LM.drop(['Formation','Well Name','Depth','NM_M'], axis=1)
X_val_SH.head()
X_val_LM.head()
pred_val_SH = ETC_SH.predict(X_val_SH)
pred_val_LM =ETC_LM.predict(X_val_LM)
# Recombine the two groups' predictions in original row order.
pred_val_SH = pd.DataFrame(pred_val_SH, index=X_val_SH.index)
pred_val_LM = pd.DataFrame(pred_val_LM, index=X_val_LM.index)
pred_val = pd.concat([pred_val_SH,pred_val_LM])
pred_val = pred_val.sort_index()
pred_val.describe()
validation['Facies Pred'] = pred_val
validation=validation.drop(['Label_Form_SH_LM'],axis=1)
validation.head()
validation.to_csv('Prediction.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the data training data without Shankle well
Step2: Loading the SHANKLE well
Step3: Using the complete training data
Step4: Applying ExtraTreeClassifier
Step5: Making the division between SH and LM
Step6: Removing the colums
|
6,980
|
<ASSISTANT_TASK:>
Python Code:
# Demonstrate Maxwell-filter head-movement compensation on simulated MEG
# data: stationary reference vs. naive average vs. compensated average.
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from os import path as op
import mne
from mne.preprocessing import maxwell_filter
print(__doc__)
data_path = op.join(mne.datasets.misc.data_path(verbose=True), 'movement')
# Simulated head-position estimates and matching raw recordings.
pos = mne.chpi.read_head_pos(op.join(data_path, 'simulated_quats.pos'))
raw = mne.io.read_raw_fif(op.join(data_path, 'simulated_movement_raw.fif'))
raw_stat = mne.io.read_raw_fif(op.join(data_path,
                                       'simulated_stationary_raw.fif'))
mne.viz.plot_head_positions(pos, mode='traces')
# extract our resulting events
events = mne.find_events(raw, stim_channel='STI 014')
events[:, 2] = 1   # collapse all triggers to a single event id
raw.plot(events=events)
# Shared topomap settings for the three comparisons below.
topo_kwargs = dict(times=[0, 0.1, 0.2], ch_type='mag', vmin=-500, vmax=500)
# 0. Take average of stationary data (bilateral auditory patterns)
evoked_stat = mne.Epochs(raw_stat, events, 1, -0.2, 0.8).average()
evoked_stat.plot_topomap(title='Stationary', **topo_kwargs)
# 1. Take a naive average (smears activity)
evoked = mne.Epochs(raw, events, 1, -0.2, 0.8).average()
evoked.plot_topomap(title='Moving: naive average', **topo_kwargs)
# 2. Use raw movement compensation (restores pattern)
raw_sss = maxwell_filter(raw, head_pos=pos)
evoked_raw_mc = mne.Epochs(raw_sss, events, 1, -0.2, 0.8).average()
evoked_raw_mc.plot_topomap(title='Moving: movement compensated', **topo_kwargs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize the "subject" head movements (traces)
Step2: Process our simulated raw data (taking into account head movements)
|
6,981
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
    import pint
except ImportError:
    # IPython shell escape: install the package from inside the notebook.
    !pip install pint
# download modsim.py if necessary
from os.path import basename, exists
def download(url):
    """Fetch *url* into the working directory unless the file already exists.

    The target filename is the last path component of the URL; when that
    file is already present, nothing happens and no network access occurs.
    """
    target = basename(url)
    if exists(target):
        return                       # already on disk -- skip the download
    from urllib.request import urlretrieve
    saved_path, _ = urlretrieve(url, target)
    print('Downloaded ' + saved_path)
# Fetch the ModSim library and the glucose/insulin dataset if absent.
download('https://raw.githubusercontent.com/AllenDowney/' +
         'ModSim/main/modsim.py')
# import functions from modsim
# (modsim's star import also provides names used below, e.g. pd -- verify)
from modsim import *
download('https://raw.githubusercontent.com/AllenDowney/' +
         'ModSim/main/data/glucose_insulin.csv')
data = pd.read_csv('glucose_insulin.csv', index_col='time');
# Starting guesses for the insulin-model parameters.
I0 = 360      # initial insulin concentration
k = 0.25      # insulin decay rate (see slope_func: -k * I)
gamma = 0.004 # gain on the glucose excess above the threshold
G_T = 80      # glucose threshold
params = I0, k, gamma, G_T
def make_system(params, data):
    """Build a System object for the insulin minimal model.

    params: sequence (I0, k, gamma, G_T)
    data: DataFrame with a 'glucose' column indexed by time

    returns: System holding the parameters, an interpolated glucose
    function G, the initial state, and the simulation time span.
    """
    I0, k, gamma, G_T = params
    # Glucose is supplied to the model as an interpolating function of time.
    G = interpolate(data.glucose)
    # Simulate over the span of the measurements with one-minute steps.
    t_0, t_end = data.index[0], data.index[-1]
    init = State(I=I0)
    return System(I0=I0, k=k, gamma=gamma, G_T=G_T, G=G,
                  init=init, t_0=t_0, t_end=t_end, dt=1)
# Solution
# Build the System object from the default parameters and the data.
system = make_system(params, data)
# Solution
def slope_func(t, state, system):
    """Return dI/dt for the insulin minimal model at time t.

    state holds the insulin concentration I; system supplies the decay
    rate k, sensitivity gamma, glucose function G, and threshold G_T.
    """
    I = state
    # Insulin decays at rate k and is secreted in proportion to how far
    # glucose exceeds the threshold, weighted by the elapsed time t.
    decay = -system.k * I
    secretion = system.gamma * (system.G(t) - system.G_T) * t
    return decay + secretion
# Solution
# Sanity check: evaluate the slope function at the initial condition.
slope_func(system.t_0, system.init, system)
# Solution
# Integrate the model, sampling the solution at the measurement times.
results, details = run_solve_ivp(system, slope_func,
                                 t_eval=data.index)
details.message
# Solution
results.tail()
# Solution
# Plot measured insulin against the simulated trajectory.
data.insulin.plot(style='o', color='C2',
                  alpha=0.5, label='insulin data')
results.I.plot(style='-', color='C2', label='simulation')
decorate(xlabel='Time (min)',
         ylabel='Concentration ($\mu$U/mL)')
def error_func(params, data):
    """Compute an array of errors to be minimized.

    The docstring delimiters were lost in extraction, leaving bare prose
    lines that made this function a SyntaxError; they are restored here.

    params: sequence of model parameters (I0, k, gamma, G_T)
    data: DataFrame with the measured insulin values to be matched

    returns: array of errors (model minus data), skipping the first two
    time points
    """
    print(params)   # show the parameter values tried during the fit
    # make a System with the given parameters
    system = make_system(params, data)
    # solve the ODE
    results, details = run_solve_ivp(system, slope_func,
                                     t_eval=data.index)
    # compute the difference between the model
    # results and actual data
    errors = results.I - data.insulin
    return errors.iloc[2:]
# Solution
# One trial evaluation, then fit the parameters by least squares.
error_func(params, data)
# Solution
best_params, details = leastsq(error_func, params, data)
# Solution
details.mesg
# Solution
# Re-run the simulation with the fitted parameters and compare to data.
system2 = make_system(best_params, data)
# Solution
results2, details = run_solve_ivp(system2, slope_func,
                                  t_eval=data.index)
details.message
# Solution
data.insulin.plot(style='o', color='C2',
                  alpha=0.5, label='insulin data')
results2.I.plot(style='-', color='C2', label='simulation')
decorate(xlabel='Time (min)',
         ylabel='Concentration ($\mu$U/mL)')
# Solution
# Unpack the fitted parameters for the phi computations below.
I0, k, gamma, G_T = best_params
# Solution
# Peak and basal insulin from the measurements.
I_max = data.insulin.max()
Ib = data.insulin[0]
I_max, Ib
# Solution
# The value of G0 is the best estimate from the glucose model
G0 = 272
Gb = data.glucose[0]
G0, Gb
# Solution
# First-phase pancreatic responsivity.
phi_1 = (I_max - Ib) / k / (G0 - Gb)
phi_1
# Solution
# Second-phase responsivity, scaled by 1e4 for the conventional units.
phi_2 = gamma * 1e4
phi_2
# Reference ranges for comparison with the computed values above.
phi_1_interval = 2, 4
phi_1_interval
phi_2_interval = 20, 35
phi_2_interval
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following cells download and read the data.
Step2: In Chapter 17 I present the glucose minimal model; in Chapter 18 we implemented it using run_simulation and run_solve_ivp.
Step3: Exercise
Step4: Exercise
Step6: Exercise
Step7: Exercise
Step8: Exercise
Step9: According to Pacini and Bergman, here are the normal ranges for these quantities.
|
6,982
|
<ASSISTANT_TASK:>
Python Code:
# Specification-by-assertion for three collection helpers.
# NOTE(review): duplicates, square_collection and intersection are not
# defined in this chunk -- they are the functions the task asks for.
# duplicates(): each value occurring more than once, in order of first
# appearance, returned as a tuple.
assert duplicates((1, 1, 2, 3, 4, 5, 6, 8, 2, 4, -7, 12, -7)) == (1, 2, 4, -7)
assert duplicates([1, 1, 2, 3, 4, 5, "asd", 8, "asd", 4, -7, 12, -7]) == (1, 2, 4, "asd", -7)
# square_collection(): element-wise squares, order preserved.
assert square_collection([1, 2, 3, 4, 5, 6]) == [1, 4, 9, 16, 25, 36]
# intersection(): common elements in the order they appear in the first list.
a = [12, 1, 2, 3, 4, 7, 8, 10]
b = [1, 12, 33, 4, 7, 9, 10]
assert intersection(a, b) == [12, 1, 4, 7, 10]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Napisz generator liczb pseudolosowych z czestotliwosciami 0,25 dla zakresu 1-50 i 0,75 dla zakresu 51-100.
Step2: Zaimplementuj linked liste w pythonie wykorzystujac namedtuples
|
6,983
|
<ASSISTANT_TASK:>
Python Code:
# Build a MoS2 monolayer with ASE and set up a STEM scan window for a
# pyQSTEM simulation.
from __future__ import print_function
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib as mpl
# NOTE(review): `read` is imported but unused in this chunk -- verify.
from ase.io import read
from pyqstem.util import atoms_plot
from pyqstem import PyQSTEM
from ase.build import mx2
mpl.rc('font',**{'size' : 13})
# 2H-MoS2 monolayer; zeroing the off-diagonal cell component makes the
# hexagonal cell orthorhombic, then wrap/center the atoms inside it.
atoms=mx2(formula='MoS2', kind='2H', a=3.18, thickness=3.19, size=(2, 2, 1), vacuum=2)
cell=atoms.get_cell()
cell[1,0]=0
atoms.set_cell(cell)
atoms.wrap() # wrap atoms outside the unit cell
atoms.center() # center the atoms in the unit cell
atoms*=(3,3,1)   # 3x3 repetition so the probe fits at every scan position
# Scan window covering the central unit cell, 30x30 probe positions.
scan_range=[[cell[0,0],2*cell[0,0],30],
            [cell[1,1],2*cell[1,1],30]]
fig,ax=plt.subplots(figsize=(7,5))
atoms_plot(atoms,scan_range=scan_range,ax=ax,legend=True)
# Run the STEM simulation: build a (deliberately aberrated) probe and the
# projected potential, attach two annular detectors, then display both images.
qstem = PyQSTEM('STEM')
qstem.set_atoms(atoms)
resolution = (0.02,0.02) # resolution in x and y-direction [Angstrom]
samples = (300,300) # samples in x and y-direction
defocus = -50 # defocus [Angstrom]
v0 = 300 # acceleration voltage [keV]
alpha = 20 # convergence angle [mrad]
astigmatism = 40 # astigmatism magnitude [Angstrom]
astigmatism_angle = 100 # astigmatism angle [deg.]
aberrations = {'a33': 3000, 'phi33': 120} # higher order aberrations [Angstrom] or [deg.]
# NOTE(review): the literals (300,300)/(0.02,0.02) are passed instead of
# the `samples`/`resolution` variables defined just above -- verify.
qstem.build_probe(v0,alpha,(300,300),resolution=(0.02,0.02),defocus=defocus,astig_mag=astigmatism,
                  astig_angle=astigmatism_angle,aberrations=aberrations)
wave=qstem.get_wave()
wave.view(cmap='inferno')
# Projected potential, 5 slices, over the scan window.
qstem.build_potential(5,scan_range=scan_range)
potential=qstem.get_potential_or_transfunc()
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,6))
qstem.view(ax=ax1)
potential.view(ax=ax2,cmap='inferno',method='real')
# Annular detectors: detector1 collects high angles, detector2 low angles
# (units follow the QSTEM convention -- verify).
detector1_radii=(70,200) # inner and outer radius of detector 1
detector2_radii=(0,70) # inner and outer radius of detector 2
qstem.add_detector('detector1',detector1_radii)
qstem.add_detector('detector2',detector2_radii)
qstem.run()
img1=np.array(qstem.read_detector('detector1'))
img2=np.array(qstem.read_detector('detector2'))
# Tile the single-cell scan 2x2 for display.
img1=np.tile(img1,(2,2))
img2=np.tile(img2,(2,2))
extent=[0,scan_range[0][1]*3-scan_range[0][0],0,scan_range[1][1]*3-scan_range[1][0]]
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,6))
ims1=ax1.imshow(img1.T,extent=extent,interpolation='nearest',cmap='gray')
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(ims1, cax=cax1)
ax1.set_xlabel('x [Angstrom]')
ax1.set_ylabel('y [Angstrom]')
ims2=ax2.imshow(img2.T,extent=extent,interpolation='nearest',cmap='gray')
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(ims2, cax=cax2)
ax2.set_xlabel('x [Angstrom]')
ax2.set_ylabel('y [Angstrom]')
plt.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We create an orthorhombic unit cell of MoS2. The unit cell is repeated 3x3 times, in order to accomodate the size of the probe at all scan positions. We set a scan range that covers the central unit cell.
Step2: We create a QSTEM object in STEM mode and set the atomic object.
Step3: We build a (very bad) probe. Building the probe will also determine the resolution of the potential, when we build it.
Step4: The potential is build and imported to python.
Step5: We can view the extent of the potential using the .view() method of the PyQSTEM object. When the potential is build in this way, it is made to cover exactly the maximum probe extent.
Step6: We add a couple of detectors and run qstem.
Step7: After running we can extract the results from the detectors.
|
6,984
|
<ASSISTANT_TASK:>
Python Code:
# Imports for the Lorenz-equations notebook.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# The Axes3D import registers matplotlib's '3d' projection.
from mpl_toolkits.mplot3d import Axes3D
# NOTE(review): eigvals is imported but not used in this chunk -- possibly
# needed by later cells; verify before removing.
from numpy.linalg import eigvals
def Lorenz(state, t, sigma, r, b):
    '''
    Right-hand side of the Lorenz system.

    state : sequence of three floats (x, y, z)
    t     : time (unused, but required by odeint's callback signature)
    sigma, r, b : Lorenz parameters

    Returns [dx/dt, dy/dt, dz/dt] as a list.
    '''
    # unpack the state vector in a single step
    x, y, z = state[0], state[1], state[2]
    # assemble the three derivatives directly
    return [sigma * (y - x),
            (r - z) * x - y,
            x * y - b * z]
def SolveLorenz(state0, t, sigma=10., r=28., b=8./3.0):
    '''
    Integrate the Lorenz equations with scipy's odeint.

    state0 : initial condition [x0, y0, z0] at time t[0]
    t      : numpy array of times at which the solution is returned
    sigma, r, b : Lorenz parameters (classic chaotic defaults)

    Returns an array of shape (len(t), 3) holding the trajectory.
    '''
    # hand the parameters to odeint via args= instead of closing over
    # them with a lambda -- same integration, one less indirection
    return odeint(Lorenz, state0, t, args=(sigma, r, b))
def PlotLorenzXvT(state, t, sigma, r, b):
    '''
    Plot the time series X(t), Y(t), Z(t) of a Lorenz trajectory.

    state : (len(t), 3) array as returned by SolveLorenz
    t     : array of times matching state
    sigma, r, b : parameters, used only in the figure title
    '''
    plt.figure()
    ax = plt.subplot(111)
    X = state[:, 0]
    Y = state[:, 1]
    Z = state[:, 2]
    ax.plot(t, X, 'r', label='X')
    # NOTE: the old ax.hold(True) call was removed -- Axes.hold was
    # deprecated in matplotlib 2.0 and removed in 3.0; overplotting is
    # the default, so repeated plot() calls already accumulate.
    ax.plot(t, Y, 'g', label='Y')
    ax.plot(t, Z, 'b', label='Z')
    ax.set_xlabel('time t')
    # raw string so \sigma is not treated as a (now-deprecated) escape
    plt.title(r'Lorenz Equations: $\sigma=${}, $r=${}, $b=${}'.format(sigma, r, b))
    # Shrink current axis's height by 10% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    # Put a legend below current axis
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=3)
    plt.show()
def PlotLorenz3D(state, sigma, r, b):
    '''
    Show a 3-D phase portrait of a Lorenz trajectory using mplot3d.

    state : (N, 3) trajectory array
    sigma, r, b : parameters, used only in the figure title
    '''
    # do some fancy 3D plotting
    fig = plt.figure()
    # fig.gca(projection='3d') stopped accepting keyword arguments in
    # matplotlib 3.6; add_subplot is the supported way to get 3-D axes.
    ax = fig.add_subplot(projection='3d')
    ax.plot(state[:, 0], state[:, 1], state[:, 2])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    # raw string protects the TeX \sigma escape
    plt.title(r'Lorenz Equations: $\sigma=${}, $r=${}, $b=${}'.format(sigma, r, b))
    plt.show()
# --- Parameter sweep in the Rayleigh number r ------------------------
# Set the parameters (sigma and b stay fixed at the classic values)
sigma= 10.
b = 8./3
# set the initial condition
X0 = [2.0, 3.0, 4.0]
# set the time for integration
t = np.arange(0.0, 30.0, 0.01)
# set the Rayleigh number: r < 1, subcritical -- trajectory decays to the origin
r = 0.5
# solve the Equations
state = SolveLorenz(X0,t,sigma,r,b)
# and Visualize as a time series
PlotLorenzXvT(state,t,sigma,r,b)
# and as a 3-D phase portrait
PlotLorenz3D(state,sigma,r,b)
# set the Rayleigh number: r = 10 -- damped oscillation onto a fixed point
r = 10.0
X0 = [2.,3.,4.]
state = SolveLorenz(X0,t,sigma,r,b)
PlotLorenzXvT(state,t,sigma,r,b)
PlotLorenz3D(state,sigma,r,b)
# now change the initial condition so X=-2 (mirrored start, same r)
X0 = [-2.0, -3.0, 4.0]
state = SolveLorenz(X0,t,sigma,r,b)
PlotLorenzXvT(state,t,sigma,r,b)
PlotLorenz3D(state,sigma,r,b)
# set the Rayleigh number: r = 28 -- chaos on the strange attractor
r = 28.0
X0 = [2.,3.,4.]
state = SolveLorenz(X0,t,sigma,r,b)
PlotLorenzXvT(state,t,sigma,r,b)
PlotLorenz3D(state,sigma,r,b)
# set the Rayleigh number: r = 350 -- limit cycle at large Rayleigh number
# (finer time step because the dynamics are much faster here)
r = 350
X0 = [2.,3.,4.]
t = np.arange(0,8.,.0001)
state = SolveLorenz(X0,t,sigma,r,b)
PlotLorenzXvT(state,t,sigma,r,b)
PlotLorenz3D(state,sigma,r,b)
# --- Stability of the non-trivial fixed points -----------------------
# Track the eigenvalues of the Jacobian at the fixed point C+ as the
# Rayleigh number r grows from 1 up to (almost) the Hopf value r_H.
sigma = 10
b = 8./3.
r_H = sigma*(sigma+b+3)/(sigma-b -1.) # critical value of r at Hopf bifurcation
r_max = 28.
ra = np.linspace(1,28.,20)
# x* = y* coordinate of the fixed point C+ for a given r
xstar = lambda r: np.sqrt(b*(r-1))
# Jacobian of the Lorenz right-hand side evaluated at C+
J = lambda r: np.array([[-sigma,sigma,0],[1,-1,-xstar(r)],[xstar(r),xstar(r),-b]])
# plot out the eigenvalues, coloured from cool (small r) to warm (large r)
import matplotlib.cm as cm
cmap = cm.get_cmap('coolwarm')
fig = plt.figure()
for r in ra:
    L = eigvals(J(r))
    plt.plot(np.real(L),np.imag(L),'o',color=cmap((r-min(ra))/(max(ra)-min(ra))))
    # (the old plt.hold(True) call was dropped here: Axes.hold was removed
    # in matplotlib 3.0 and overplotting is the default behaviour)
# plot out eigenvalues at the Hopf Bifurcation
L = eigvals(J(r_H))
plt.plot(np.real(L),np.imag(L),'sy')
# raw strings so the \lambda / \in TeX escapes survive unchanged
plt.xlabel(r'Re$(\lambda)$')
plt.ylabel(r'Im$(\lambda)$')
plt.title(r'Eigenvalues of $C^+$ for $r\in[1,{}]$, $r_H={}$'.format(max(ra),r_H))
plt.grid()
plt.show()
# --- Liapunov exponent experiment ------------------------------------
# start by running the Lorenz system long enough to get on the attractor
r = 28.0
X0 = [1.,0.,0.]
t = np.arange(0,20,.01)
state = SolveLorenz(X0,t,sigma,r,b)
# extract the final state and perturb it by a small amount epsilon
X0 = state[-1]
epsilon=1.e-6
X1 = X0 + epsilon*np.random.rand(3)
# initial separation of the two trajectories (kept for reference)
delta_0 = np.sqrt(np.sum((X1-X0)**2))
# Now run both initial conditions
t=np.arange(0.,50.,.0001)
state0 = SolveLorenz(X0,t,sigma,r,b)
state1 = SolveLorenz(X1,t,sigma,r,b)
# Compare the two trajectories as time-series X
plt.figure()
ax = plt.subplot(111)
ax.plot(t,state0[:,0],'r',t,state1[:,0],'b')
plt.xlabel('t')
plt.ylabel('X(t)')
plt.show()
# and in the phase space
fig = plt.figure()
# NOTE(review): fig.gca(projection='3d') no longer accepts keyword
# arguments in matplotlib >= 3.6 -- confirm the target matplotlib version.
ax = fig.gca(projection='3d')
ax.plot(state0[:,0],state0[:,1],state0[:,2],'r-')
# NOTE(review): plt.hold was removed in matplotlib 3.0; overplotting is
# now the default, so this call can simply be deleted there.
plt.hold(True)
ax.plot(state1[:,0],state1[:,1],state1[:,2],'b-')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.title('Lorenz Equations: $\sigma=${}, $r=${}, $b=${}'.format(sigma,r,b))
plt.show()
# calculate the distance between the two trajectories at every time
delta = state1-state0
delta = np.sqrt(np.sum(delta**2,1))
# and plot them on a log scale (exponential growth appears linear)
plt.figure()
plt.semilogy(t,delta)
plt.xlabel('t')
plt.ylabel('$||\delta(t)||$')
plt.grid()
# now fit the first part with a straight line to determine the slope
# we'll pick the line between tmin and tmax to avoid initial transients and later saturation
tmin = 1.
tmax = 12.
imin = int(np.argwhere(t<tmin)[-1])
imax = int(np.argwhere(t>tmax)[0])
tfit = t[imin:imax]
# least-squares fit of log||delta|| vs t: the slope p[0] estimates the
# largest Liapunov exponent
p= np.polyfit(tfit,np.log(delta[imin:imax]),1)
# NOTE(review): plt.hold removed in matplotlib 3.0 (see above).
plt.hold(True)
plt.semilogy(tfit,np.exp(p[1]+p[0]*tfit),'r')
plt.title('Liapunov Exponent Estimate $\lambda={}$'.format(p[0]))
plt.show()
# --- Lorenz map: successive maxima of Z(t) ---------------------------
Y = state0[:,1]
Z = state0[:,2]
plt.figure()
plt.subplot(2,1,1)
ihalf = int(len(Y)/2.)
# NOTE(review): Python 2 print statement -- this cell will not run under
# Python 3 without adding parentheses.
print ihalf,len(Y)
plt.plot(Y[:ihalf],Z[:ihalf])
plt.xlabel('Y')
plt.ylabel('Z')
plt.title('Lorenz system, $Y,Z$ plane: $r={}$, $\sigma={}$,$b={}$'.format(r,sigma,b))
plt.grid()
plt.subplot(2,1,2)
plt.plot(t,Z)
plt.xlabel('$t$')
plt.ylabel('$Z(t)$')
plt.title('$Z$ time series')
plt.show()
# first let's estimate the centered derivative of Z to isolate the extrema
dZ = np.zeros(Z.shape)
dZ[1:-2] = Z[2:-1] - Z[0:-3]
dZ.shape
plt.figure()
plt.plot(t,dZ,t,np.zeros(t.shape),'k:')
plt.ylabel('$dZ$')
plt.xlabel('t')
# now let's find all intervals that contain zero crossings
# (sign change of the centred difference between neighbouring samples)
icross = np.nonzero(dZ[:-2]*dZ[1:-1] <= 0)
Zextreme = Z[icross]
# and pick out all Extremes greater than mean(Z), i.e. the local maxima
meanZ = np.mean(Z)
Zn = Zextreme[Zextreme > meanZ]
# now plot the Lorenz map Z_{n+1} vs Z_{n}
plt.figure()
plt.plot(Zn[:-2],Zn[1:-1],'bo')
xlim = plt.gca().get_xlim()
# NOTE(review): plt.hold was removed in matplotlib 3.0; safe to delete.
plt.hold(True)
# identity line for reference
plt.plot(xlim,xlim,'k')
plt.xlabel('$Z_n$')
plt.ylabel('$Z_{n+1}$')
plt.title('Lorenz map: $r={}$, $\sigma={}$, $b={}$'.format(r,sigma,b))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the Lorenz Equations
Step2: Subcritical behavior $r<1$
Step3: Damped Oscillation $r=10$
Step4: Chaos and the strange attractor $r=28$
Step5: Limit Cycle at large Rayleigh number
Step6: Stability of Fixed Points
Step7: Liapunov Exponents
Step8: Estimating the Liapunov exponent
Step9: Calculating the Lorenz Map
Step10: The Lorenz Map
|
6,985
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
# personal packages
from xlearn.cluster import KMeans
from pyingrid import Ingrid
import geoxarray
%matplotlib inline
# Build an IRI data-library query: NCEP-NCAR monthly geopotential height
# at 500 hPa, DJFM 1979-2010 yearly anomalies, regridded over the North
# Atlantic sector. (Ingrid is a personal wrapper around the IRIDL API.)
ig = Ingrid('http://iridl.ldeo.columbia.edu',
            'SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.MONTHLY',
            '.Intrinsic/.PressureLevel/.phi')\
    .do('P(500)VALUES') \
    .do('T(Dec 1979)(Mar 2010)RANGE')\
    .do('yearly-anomalies') \
    .do('T(Dec)(Jan)(Feb)(Mar)VALUES')\
    .do('X -90 2 60 GRID Y 80 -2 20 GRID')
print(ig)
print('\n[Final data URL]:\n', ig.to_url())
# materialise the query as an xarray Dataset
ds = ig.to_dataset()
print(ds)
# pull out the 500 hPa geopotential field and load it into memory
da = ds.sel(P=500).phi.load()
print(da.name, da.dims)
print(da.coords)
# k-means with 4 clusters -- presumably the four classic North Atlantic
# weather regimes; random_state fixed for reproducibility
m = KMeans(n_clusters=4, random_state=0, n_jobs=-1).fit(da)
print(m)
# plot each cluster centre on an orthographic projection, one per panel
fig, axes = plt.subplots(2,2, figsize=(8,8))
regimes = ['NAO$^-$', 'NAO$^+$', 'Blocking', 'Atlantic Ridge']
tags = list('abcd')
for i in range(m.n_clusters):
    m.plot_cluster_centers(label=i,
                           proj='ortho',
                           plot_type='contourf+',
                           levels=np.arange(-110, 111, 20),
                           units='m',
                           ax=axes.flat[i])
    # prepend the regime name to whatever title plot_cluster_centers set
    title = '{}, {}'.format(regimes[i],
                            axes.flat[i].title.get_text())
    plt.title(title)
    # panel letter in the top-left corner
    plt.text(0, 1, tags[i],
             transform=axes.flat[i].transAxes,
             va='bottom',
             fontsize=plt.rcParams['font.size']*2,
             fontweight='bold')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data is from the Columbia University IRI data library
Step2: Convert the data from the library into an xarray Dataset
Step3: Get the DataArray and load it into memory
Step4: Perform the k-means clustering
Step5: Show the results
|
6,986
|
<ASSISTANT_TASK:>
Python Code:
import pods, GPy, itertools
%matplotlib inline
from matplotlib import pyplot as plt
# Load the single-cell qPCR dataset shipped with the pods package.
s = pods.datasets.singlecell()
Ydf = s['Y']
Y = Ydf.values
labels = s['labels']
# marker symbols reused by every latent-space scatter plot below
marker = '<>^vsd'
Ydf.describe()
import numpy as np
# obtain a centred version of data.
# NOTE(review): Y.mean() is the grand mean over all entries, not a
# per-gene mean -- confirm this is the intended centring.
centredY = Y - Y.mean()
# compute inner product (Gram) matrix between the N samples
C = np.dot(centredY,centredY.T)
# perform eigendecomposition (PCA via the N x N inner-product matrix)
V, U = np.linalg.eig(C)
# sort eigenvalues and vectors according to size, largest first
# (argsort is ascending, so the order is reversed with [::-1])
ind = V.argsort()
ev = V[ind[::-1]]
U = U[:, ind[::-1]]
def legend_to_the_right(ax):
    """Attach the legend of *ax* outside the axes, centred on its right edge."""
    # re-apply the current axes rectangle (kept from the original code,
    # where the box used to be shrunk to make room for the legend)
    pos = ax.get_position()
    ax.set_position([pos.x0, pos.y0, pos.width, pos.height])
    # anchor the legend's centre-left corner just to the right of the axes
    _ = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), numpoints=1)
def plot_latent(ax, x, y, marker, labels):
    """Scatter the points (x, y) on *ax*, one styled series per unique label.

    Unique labels are taken in order of first appearance; marker symbols
    cycle through *marker* and face colours come from GPy's Tango palette.
    """
    GPy.plotting.Tango.reset()
    # collect the unique labels, preserving first-appearance order
    ulabels = []
    for lab in labels:
        if lab not in ulabels:
            ulabels.append(lab)
    for i, label in enumerate(ulabels):
        # cycle through the available marker symbols
        symbol = marker[i % len(marker)]
        sel = labels == label
        ax.plot(x[sel], y[sel], marker=symbol,
                markerfacecolor=GPy.plotting.Tango.nextMedium(),
                linestyle='',
                label=label, mew=.2, alpha=1)
# PCA result: eigenvalue spectrum (left) and first two PCs (right)
fig, [ax1, ax2] = plt.subplots(1,2,figsize=(13,4))
fracs = (ev.real/ev.real.sum())[:30] # Take only the first 30 eigenvalues
ax1.bar(np.arange(len(fracs)), fracs)
plot_latent(ax2, U[:,0], U[:,1], marker, labels)
legend_to_the_right(ax2)
# GP-LVM with a 2-D latent space and an ARD RBF + bias kernel
m = GPy.models.GPLVM(Y, 2, kernel=GPy.kern.RBF(2, ARD=1)+GPy.kern.Bias(2))
m.optimize(messages=1)
# ARD length-scales (left) and latent embedding (right)
fig, [ax1, ax2] = plt.subplots(1,2,figsize=(9,4))
m.kern.plot_ARD(ax=ax1)
m.plot_latent(labels=labels, marker=marker, legend=False, ax=ax2)
_ = legend_to_the_right(ax2)
# latent space (left) next to the magnification factor (right)
fig, [ax1, ax2] = plt.subplots(1,2,figsize=(9,4))
m.plot_latent(labels=labels, marker=marker, legend=False, ax=ax1)
m.plot_magnification(labels=labels, marker=marker, legend=False, ax=ax2)
_ = legend_to_the_right(ax2)
# Bayesian GP-LVM: 5 latent dims with ARD, 15 inducing points
m = GPy.models.BayesianGPLVM(Y, 5, num_inducing=15, kernel=GPy.kern.RBF(5,ARD=1)+GPy.kern.Bias(5))
m.optimize('bfgs', max_iters=5e3, messages=1)
fig, [ax1, ax2] = plt.subplots(1,2,figsize=(9,4))
m.kern.plot_ARD(ax=ax1)
m.plot_latent(labels=s['labels'], marker='<>^vsd', legend=False, ax=ax2)
legend_to_the_right(ax2)
# Baseline 1: Isomap embedding
n_neighbors = 40
import sklearn.manifold
m = sklearn.manifold.Isomap(n_neighbors=n_neighbors, n_components=2)
X = m.fit_transform(Ydf)
fig, ax = plt.subplots(1)
plot_latent(ax, X[:, 0], X[:, 1], marker, labels)
_ = legend_to_the_right(ax)
# Baseline 2: Locally Linear Embedding
n_neighbors = 30
m = sklearn.manifold.LocallyLinearEmbedding(n_neighbors=n_neighbors, n_components=2)
X = m.fit_transform(Y)
fig, ax = plt.subplots(1)
plot_latent(ax, X[:, 0], X[:, 1], marker, labels)
_ = legend_to_the_right(ax)
# Baseline 3: t-SNE
m = sklearn.manifold.TSNE(n_components=2)
X = m.fit_transform(Y)
fig, ax = plt.subplots(1)
plot_latent(ax, X[:, 0], X[:, 1], marker, labels)
_ = legend_to_the_right(ax)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we load in the data. We've provided a convenience function for loading in the data with GPy. It is loaded in as a pandas DataFrame. This allows us to summarize it with the describe attribute.
Step2: Principal Component Analysis
Step3: Here we will define a little routine, which puts the legend on the right side of the plot, so that the legend does not overwright the data plot
Step4: PCA Result
Step5: GP-LVM on the Data
Step6: Bayesian GP-LVM
Step7: This gives a really nice result. Broadly speaking two latent dimensions dominate the representation. When we visualize using these two dimensions we can see the entire cell phylogeny laid out nicely in the two dimensions. Additionally we can see the missclassification of the some cells, using the 'standard' approach of repeated k-means clustering and PCA on sub clustered (This was used to get the sample colors of the 64 cellstage).
Step8: Locally Linear Embedding
Step9: t-SNE (t-distributed Stochastic Neighbour Embedding)
|
6,987
|
<ASSISTANT_TASK:>
Python Code:
# Names of the supported user-similarity metrics
# (see MovieData.get_similar_users below).
EUCLIDEAN = 'euclidean'
MANHATTAN = 'manhattan'
PEARSON = 'pearson'
def read_ratings_df():
    """Load the MovieLens ratings CSV, parsing 'timestamp' (epoch seconds) into datetimes."""
    def parse_epoch(time_in_secs):
        # the CSV stores the timestamp as text seconds-since-epoch
        return datetime.utcfromtimestamp(float(time_in_secs))
    return pd.read_csv('ml-latest-small/ratings.csv',
                       parse_dates=['timestamp'],
                       date_parser=parse_epoch)
class MovieData(object):
    """User-based collaborative filtering over the MovieLens ratings table.

    Loads the ratings via read_ratings_df() and indexes them as
    self.ratings[user_id][movie_id] -> the corresponding DataFrame row,
    then offers user-user similarity metrics and rating prediction.
    """

    def __init__(self):
        self.ratings_df = read_ratings_df()
        # ratings[user_id][movie_id] -> rating row (a pandas Series)
        self.ratings = defaultdict(dict)
        self.init_ratings()

    def init_ratings(self):
        """Build the per-user {movie_id: row} index from the DataFrame."""
        for _, row in self.ratings_df.iterrows():
            self.ratings[row['userId']][row['movieId']] = row

    def get_movies(self, user_id):
        """Return the set of movie ids rated by the given user."""
        return set(self.ratings[user_id].keys())

    def get_unique_user_ids(self):
        """Return the array of distinct user ids in the ratings table."""
        return self.ratings_df['userId'].unique()

    def get_shared_ratings(self, user1_id, user2_id):
        """Return {movie_id: (rating1, rating2)} for movies both users rated."""
        movies1 = self.get_movies(user1_id)
        movies2 = self.get_movies(user2_id)
        shared_movies = movies1 & movies2
        ratings = {}
        for movie_id in shared_movies:
            ratings[movie_id] = (
                self.ratings[user1_id][movie_id]['rating'],
                self.ratings[user2_id][movie_id]['rating'],
            )
        return ratings

    @staticmethod
    def shared_ratings_to_np_arrays(shared_ratings):
        """Convert {movie_id: (r1, r2)} into a 2xN array [ratings1, ratings2].

        The dict values are wrapped in list() so this also works on
        Python 3, where dict.values() is a view that numpy cannot stack.
        """
        return np.array(list(shared_ratings.values())).T

    def get_euclidean_distance(self, user1_id, user2_id):
        """Similarity 1 / (1 + euclidean distance) over the shared ratings.

        Returns 0 when the users have no movies in common.
        """
        shared_ratings = self.get_shared_ratings(user1_id, user2_id)
        if len(shared_ratings) == 0:
            return 0
        ratings = self.shared_ratings_to_np_arrays(shared_ratings)
        ratings1 = ratings[0]
        ratings2 = ratings[1]
        sum_of_squares = np.power(ratings1 - ratings2, 2).sum()
        return 1 / (1 + sqrt(sum_of_squares))

    def get_manhattan_distance(self, user1_id, user2_id):
        """Similarity 1 / (1 + manhattan distance) over the shared ratings.

        Returns 0 when the users have no movies in common.
        """
        shared_ratings = self.get_shared_ratings(user1_id, user2_id)
        if len(shared_ratings) == 0:
            return 0
        ratings = self.shared_ratings_to_np_arrays(shared_ratings)
        ratings1 = ratings[0]
        ratings2 = ratings[1]
        manhattan_sum = np.abs(ratings1 - ratings2).sum()
        return 1 / (1 + manhattan_sum)

    def get_pearson_correlation(self, user1_id, user2_id):
        """Absolute Pearson correlation of the two users' shared ratings.

        Returns 0 when there is no overlap, or when either user's shared
        ratings are constant (zero standard deviation).
        NOTE(review): the z-scores use the population std (ddof=0) while
        the sum is divided by n - 1, so values can slightly exceed 1.
        """
        shared_ratings = self.get_shared_ratings(user1_id, user2_id)
        num_ratings = len(shared_ratings)
        if num_ratings == 0:
            return 0
        ratings = self.shared_ratings_to_np_arrays(shared_ratings)
        ratings1 = ratings[0]
        ratings2 = ratings[1]
        mean1 = ratings1.mean()
        mean2 = ratings2.mean()
        std1 = ratings1.std()
        std2 = ratings2.std()
        if std1 == 0 or std2 == 0:
            return 0
        std_scores_1 = (ratings1 - mean1) / std1
        std_scores_2 = (ratings2 - mean2) / std2
        # numerically stable calculation of the Pearson correlation coefficient
        return abs((std_scores_1 * std_scores_2).sum() / (num_ratings - 1))

    def get_similar_users(self, user_id, metric=EUCLIDEAN):
        """Return {other_user_id: similarity} for users with similarity > 0."""
        metrics = {
            EUCLIDEAN: self.get_euclidean_distance,
            MANHATTAN: self.get_manhattan_distance,
            PEARSON: self.get_pearson_correlation,
        }
        distance_f = metrics[metric]
        similar_users = {}
        for similar_user_id in self.ratings:
            if similar_user_id == user_id:
                continue
            distance = distance_f(user_id, similar_user_id)
            if distance > 0:
                similar_users[similar_user_id] = distance
        return similar_users

    def predict_score(self, user_id, movie_id):
        """Predict the user's rating for a movie as the similarity-weighted
        mean of the ratings given by similar users (0 if nobody rated it)."""
        similar_users = self.get_similar_users(user_id)
        total_rating_sum = 0
        similarity_sum = 0
        for similar_user_id, similarity in similar_users.items():
            user_ratings = self.ratings[similar_user_id]
            if movie_id in user_ratings:
                total_rating_sum += similarity * user_ratings[movie_id]['rating']
                similarity_sum += similarity
        if similarity_sum == 0:
            return 0
        return total_rating_sum / similarity_sum
movie_data = MovieData()
def explore_shared_ratings(movie_data):
    """Print, for 30 random user pairs, how many movies each user rated
    and how many ratings the pair shares."""
    unique_user_ids = movie_data.get_unique_user_ids()
    n_pairs = 30
    samples = np.random.choice(unique_user_ids, size=(n_pairs, 2))
    for index, sample in enumerate(samples):
        user1_id = sample[0]
        user2_id = sample[1]
        num_movies_1 = len(movie_data.get_movies(user1_id))
        num_movies_2 = len(movie_data.get_movies(user2_id))
        num_shared_ratings = len(movie_data.get_shared_ratings(user1_id, user2_id))
        # parenthesised print so the cell runs under both Python 2 and 3
        print('pair %2d, user1 movies: %4d, user2 movies: %4d, shared movies: %3d' % (
            index + 1, num_movies_1, num_movies_2, num_shared_ratings))

explore_shared_ratings(movie_data)
def explore_distances(movie_data):
    """Print all three similarity metrics for 30 random user pairs."""
    unique_user_ids = movie_data.get_unique_user_ids()
    n_pairs = 30
    samples = np.random.choice(unique_user_ids, size=(n_pairs, 2))
    for index, sample in enumerate(samples):
        user1_id = sample[0]
        user2_id = sample[1]
        num_shared_ratings = len(movie_data.get_shared_ratings(user1_id, user2_id))
        euclidean_distance = movie_data.get_euclidean_distance(user1_id, user2_id)
        manhattan_distance = movie_data.get_manhattan_distance(user1_id, user2_id)
        pearson_correlation = movie_data.get_pearson_correlation(user1_id, user2_id)
        # parenthesised print so the cell runs under both Python 2 and 3
        print('pair %2d, shared movies: %3d, euclidean: %.3f, manhattan: %.3f, pearson: %.3f' % (
            index + 1, num_shared_ratings, euclidean_distance, manhattan_distance, pearson_correlation))

explore_distances(movie_data)
def explore_similar_users(movie_data):
    """Print summary statistics of the similarity scores of 30 random users."""
    unique_user_ids = movie_data.get_unique_user_ids()
    n_users = 30
    user_ids = np.random.choice(unique_user_ids, size=n_users, replace=False)
    for index, user_id in enumerate(user_ids):
        similar_users = movie_data.get_similar_users(user_id)
        # list() so numpy can reduce the values under Python 3 as well,
        # where dict.values() is a view rather than a list
        distances = list(similar_users.values())
        # parenthesised print so the cell runs under both Python 2 and 3
        print('user %3d, similar users: %d, max similarity: %.3f, mean: %.3f, std: %.3f' % (
            index + 1, len(similar_users), np.max(distances), np.mean(distances), np.std(distances)))

explore_similar_users(movie_data)
def explore_predict_score(movie_data):
    """Compare actual vs predicted ratings for 30 random rating rows."""
    ratings_df = movie_data.ratings_df
    rating_indices = ratings_df.index
    n_ratings = 30
    sample = np.random.choice(rating_indices, size=n_ratings, replace=False)
    for index, rating_index in enumerate(sample):
        # .loc replaces the long-removed DataFrame.ix indexer; the sampled
        # values are index labels, so label-based lookup is the right call
        row = ratings_df.loc[rating_index]
        user_id = row['userId']
        movie_id = row['movieId']
        rating = row['rating']
        score = movie_data.predict_score(user_id, movie_id)
        # parenthesised print so the cell runs under both Python 2 and 3
        print('rating %2d, rating: %.1f, predicted: %.3f' % (index + 1, rating, score))

explore_predict_score(movie_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Explore shared ratings
Step2: We are looking at 30 random user pairs. We can notice how small on average is the intersection of the movies they rated (compared to the their total number of ratings).
Step3: Various distances (euclidean, manhattan, pearson correlation).
Step4: Max similarity of 1.0 in most cases is probably an intersection of one movie.
|
6,988
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.spatial import distance
# Manhattan (Minkowski p=1) distance from every cell of a 6x6 grid to the
# grid's geometric centre, computed with scipy.spatial.distance.cdist.
shape = (6, 6)
n_cells = shape[0] * shape[1]
xs, ys = np.indices(shape)
# flatten each coordinate grid into a column vector and pair them up
xs = xs.reshape(n_cells, 1)
ys = ys.reshape(n_cells, 1)
X = np.hstack((xs, ys))
# geometric centre of the grid, e.g. (2.5, 2.5) for a 6x6 grid
mid_x, mid_y = (shape[0] - 1) / 2.0, (shape[1] - 1) / 2.0
result = distance.cdist(X, np.atleast_2d([mid_x, mid_y]), 'minkowski', p=1).reshape(shape)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
6,989
|
<ASSISTANT_TASK:>
Python Code:
x = 1
y = 2
x + y
x
def add_numbers(x, y):
    """Return the sum of x and y."""
    return x + y
add_numbers(x, y)
def add_numbers(x, y, z=None):
    """Return x + y, or x + y + z when a third number is supplied."""
    if z is None:  # 'is None' is the idiomatic identity test, not '=='
        return x + y
    else:
        return x + y + z
print(add_numbers(1, 2))
print(add_numbers(1, 2, 3))
def add_numbers(x, y, z=None, flag=False):
    """Return x + y (+ z when given); print a message when flag is truthy."""
    if flag:
        print('Flag is true!')
    if z is None:  # identity test instead of '==' comparison with None
        return x + y
    else:
        return x + y + z
print(add_numbers(1, 2, flag=True))
def add_numbers(x,y):
    """Return the sum of x and y (redefined here for the function-aliasing demo)."""
    return x+y
a = add_numbers
a(1,2)
type('This is a string')
type(None)
type(1)
type(1.0)
type(add_numbers)
x = (1, 'a', 2, 'b')
type(x)
x = [1, 'a', 2, 'b']
type(x)
x.append(3.3)
print(x)
for item in x:
print(item)
i=0
while( i != len(x) ):
print(x[i])
i = i + 1
[1,2] + [3,4]
[1]*3
1 in [1, 2, 3]
x = 'This is a string'
print(x[0]) #first character
print(x[0:1]) #first character, but we have explicitly set the end character
print(x[0:2]) #first two characters
print(x[::-1])
x[-1]
x[-4:-2]
x[:3]
x[3:]
firstname = 'Christopher'
lastname = 'Brooks'
print(firstname + ' ' + lastname)
print(firstname*3)
print('Chris' in firstname)
firstname = 'Christopher Arthur Hansen Brooks'.split(' ')[0] # [0] selects the first element of the list
lastname = 'Christopher Arthur Hansen Brooks'.split(' ')[-1] # [-1] selects the last element of the list
print(firstname)
print(lastname)
'Chris' + 2
'Chris' + str(2)
x = {'Christopher Brooks': 'brooksch@umich.edu', 'Bill Gates': 'billg@microsoft.com'}
x['Christopher Brooks'] # Retrieve a value by using the indexing operator
x['Kevyn Collins-Thompson'] = "Test Test"
x['Kevyn Collins-Thompson']
for name in x:
print(x[name])
for email in x.values():
print(email)
for name, email in x.items():
print(name)
print(email)
x = ('Christopher', 'Brooks', 'brooksch@umich.edu')
fname, lname, email = x
fname
lname
x = ('Christopher', 'Brooks', 'brooksch@umich.edu', 'Ann Arbor')
fname, lname, email, location = x
print("Chris" + 2)
print('Chris' + str(2))
sales_record = {
'price': 3.24,
'num_items': 4,
'person': 'Chris'}
sales_statement = '{} bought {} item(s) at a price of {} each for a total of {}'
print(sales_statement.format(sales_record['person'],
sales_record['num_items'],
sales_record['price'],
sales_record['num_items']*sales_record['price']))
import csv
import pandas as pd
# IPython magic: display floats with 2 decimal places
%precision 2
# read mpg.csv twice: once as a list of dicts, once as a DataFrame
with open('mpg.csv') as csvfile:
    mpg = list(csv.DictReader(csvfile))
df = pd.read_csv('mpg.csv')
mpg[:3] # The first three dictionaries in our list.
df
len(mpg)
mpg[0].keys()
# average city and highway mpg over all cars
sum(float(d['cty']) for d in mpg) / len(mpg)
sum(float(d['hwy']) for d in mpg) / len(mpg)
# set returns unique values
cylinders = set(d['cyl'] for d in mpg)
cylinders
# average city mpg grouped by cylinder count
CtyMpgByCyl = []
for c in cylinders: # iterate over all the cylinder levels
    summpg = 0
    cyltypecount = 0
    for d in mpg: # iterate over all dictionaries
        if d['cyl'] == c: # if the cylinder level type matches,
            summpg += float(d['cty']) # add the cty mpg
            cyltypecount += 1 # increment the count
    CtyMpgByCyl.append((c, summpg / cyltypecount)) # append the tuple ('cylinder', 'avg mpg')
CtyMpgByCyl.sort(key=lambda x: x[0])
CtyMpgByCyl
vehicleclass = set(d['class'] for d in mpg) # what are the class types
vehicleclass
# average highway mpg grouped by vehicle class, sorted by the average
HwyMpgByClass = []
for t in vehicleclass: # iterate over all the vehicle classes
    summpg = 0
    vclasscount = 0
    for d in mpg: # iterate over all dictionaries
        if d['class'] == t: # if the vehicle class matches,
            summpg += float(d['hwy']) # add the hwy mpg
            vclasscount += 1 # increment the count
    HwyMpgByClass.append((t, summpg / vclasscount)) # append the tuple ('class', 'avg mpg')
HwyMpgByClass.sort(key=lambda x: x[1])
HwyMpgByClass
import datetime as dt
import time as tm
tm.time()
dtnow = dt.datetime.fromtimestamp(tm.time())
dtnow
dtnow.year, dtnow.month, dtnow.day, dtnow.hour, dtnow.minute, dtnow.second # get year, month, day, etc.from a datetime
delta = dt.timedelta(days = 100) # create a timedelta of 100 days
delta
dt.date.today()
today = dt.date.today()
today - delta # the date 100 days ago
today > today-delta # compare dates
class Person:
    # class-level attribute shared by all Person instances
    department = 'School of Information' #a class variable
    def set_name(self, new_name): #a method
        """Store the given name on this instance."""
        self.name = new_name
    def set_location(self, new_location):
        """Store the given location on this instance."""
        self.location = new_location
person = Person()
person.set_name('Christopher Brooks')
person.set_location('Ann Arbor, MI, USA')
print('{} live in {} and works in the department {}'.format(person.name, person.location, person.department))
store1 = [10.00, 11.00, 12.34, 2.34]
store2 = [9.00, 11.10, 12.34, 2.01]
cheapest = map(min, store1, store2)
cheapest
for item in cheapest:
print (item)
people = ['Dr. Christopher Brooks', 'Dr. Kevyn Collins-Thompson', 'Dr. VG Vinod Vydiswaran', 'Dr. Daniel Romero']
def split_title_and_name(person):
    """Reduce 'Title First ... Last' to 'Title Last' (split on single spaces)."""
    parts = person.split(' ')
    # first word is the title, last word is the surname
    return parts[0] + " " + parts[-1]
list(map(split_title_and_name, people))
# Single function only
my_function = lambda a, b, c : a + b + c
my_function(1, 2, 3)
people = ['Dr. Christopher Brooks', 'Dr. Kevyn Collins-Thompson', 'Dr. VG Vinod Vydiswaran', 'Dr. Daniel Romero']
def split_title_and_name(person):
    """Reduce 'Title First ... Last' to 'Title Last' (whitespace-split variant)."""
    words = person.split()
    return ' '.join((words[0], words[-1]))
#option 1
for person in people:
print(split_title_and_name(person) == (lambda x: x.split()[0] + ' ' + x.split()[-1])(person))
#option 2
list(map(split_title_and_name, people)) == list(map(lambda person: person.split()[0] + ' ' + person.split()[-1], people))
my_list = []
for number in range(0, 1000):
if number % 2 == 0:
my_list.append(number)
my_list
my_list = [number for number in range(0,1000) if number % 2 == 0]
my_list
def times_tables():
    """Return the 10x10 multiplication table flattened row-major into a list."""
    return [row * col for row in range(10) for col in range(10)]
times_tables() == [j*i for i in range(10) for j in range(10)]
lowercase = 'abcdefghijklmnopqrstuvwxyz'
digits = '0123456789'
correct_answer = [a+b+c+d for a in lowercase for b in lowercase for c in digits for d in digits]
correct_answer[0:100]
import numpy as np
mylist = [1, 2, 3]
x = np.array(mylist)
x
y = np.array([4, 5, 6])
y
m = np.array([[7, 8, 9], [10, 11, 12]])
m
m.shape
n = np.arange(0, 30, 2) # start at 0 count up by 2, stop before 30
n
n = n.reshape(3, 5) # reshape array to be 3x5
n
o = np.linspace(0, 4, 9) # return 9 evenly spaced values from 0 to 4
o
o.resize(3, 3)
o
np.ones((3, 2))
np.zeros((2, 3))
np.eye(3)
np.diag(y)
np.array([1, 2, 3] * 3)
np.repeat([1, 2, 3], 3)
p = np.ones([2, 3], int)
p
np.vstack([p, 2*p])
np.hstack([p, 2*p])
print(x + y) # elementwise addition [1 2 3] + [4 5 6] = [5 7 9]
print(x - y) # elementwise subtraction [1 2 3] - [4 5 6] = [-3 -3 -3]
print(x * y) # elementwise multiplication [1 2 3] * [4 5 6] = [4 10 18]
print(x / y) # elementwise divison [1 2 3] / [4 5 6] = [0.25 0.4 0.5]
print(x**2) # elementwise power [1 2 3] ^2 = [1 4 9]
x.dot(y) # dot product 1*4 + 2*5 + 3*6
z = np.array([y, y**2])
print(len(z)) # number of rows of array
z = np.array([y, y**2])
z
z.shape
z.T
z.T.shape
z.dtype
z = z.astype('f')
z.dtype
a = np.array([-4, -2, 1, 3, 5])
a.sum()
a.max()
a.min()
a.mean()
a.std()
a.argmax()
a.argmin()
s = np.arange(13)**2
s
s[0], s[4], s[-1]
s[1:5]
s[-4:]
s[-5::-2]
r = np.arange(36)
r.resize((6, 6))
r
r[2, 2]
r[3, 3:6]
r[:2, :-1]
r[-1, ::2]
r[r > 30]
r[r > 30] = 30
r
r2 = r[:3,:3]
r2
r2[:] = 0
r2
r
r_copy = r.copy()
r_copy
r_copy[:] = 10
print(r_copy, '\n')
print(r)
test = np.random.randint(0, 10, (4,3))
test
for row in test:
print(row)
for i in range(len(test)):
print(test[i])
for i, row in enumerate(test):
print('row', i, 'is', row)
test2 = test**2
test2
for i, j in zip(test, test2):
print(i,'+',j,'=',i+j)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step2: <br>
Step3: <br>
Step4: <br>
Step5: <br>
Step6: <br>
Step7: <br>
Step8: <br>
Step9: <br>
Step10: <br>
Step11: <br>
Step12: <br>
Step13: <br>
Step14: <br>
Step15: <br>
Step16: <br>
Step17: <br>
Step18: <br>
Step19: <br>
Step20: <br>
Step21: <br>
Step22: <br>
Step23: <br>
Step24: <br>
Step25: <br>
Step26: <br>
Step27: <br>
Step28: <br>
Step29: <br>
Step30: <br>
Step31: <br>
Step32: <br>
Step33: <br>
Step34: <br>
Step35: <br>
Step36: <br>
Step37: <br>
Step38: <br>
Step39: <br>
Step40: <br>
Step41: <br>
Step42: <br>
Step43: <br>
Step44: <br>
Step45: <br>
Step46: <br>
Step47: <br>
Step48: <br>
Step49: <br>
Step50: <br>
Step51: <br>
Step52: <br>
Step53: <br>
Step54: <br>
Step55: <br>
Step56: <br>
Step57: <br>
Step58: <br>
Step59: <br>
Step60: <br>
Step61: <br>
Step62: <br>
Step63: <br>
Step64: <br>
Step65: <br>
Step66: <br>
Step67: <br>
Step68: <br>
Step69: <br>
Step70: <br>
Step71: <br>
Step72: <br>
Step73: <br>
Step74: <br>
Step75: <br>
Step76: <br>
Step77: <br>
Step78: <br>
Step79: <br>
Step80: <br>
Step81: <br>
Step82: <br>
Step83: <br>
Step84: <br>
Step85: <br>
Step86: <br>
Step87: <br>
Step88: <br>
Step89: <br>
Step90: <br>
Step91: <br>
Step92: <br>
Step93: <br>
Step94: <br>
Step95: <br>
Step96: <br>
Step97: <br>
Step98: <br>
Step99: <br>
|
6,990
|
<ASSISTANT_TASK:>
Python Code:
# Set up the exercise
import math
from learntools.core import binder
binder.bind(globals())
from learntools.intro_to_programming.ex2 import *
print('Setup complete.')
# TODO: Complete the function
def get_expected_cost(beds, baths):
value = ____
return value
# Check your answer
q1.check()
#%%RM_IF(PROD)%%
# Complete the function
def get_expected_cost(beds, baths):
value = 80000 + 30000 * beds + 10000 * baths
return value
q1.assert_check_passed()
# Uncomment to see a hint
#_COMMENT_IF(PROD)_
q1.hint()
# Uncomment to view the solution
#_COMMENT_IF(PROD)_
q1.solution()
# TODO: Use the get_expected_cost function to fill in each value
option_one = ____
option_two = ____
option_three = ____
option_four = ____
print(option_one)
print(option_two)
print(option_three)
print(option_four)
# Check your answer
q2.check()
#%%RM_IF(PROD)%%
# Use the get_expected_cost function to fill in each value
option_one = get_expected_cost(2, 3)
option_two = get_expected_cost(3, 2)
option_three = get_expected_cost(3, 3)
option_four = get_expected_cost(3, 4)
print(option_one)
print(option_two)
print(option_three)
print(option_four)
q2.assert_check_passed()
# Uncomment to see a hint
#_COMMENT_IF(PROD)_
q2.hint()
# Uncomment to view the solution
#_COMMENT_IF(PROD)_
q2.solution()
# TODO: Finish defining the function
def get_cost(sqft_walls, sqft_ceiling, sqft_per_gallon, cost_per_gallon):
cost = ____
return cost
# Check your answer
q3.check()
#%%RM_IF(PROD)%%
def get_cost(sqft_walls, sqft_ceiling, sqft_per_gallon, cost_per_gallon):
total_sqft = sqft_walls + sqft_ceiling
gallons_needed = total_sqft / sqft_per_gallon
cost = cost_per_gallon * gallons_needed
return cost
q3.assert_check_passed()
# Uncomment to see a hint
#_COMMENT_IF(PROD)_
q3.hint()
# Uncomment to view the solution
#_COMMENT_IF(PROD)_
q3.solution()
# TODO: Set the project_cost variable to the cost of the project
project_cost = ____
# Check your answer
q4.check()
#%%RM_IF(PROD)%%
# Set the project_cost variable to the cost of the project
project_cost = get_cost(432, 144, 400, 15)
q4.assert_check_passed()
# Uncomment to see a hint
#_COMMENT_IF(PROD)_
q4.hint()
# Uncomment to view the solution
#_COMMENT_IF(PROD)_
q4.solution()
test_value = 2.17
rounded_value = math.ceil(test_value)
print(rounded_value)
def get_actual_cost(sqft_walls, sqft_ceiling, sqft_per_gallon, cost_per_gallon):
cost = ____
return cost
# Check your answer
q5.check()
#%%RM_IF(PROD)%%
def get_actual_cost(sqft_walls, sqft_ceiling, sqft_per_gallon, cost_per_gallon):
total_sqft = sqft_walls + sqft_ceiling
gallons_needed = total_sqft / sqft_per_gallon
gallons_to_buy = math.ceil(gallons_needed)
cost = cost_per_gallon * gallons_to_buy
return cost
q5.assert_check_passed()
# Uncomment to see a hint
#_COMMENT_IF(PROD)_
q5.hint()
# Uncomment to view the solution
#_COMMENT_IF(PROD)_
q5.solution()
get_actual_cost(432, 144, 400, 15)
get_actual_cost(594, 288, 400, 15)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1
Step2: Question 2
Step3: Question 3
Step4: Question 4
Step5: 🌶️ Question 5
Step6: Use the next code cell to define the function get_actual_cost(). You'll need to use the math.ceil() function to do this.
Step7: Once your function is verified as correct, run the next code cell to calculate the updated cost of your project.
Step8: Say you're working with a slightly larger room. Run the next code cell to calculate the cost of the project.
|
6,991
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
#Note the new use of the dtype option here. We can directly tell pandas to use the Speed column as a category in one step.
speeddf = pd.read_csv("../Class04/Class04_speed_data.csv",dtype={'Speed':'category'})
#We'll use a different tool to plot the data now that we know how to group the data by a category. This will help us make better combined plots later on.
groups = speeddf.groupby('Speed')
# Plot
trainfig, ax = plt.subplots()
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.set_aspect(1)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
# Create our decision boundary mesh
# point in the mesh
x_min = 0.0; x_max = 1.0 # Mesh x size
y_min = 0.0; y_max = 1.0 # Mesh y size
h = .01 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max+h, h), np.arange(y_min, y_max+h, h))
# Split the data into training and testing sets and prepare the features and labels
train, test = train_test_split(speeddf, test_size=0.2, random_state=23)
features_train = train[['Grade','Bumpiness']].values
labels_train = train['Speed'].values
features_test = test[['Grade','Bumpiness']].values
labels_test = test['Speed'].values
class_labels = ["slow", "fast"]
# Load the model and fit the data
dtmodel = DecisionTreeClassifier(random_state=32)
dtmodel.fit(features_train,labels_train)
y_pred = dtmodel.predict(features_test)
# Predict the boundary
Z = pd.Series(dtmodel.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
# First plot our points
testfig1, ax = plt.subplots()
plt.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1,axes=ax)
ax.set_aspect(1)
# Plot test points
groups = test.groupby('Speed')
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
import sklearn.metrics as metrics
recall_score = metrics.recall_score(labels_test, y_pred,labels=class_labels,average=None)
prec_score = metrics.precision_score(labels_test, y_pred,labels=class_labels,average=None)
f1_score = metrics.f1_score(labels_test, y_pred,labels=class_labels,average=None)
acc_score = metrics.accuracy_score(labels_test, y_pred)
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Class-dependent Metrics")
print("Sensitivity/Recall Score: {}".format(recall_score))
print("Precision Score: {}".format(prec_score))
print("F1 Score: {}".format(f1_score))
print("\nClass-independent Metrics")
print("Accuracy Score: {}".format(acc_score))
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
# Load the model and fit the data
dtmodel = DecisionTreeClassifier(min_samples_leaf=10,random_state=32)
dtmodel.fit(features_train,labels_train)
y_pred = dtmodel.predict(features_test)
# Predict the boundary
Z = pd.Series(dtmodel.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
# First plot our points
testfig1, ax = plt.subplots()
plt.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1,axes=ax)
ax.set_aspect(1)
# Plot test points
groups = test.groupby('Speed')
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
# Load the model and fit the data
dtmodel = DecisionTreeClassifier(min_samples_leaf=5,random_state=32)
dtmodel.fit(features_train,labels_train)
y_pred = dtmodel.predict(features_test)
# Predict the boundary
Z = pd.Series(dtmodel.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
# First plot our points
testfig1, ax = plt.subplots()
plt.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1,axes=ax)
ax.set_aspect(1)
# Plot test points
groups = test.groupby('Speed')
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
# Load the model and fit the data
from sklearn.ensemble import RandomForestClassifier
rfmodel = RandomForestClassifier(n_estimators=100,random_state=32)
rfmodel.fit(features_train,labels_train)
y_pred = rfmodel.predict(features_test)
# Predict the boundary
Z = pd.Series(rfmodel.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
# First plot our points
testfig1, ax = plt.subplots()
plt.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1,axes=ax)
ax.set_aspect(1)
# Plot test points
groups = test.groupby('Speed')
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
rfmodel.feature_importances_
# Load the model and fit the data
from sklearn.ensemble import AdaBoostClassifier
abcmodel = AdaBoostClassifier(n_estimators=100,random_state=32)
abcmodel.fit(features_train,labels_train)
y_pred = abcmodel.predict(features_test)
# Predict the boundary
Z = pd.Series(abcmodel.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
# First plot our points
testfig1, ax = plt.subplots()
plt.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1,axes=ax)
ax.set_aspect(1)
# Plot test points
groups = test.groupby('Speed')
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
import xgboost
xgbmodel = xgboost.XGBClassifier(n_estimators=100, seed=32)
xgbmodel.fit(features_train,labels_train)
y_pred = xgbmodel.predict(features_test)
# Predict the boundary
Z = pd.Series(xgbmodel.predict(np.c_[xx.ravel(), yy.ravel()]), dtype='category').cat.codes.values.reshape(xx.shape)
# First plot our points
testfig1, ax = plt.subplots()
plt.pcolormesh(xx, yy, Z, cmap= plt.cm.cool, alpha=0.1,axes=ax)
ax.set_aspect(1)
# Plot test points
groups = test.groupby('Speed')
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Grade'], group['Bumpiness'], marker='o', linestyle='', ms=8, label=name)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Grade')
ax.set_ylabel('Bumpiness')
matt_score = metrics.matthews_corrcoef(labels_test, y_pred)
print("Matthews Correlation Coefficient (MCC): {}".format(matt_score))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll import the DecisionTreeClassifier and use all of the default values except for the random_state. We'll provide that so that the output is consistent run-to-run. The decision tree classifier uses the random number generator to make decisions about branching, so if we don't set this, we'll get different results every time we run the algorithm.
Step2: Take a look at the decision boundary for this classifier
Step3: So our decision boundary is cleaned up significantly and we got a bump in the test performance of the model. Let's check one more value to see if we can do any better.
Step4: We got an MCC of 0.894 with a fairly simple decision boundary. That's good! There are, perhaps, a few too many wiggles in the boundary, but overall it is looking pretty good. Note that all of the boundaries are straight lines- that is because the decision tree is choosing cutoff values of "Grade" and "Bumpiness" to split the dataset along those lines. Overall this isn't too bad.
Step5: We see that the ensemble does a reasonable job- perhaps not better, in this case, than the decision tree by itself. However, there is something else that we get out of using the ensemble
Step6: Both features (Grade and Bumpiness) have just about the same importance in our model (about 50% each). That isn't too surprising since we faked the data to begin with...
Step7: XGBoost
|
6,992
|
<ASSISTANT_TASK:>
Python Code:
from keras.applications import inception_v3
from keras import backend as K
# We will not be training our model,
# so we use this command to disable all training-specific operations
K.set_learning_phase(0)
# Build the InceptionV3 network.
# The model will be loaded with pre-trained ImageNet weights.
model = inception_v3.InceptionV3(weights='imagenet',
include_top=False)
# Dict mapping layer names to a coefficient
# quantifying how much the layer's activation
# will contribute to the loss we will seek to maximize.
# Note that these are layer names as they appear
# in the built-in InceptionV3 application.
# You can list all layer names using `model.summary()`.
layer_contributions = {
'mixed2': 0.2,
'mixed3': 3.,
'mixed4': 2.,
'mixed5': 1.5,
}
# Get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])
# Define the loss.
loss = K.variable(0.)
for layer_name in layer_contributions:
# Add the L2 norm of the features of a layer to the loss.
coeff = layer_contributions[layer_name]
activation = layer_dict[layer_name].output
# We avoid border artifacts by only involving non-border pixels in the loss.
scaling = K.prod(K.cast(K.shape(activation), 'float32'))
loss += coeff * K.sum(K.square(activation[:, 2: -2, 2: -2, :])) / scaling
# This holds our generated image
dream = model.input
# Compute the gradients of the dream with regard to the loss.
grads = K.gradients(loss, dream)[0]
# Normalize gradients.
grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)
# Set up function to retrieve the value
# of the loss and gradients given an input image.
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)
def eval_loss_and_grads(x):
    """Evaluate the compiled Keras function and split its outputs.

    Returns a (loss_value, grad_values) pair for the given input image tensor.
    """
    loss_value, grad_values = fetch_loss_and_grads([x])
    return loss_value, grad_values
def gradient_ascent(x, iterations, step, max_loss=None):
    """Run plain gradient ascent on x for up to `iterations` steps.

    Stops early when `max_loss` is given and the current loss exceeds it,
    to avoid amplifying ugly artifacts.
    """
    for iteration in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        # Bail out before applying an update from an already-too-large loss.
        if max_loss is not None and loss_value > max_loss:
            break
        print('...Loss value at', iteration, ':', loss_value)
        x += step * grad_values
    return x
import scipy
from keras.preprocessing import image
def resize_img(img, size):
    """Resize a (batch, H, W, C) image array to `size` = (new_H, new_W).

    Returns a new array; batch and channel axes are left untouched.
    """
    img = np.copy(img)
    zoom_factors = (
        1,
        float(size[0]) / img.shape[1],
        float(size[1]) / img.shape[2],
        1,
    )
    # order=1 -> bilinear interpolation.
    return scipy.ndimage.zoom(img, zoom_factors, order=1)
def save_img(img, fname):
    # Convert the processed tensor back to a displayable uint8 image (via
    # deprocess_image, on a copy so the caller's array is untouched) and
    # write it to disk.
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 — confirm
    # the pinned SciPy version, or switch to imageio.imwrite / PIL.
    pil_img = deprocess_image(np.copy(img))
    scipy.misc.imsave(fname, pil_img)
def preprocess_image(image_path):
    """Load an image file and convert it into an InceptionV3-ready 4D tensor."""
    # Util function to open, resize and format pictures
    # into appropriate tensors.
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    # Add a leading batch dimension: (H, W, C) -> (1, H, W, C).
    img = np.expand_dims(img, axis=0)
    # InceptionV3 preprocessing rescales pixel values (to the [-1, 1] range).
    img = inception_v3.preprocess_input(img)
    return img
def deprocess_image(x):
    """Undo InceptionV3 preprocessing and return a displayable uint8 image."""
    if K.image_data_format() == 'channels_first':
        # (1, 3, H, W) -> (H, W, 3)
        x = x.reshape((3, x.shape[2], x.shape[3])).transpose((1, 2, 0))
    else:
        # (1, H, W, 3) -> (H, W, 3)
        x = x.reshape((x.shape[1], x.shape[2], 3))
    # Map the preprocessed [-1, 1] range back to [0, 255] (in place).
    x /= 2.
    x += 0.5
    x *= 255.
    return np.clip(x, 0, 255).astype('uint8')
import numpy as np
# Playing with these hyperparameters will also allow you to achieve new effects
step = 0.01 # Gradient ascent step size
num_octave = 3 # Number of scales at which to run gradient ascent
octave_scale = 1.4 # Size ratio between scales
iterations = 20 # Number of ascent steps per scale
# If our loss gets larger than 10,
# we will interrupt the gradient ascent process, to avoid ugly artifacts
max_loss = 10.
# Fill this to the path to the image you want to use
base_image_path = '/home/ubuntu/data/original_photo_deep_dream.jpg'
# Load the image into a Numpy array
img = preprocess_image(base_image_path)
# We prepare a list of shape tuples
# defining the different scales at which we will run gradient ascent
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
successive_shapes.append(shape)
# Reverse list of shapes, so that they are in increasing order
successive_shapes = successive_shapes[::-1]
# Resize the Numpy array of the image to our smallest scale
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])
for shape in successive_shapes:
print('Processing image shape', shape)
img = resize_img(img, shape)
img = gradient_ascent(img,
iterations=iterations,
step=step,
max_loss=max_loss)
upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
same_size_original = resize_img(original_img, shape)
lost_detail = same_size_original - upscaled_shrunk_original_img
img += lost_detail
shrunk_original_img = resize_img(original_img, shape)
save_img(img, fname='dream_at_scale_' + str(shape) + '.png')
save_img(img, fname='final_dream.png')
from matplotlib import pyplot as plt
plt.imshow(deprocess_image(np.copy(img)))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we compute the "loss", the quantity that we will seek to maximize during the gradient ascent process. In Chapter 5, for filter
Step2: Now let's define a tensor that contains our loss, i.e. the weighted sum of the L2 norm of the activations of the layers listed above.
Step3: Now we can set up the gradient ascent process
Step4: Finally, here is the actual Deep Dream algorithm.
|
6,993
|
<ASSISTANT_TASK:>
Python Code:
# access yelp.csv using a relative path
import pandas as pd
import seaborn as sns
yelp = pd.read_csv('C:/Users/Joshuaw/Documents/GA_Data_Science/data/yelp.csv')
yelp.head()
# read the data from yelp.json into a list of rows
# each row is decoded into a dictionary using using json.loads()
import json
import pandas as pd
import seaborn as sns
with open('C:/Users/Joshuaw/Documents/GA_Data_Science/data/yelp.json', 'rU') as f:
data = [json.loads(row) for row in f]
# show the first review
data[0]
# convert the list of dictionaries to a DataFrame
#note that the cool, funny, and useful vote types were in a nested dictionary: u'votes': {u'cool': 2, u'funny': 0, u'useful': 5}
yelp = pd.DataFrame(data)
yelp.head(2)
# add DataFrame columns for cool, useful, and funny, do this by creating a new column for each column
#use list comprehension to pass the nested structure, remember it's as rows, e.g. row['votes]['cool]
yelp['cool'] = [row['votes']['cool'] for row in data]
yelp['useful'] = [row['votes']['useful'] for row in data]
yelp['funny'] = [row['votes']['funny'] for row in data]
# drop the votes column
#since votes was a nested data type votes:cool, votes:funny, votes:useful, the votes column has all types of vote data
#making the data not so useful so drop it, remember axis=1 indicates that we want to drop a particular column, we would use
#axis=0 to drop a specific row
yelp.drop('votes', axis=1, inplace=True)
yelp.head(1)
# treat stars as a categorical variable and look for differences between groups
yelp.groupby('stars').mean()
# correlation matrix
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
#create correlation matrix
corr = yelp.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 8))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, cmap=cmap, vmax=.3,
square=True, linewidths=.5,
cbar_kws={"shrink": .5})
#sns.heatmap(yelp.corr())
# multiple scatter plots
sns.set(style="white")
sns.pairplot(yelp, x_vars=['cool', 'useful', 'funny'], y_vars='stars', size=6, aspect=0.7, kind='reg', markers = '+')
feature_cols = ['cool', 'useful', 'funny']
x = yelp[feature_cols]
y = yelp.stars
#feature_cols = ['cool', 'useful', 'funny']
#X = yelp[feature_cols]
#y = yelp.stars
#instantiate the linear regression model
from sklearn.linear_model import LinearRegression
#store the regression model function as a variable called 'linreg'
#we specify that we want to fit a model that includes an intercept, because we don't assume the data has been centered
linreg = LinearRegression(fit_intercept=True)
#fit a linear regression model
linreg.fit(x, y)
#find coeffecient of determination aka R2 and store it as a variable
#result = 0.044
r2 = linreg.score(x, y, sample_weight=None)
#create a list of lists that shows the independent variables and their coeffecients
#note: need to see whether this is a b weight or a beta weight and how to look at significance of the coeffecients
coef = zip(feature_cols, linreg.coef_)
print(r2)
print(coef)
print(linreg.intercept_)
#overall votes is a poor predictor of stars, sentiment is likely to be a better predictor
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import numpy as np
# define a function that accepts a list of features and returns testing RMSE
def train_test_rmse(feature_cols):
    """Fit a linear regression on the given yelp features; return test-set RMSE."""
    features = yelp[feature_cols]
    target = yelp.stars
    # random_state fixed so repeated calls compare models on the same split.
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, random_state=1)
    model = LinearRegression()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    return np.sqrt(metrics.mean_squared_error(y_test, predictions))
# calculate RMSE with all three features
train_test_rmse(['cool', 'useful', 'funny'])
print train_test_rmse(['cool', 'useful'])
print train_test_rmse(['cool', 'funny'])
print train_test_rmse(['useful', 'funny'])
# new feature: review length (number of characters)
yelp['length'] = yelp.text.apply(len)
# new features: whether or not the review contains 'love' or 'hate'
yelp['love'] = yelp.text.str.contains('love', case=False).astype(int)
yelp['hate'] = yelp.text.str.contains('hate', case=False).astype(int)
# add new features to the model and calculate RMSE
train_test_rmse(['cool', 'useful', 'funny', 'length', 'love', 'hate'])
# split the data (outside of the function)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
# create a NumPy array with the same shape as y_test
y_null = np.zeros_like(y_test, dtype=float)
# fill the array with the mean of y_test
y_null.fill(y_test.mean())
# calculate null RMSE
import numpy as np
print np.sqrt(metrics.mean_squared_error(y_test, y_null))
# import and instantiate KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=50)
# classification models will automatically treat the response value (1/2/3/4/5) as unordered categories
knn.fit(X_train, y_train)
y_pred_class = knn.predict(X_test)
print metrics.accuracy_score(y_test, y_pred_class)
# use linear regression to make continuous predictions
linreg = LinearRegression()
linreg.fit(x_train, y_train)
y_pred = linreg.predict(x_test)
# round its predictions to the nearest integer
y_pred_class = y_pred.round()
# calculate classification accuracy of the rounded predictions
print metrics.accuracy_score(y_test, y_pred_class)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 1 (Bonus)
Step2: Task 2
Step3: Task 3
Step4: Task 4
Step5: Task 5
Step6: Task 6
Step7: Task 7 (Bonus)
Step8: Task 8 (Bonus)
Step9: Task 9 (Bonus)
Step10: Task 10 (Bonus)
|
6,994
|
<ASSISTANT_TASK:>
Python Code:
full_survey = ds.cadence_plot(fieldID=1427, mjd_center=61404, mjd_range=[-1825, 1825],
observedOnly=False, colorbar=True);
plt.close()
full_survey[0]
half_survey = ds.cadence_plot(fieldID=1427, mjd_center=61404, mjd_range=[-1825, 1],
observedOnly=False, colorbar=True);
second_year = ds.cadence_plot(fieldID=1427, mjd_center=60200, mjd_range=[-150, 150],
observedOnly=False, colorbar=True);
secondYearObs = ds.cadence_plot(fieldID=1427, mjd_center=60300, mjd_range=[-0, 30], observedOnly=False)
plt.close()
secondYearObs[0]
df['obsID'] = df.index.values
uniqueObs = df.groupby(['night', 'filter'])
aa = uniqueObs['airmass'].agg({'myInds': lambda x: x.idxmin()}).myInds.astype(int).values
ourOpSim = df.ix[aa]
axs = df.hist(by='filter', column='airmass', histtype='step', lw=2, alpha=1, color='k', normed=True);
axs = df.ix[aa].hist(by='filter', column='airmass', histtype='step', lw=2, alpha=1, color='r', ax=axs, normed=True)
df.obsID.unique().size, df.obsID.size
ourOpSim.head()
ourOpSim.obsID.values
ourOpSim.obsID.to_csv('FirstSet_obsHistIDs.csv')
ourOpSim.to_csv('SelectedKrakenVisits.csv')
xx = ourOpSim.groupby(['night', 'filter']).aggregate('count')
assert(all(xx.max() == 1))
dff = uniqueObs['airmass'].agg({'myInds': lambda x: x.idxmin()})
aa = dff.myInds.astype(int).values
aa.sort()
l = []
for key in keys:
l.append(uniqueObs.get_group(key).airmass.idxmin())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List of obsHistIDs with unique nights
Step2: How much does it help our airmass distribution by choosing the lowest airmass of the available ones
Step3: Our culled opsim that we shall try out first is now 'ourOpSim' . We can write this our to a csv file, or a database. We can also view the list of obsHistIDs
Step4: Scratch
|
6,995
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
    """Create the graph placeholders for real images and the latent vector z."""
    real_images = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
    latent_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return real_images, latent_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    """Build the generator network.

    Arguments
    ---------
    z : Input tensor for the generator (latent noise)
    out_dim : Shape of the generator output
    n_units : Number of units in hidden layer
    reuse : Reuse the variables with tf.variable_scope
    alpha : leak parameter for leaky ReLU

    Returns
    -------
    out : tanh-activated generator output in [-1, 1]
    """
    with tf.variable_scope('generator', reuse=reuse):
        # Single hidden layer with a leaky ReLU: positives pass through,
        # negatives are scaled down by alpha.
        hidden = tf.layers.dense(z, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)
        # tanh keeps the output in the same [-1, 1] range as the real images.
        logits = tf.layers.dense(hidden, out_dim, activation=None)
        out = tf.tanh(logits)
    return out
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    """Build the discriminator network.

    Arguments
    ---------
    x : Input tensor for the discriminator
    n_units : Number of units in hidden layer
    reuse : Reuse the variables with tf.variable_scope
    alpha : leak parameter for leaky ReLU

    Returns
    -------
    out : sigmoid probability that x is a real image
    logits : pre-sigmoid scores (used by the cross-entropy losses)
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        # Single hidden layer with a leaky ReLU.
        hidden = tf.layers.dense(x, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)
        logits = tf.layers.dense(hidden, 1, activation=None)
        out = tf.sigmoid(logits)
    return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Build the model
g_model = generator(input_z, input_size)
# g_model is the generator output
d_model_real, d_logits_real = discriminator(input_real)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)
# Calculate losses
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
    """Plot a 4x4 grid of generator samples taken at the given training epoch."""
    fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        # Hide axis ticks so only the digit images are visible.
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        # Samples are flattened 784-vectors; reshape back to 28x28 grayscale.
        im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
    return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
6,996
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as spo
def parab(X):
    """Shifted parabola whose minimum value 1.5 occurs at X = 2."""
    return (X - 2) ** 2 + 1.5
initial_guess = 3
opt_methods_no_Jacobian = ['Nelder-Mead','Powell','CG','BFGS','L-BFGS-B','TNC','COBYLA','SLSQP']
mins = [spo.minimize(parab,initial_guess,method=om,) for om in opt_methods_no_Jacobian]
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian,
'min' : [m.x for m in mins]
}).set_index('Method')
min_df
def plot_mins(f,min_df,xmin=-5,xmax=5,ticks=1000,title='',xguess=3,legend_pos='upper left'):
    """Plot f over [xmin, xmax] together with optimizer solutions and the initial guess."""
    x = np.linspace(xmin, xmax, ticks)
    y = f(x)
    # Objective curve (blue line).
    plt.plot(x, y ,color='b')
    # Minima found by the optimizers (green crosses); min_df holds x-locations.
    plt.plot(min_df, f(min_df),'g',linewidth=13,marker="+",label='solutions')
    # Starting point handed to the optimizers (black star).
    plt.plot(xguess, f(xguess),'black',linewidth=13,marker="*",label='initial guess')
    # Pad the view by 1 unit on every side.
    plt.axis([xmin-1, xmax+1, y.min()-1, y.max()+1])
    plt.title(title)
    plt.grid(True)
    plt.legend(loc=legend_pos)
    plt.show()
plot_mins(f=parab,min_df=min_df,xmin=-3,xmax=8,title='Parabola')
def non_convex_1(X):
    """Piecewise objective: a parabola on (0, 4), clamped to a 5.5 plateau outside.

    Accepts a scalar or a NumPy array (evaluated element-wise). The flat
    plateau outside (0, 4) makes the function non-convex, which traps
    gradient-based optimizers whose starting point lies on the plateau.
    """
    # BUG FIX: the original used `type(X) is float`, so ints and NumPy scalar
    # floats fell into the array branch and crashed on `Y[mask] = 5.5`.
    if np.ndim(X) == 0:
        x = float(X)
        if x <= 0 or x >= 4:
            return 5.5
        return (x - 2) ** 2 + 1.5
    Y = (X - 2) ** 2 + 1.5
    # Flatten the objective outside the parabolic basin.
    Y[np.logical_or(X <= 0, X >= 4)] = 5.5
    return Y
initial_guess = 4.5
mins = [spo.minimize(non_convex_1,initial_guess,method=om,) for om in opt_methods_no_Jacobian]
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian,
'min' : [m.x for m in mins]
}).set_index('Method')
min_df
plot_mins(f=non_convex_1,min_df=min_df,xmin=-2,xmax=9,title='Non-Convex example N.1 - initial guess = 4.5',
xguess=4.5,legend_pos='lower right')
initial_guess = 3.9
mins = [spo.minimize(non_convex_1,initial_guess,method=om,) for om in opt_methods_no_Jacobian]
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian,
'min' : [m.x for m in mins]
}).set_index('Method')
min_df
plot_mins(f=non_convex_1,min_df=min_df,xmin=-2,xmax=9,title='Non-Convex example N.1 - initial guess = 3.9',
xguess=3.9,legend_pos='lower right')
from scipy.optimize import basinhopping
res = basinhopping(non_convex_1, 4.5, minimizer_kwargs={"method": "BFGS"},niter=200)
res.x
from scipy.optimize import minimize, rosen, rosen_der
mins = [spo.minimize(rosen,[1.3, 0.7, 0.8, 1.9, 1.2],method=om) for om in opt_methods_no_Jacobian]
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian,
'min' : [m.x for m in mins],
'min_value': [rosen(m.x) for m in mins]
}).set_index('Method')
min_df.sort_values(by = 'min_value' , inplace=True)
min_df
res = basinhopping(rosen, [1.3, 0.7, 0.8, 1.9, 1.2], minimizer_kwargs={"method": "BFGS"},niter=200)
print(">>> basinhopping min: "+str(res.x)+" - min_value:"+str(rosen(res.x)))
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian+['Basin Hopping'],
'min' : [m.x for m in mins]+[res.x],
'min_value': [rosen(m.x) for m in mins]+[rosen(res.x)]
}).set_index('Method')
min_df.sort_values(by = 'min_value' , inplace=True)
min_df
def my_rosen(x,out_value=10e10):
    """Rosenbrock function, penalised by ``out_value`` outside the non-negative orthant."""
    ## no negative values
    feasible = not np.any(x < 0.0)
    return rosen(x) if feasible else out_value
mins = [spo.minimize(my_rosen,[1.3, 0.7, 0.8, 1.9, 1.2],method=om) for om in opt_methods_no_Jacobian]
res = basinhopping(my_rosen, [1.3, 0.7, 0.8, 1.9, 1.2], minimizer_kwargs={"method": "BFGS"},niter=200)
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian+['Basin Hopping'],
'min' : [m.x for m in mins]+[res.x],
'min_value': [rosen(m.x) for m in mins]+[rosen(res.x)]
}).set_index('Method')
min_df.sort_values(by = 'min_value' , inplace=True)
min_df
def my_rosen(x,out_value=10e10,approx = 0.1):
    """Rosenbrock with two soft constraints: x >= 0 and x0..x3 within ``approx`` of each other.

    Bug fix: the feasible branch evaluated ``rosen(x)`` but never returned it,
    so every feasible point yielded ``None`` and broke the optimizers.
    """
    ## no negative values
    if np.any(x < 0.0):
        return out_value
    if type(x) is list:
        x = np.array(x)
    ## x1 = x2 = x3 = x4
    # All pairwise gaps among the first four coordinates being < approx is
    # equivalent to their spread (max - min) being < approx.
    head = x[:4]
    if np.max(head) - np.min(head) < approx:
        return rosen(x)
    return out_value
mins = [spo.minimize(my_rosen,[1.3, 0.7, 0.8, 1.9, 1.2],method=om) for om in opt_methods_no_Jacobian]
res = basinhopping(my_rosen, [1.3, 0.7, 0.8, 1.9, 1.2], minimizer_kwargs={"method": "BFGS"},niter=200)
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian+['Basin Hopping'],
'min' : [m.x for m in mins]+[res.x],
'min_value': [rosen(m.x) for m in mins]+[rosen(res.x)]
}).set_index('Method')
min_df.sort_values(by = 'min_value' , inplace=True)
min_df
opt_methods_no_Jacobian_bounds = ['SLSQP','L-BFGS-B','TNC']
## handling bounds for SLSQP, L-BFGS-B, TNC
bnds = ((0,1),(0,1),(0,1),(0,1),(0,1))
mins = [spo.minimize(rosen,[1.3, 0.7, 0.8, 1.9, 1.2],method=om,bounds=bnds) for om in opt_methods_no_Jacobian_bounds]
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian_bounds,
'min' : [m.x for m in mins],
'min_value': [rosen(m.x) for m in mins]
}).set_index('Method')
min_df.sort_values(by = 'min_value' , inplace=True)
min_df
## handling constraints for SLSQP (the only that seems to support constraints)
cons = ({'type': 'eq','fun' : lambda x: np.array([x[0]- x[1]]),'jac' : lambda x: np.array([1,-1,0,0,0])},
{'type': 'eq','fun' : lambda x: np.array([x[0]- x[2]]),'jac' : lambda x: np.array([1,0,-1,0,0])},
{'type': 'eq','fun' : lambda x: np.array([x[0]- x[3]]),'jac' : lambda x: np.array([1,0,0,-1,0])},
{'type': 'eq','fun' : lambda x: np.array([x[0]- x[4]]),'jac' : lambda x: np.array([1,0,0,0,-1])},
{'type': 'eq','fun' : lambda x: np.array([x[1]- x[2]]),'jac' : lambda x: np.array([0,1,-1,0,0])},
{'type': 'eq','fun' : lambda x: np.array([x[1]- x[3]]),'jac' : lambda x: np.array([0,1,0,-1,0])},
{'type': 'eq','fun' : lambda x: np.array([x[1]- x[4]]),'jac' : lambda x: np.array([0,1,0,0,-1])},
{'type': 'eq','fun' : lambda x: np.array([x[2]- x[3]]),'jac' : lambda x: np.array([0,0,1,-1,0])},
{'type': 'eq','fun' : lambda x: np.array([x[2]- x[4]]),'jac' : lambda x: np.array([0,0,1,0,-1])},
{'type': 'eq','fun' : lambda x: np.array([x[3]- x[4]]),'jac' : lambda x: np.array([0,0,0,1,-1])})
bnds = ((0,1),(0,1),(0,1),(0,1),(0,1))
mins = spo.minimize(rosen,[0.5, 0.7, 0.8, 0.7, 1.2],method='SLSQP',bounds=bnds,constraints=cons)
min_df = pd.DataFrame({'Method' : 'SLSQP',
'min' : [mins.x] ,
'min_value': rosen(mins.x)
}).set_index('Method')
min_df
cons = ({'type': 'ineq','fun' : lambda x: np.array(4-np.sum(np.array(x))),'jac' : lambda x: np.array([-1,-1,-1,-1,-1])})
bnds = ((0,1),(0,1),(0,1),(0,1),(0,1))
mins = spo.minimize(rosen,[0.5, 0.7, 0.8, 0.7, 1.2],method='SLSQP',bounds=bnds,constraints=cons)
min_df = pd.DataFrame({'Method' : 'SLSQP',
'min' : [mins.x] ,
'min_value': rosen(mins.x)
}).set_index('Method')
min_df
def my_rosen(x,out_value=10e10,approx = 0.1):
    """Rosenbrock penalised unless x >= 0 and sum(x) lies within ``approx`` of 4.

    Bug fix: the feasible branch was missing ``return``, so it yielded ``None``.
    """
    ## no negative values
    if np.any(x < 0.0):
        return out_value
    if type(x) is list:
        x = np.array(x)
    # Soft equality constraint: coordinates must sum to (roughly) 4.
    if np.absolute(np.sum(x) - 4) < approx:
        return rosen(x)
    return out_value
mins = [spo.minimize(my_rosen,[1.3, 0.7, 0.8, 1.9, 1.2],method=om) for om in opt_methods_no_Jacobian]
res = basinhopping(my_rosen, [1.3, 0.7, 0.8, 1.9, 1.2], minimizer_kwargs={"method": "BFGS"},niter=200)
min_df = pd.DataFrame({'Method' : opt_methods_no_Jacobian+['Basin Hopping'],
'min' : [m.x for m in mins]+[res.x],
'min_value': [rosen(m.x) for m in mins]+[rosen(res.x)]
}).set_index('Method')
min_df.sort_values(by = 'min_value' , inplace=True)
min_df
class MyBounds(object):
    """basinhopping accept-test: box [0, 1]^5 plus the budget constraint sum(x) <= 4."""
    def __init__(self, xmax=[1.0,1.0,1.0,1.0,1.0], xmin=[0,0,0,0,0] ):
        self.xmax = np.array(xmax)
        self.xmin = np.array(xmin)
    def __call__(self, **kwargs):
        # basinhopping passes the candidate point as the `x_new` keyword.
        candidate = kwargs["x_new"]
        inside_box = np.all(candidate <= self.xmax) and np.all(candidate >= self.xmin)
        within_budget = np.sum(candidate) <= 4
        return bool(inside_box and within_budget)
mybounds = MyBounds()
res = basinhopping(rosen, [1.3, 0.7, 0.8, 1.9, 1.2], minimizer_kwargs={"method": "BFGS"},
niter=200,accept_test=mybounds)
print(">>> basinhopping min: "+str(res.x)+" - min_value:"+str(rosen(res.x)))
import matplotlib.pyplot as plt
import pandas.io.data as web
def get_data(symbols,
             add_ref=True,
             data_source='yahoo',
             price='Adj Close',
             start='1/21/2010',
             end='4/15/2016'):
    """Read stock data (adjusted close) for given symbols from a remote source.

    Bug fix: the summary line above was a bare unquoted statement, which is a
    SyntaxError; it is now a proper docstring.

    NOTE(review): this mutates the caller's ``symbols`` list when SPY is
    inserted, and relies on the long-removed ``pandas.io.data`` panel API —
    confirm before running on modern pandas.
    """
    if add_ref and 'SPY' not in symbols:  # add SPY for reference, if absent
        symbols.insert(0, 'SPY')
    df = web.DataReader(symbols,
                        data_source=data_source,
                        start=start,
                        end=end)
    # Panel-style selection of the requested price field for all dates/symbols.
    return df[price, :, :]
def compute_daily_returns(df):
    """Compute and return the daily return values.

    Bug fixes: the summary line was an unquoted bare statement (SyntaxError),
    and ``.ix`` has been removed from pandas — ``.iloc`` pins the first row.
    """
    # Note: Returned DataFrame must have the same number of rows
    daily_returns = (df / df.shift(1)) - 1
    # The first row has no previous day, so define its return as 0.
    daily_returns.iloc[0, :] = 0
    return daily_returns
def fill_missing_values(df_data):
    """Fill missing values in data frame, in place (forward-fill, then back-fill).

    Bug fixes: the summary line was an unquoted bare statement (SyntaxError),
    and ``fillna(method=...)`` is deprecated in modern pandas in favour of
    ``ffill``/``bfill``.
    """
    df_data.ffill(inplace=True)
    df_data.bfill(inplace=True)
    return df_data
def cumulative_returns(df):
    """Return cumulative returns of each column relative to the first row of ``df``.

    Bug fix: ``.ix`` was removed from pandas; ``.iloc[0, :]`` selects the
    first row positionally.
    """
    return df / df.iloc[0, :] - 1
df = fill_missing_values(get_data(symbols=['GOOG','SPY','IBM','GLD'],
start='4/21/2015',
end='7/15/2016'))
dr = compute_daily_returns(df)
def cumulative_returns_obj(alloc,df=df):
    """Negative portfolio cumulative return for allocation ``alloc`` (to be minimised)."""
    if type(alloc) is list:
        alloc = np.array(alloc)
    # Bug fix: `.ix` was removed from pandas; `.iloc[-1, :]` takes the final row.
    cr = cumulative_returns(df).iloc[-1, :]
    # Negate so that minimising this objective maximises the cumulative return.
    return -1 * np.dot(cr, alloc)
cons = ({'type': 'ineq','fun' : lambda x: np.array(1-np.sum(np.array(x))),'jac' : lambda x: np.array([-1,-1,-1,-1])})
bnds = ((0,1),(0,1),(0,1),(0,1))
mins = spo.minimize(cumulative_returns_obj,[0.25, 0.25, 0.25, 0.25],method='SLSQP',bounds=bnds,constraints=cons)
min_df = pd.DataFrame({'Asset' : df.columns,
'allocation' : mins.x ,
'portfolio cumul. ret': -1 * cumulative_returns_obj(mins.x)
}).set_index('Asset')
min_df
cumulative_returns(df).ix[-1,:]
class MyBoundsP(object):
    """basinhopping accept-test for portfolio weights: 0 <= x <= 1 and sum(x) <= 1."""
    def __init__(self, xmax=[1,1,1,1], xmin=[0,0,0,0] ):
        self.xmax = np.array(xmax)
        self.xmin = np.array(xmin)
    def __call__(self, **kwargs):
        # basinhopping passes the candidate allocation as the `x_new` keyword.
        weights = np.array(kwargs["x_new"])
        inside_box = np.all(weights <= self.xmax) and np.all(weights >= self.xmin)
        fully_invested = np.sum(weights) <= 1
        return bool(inside_box and fully_invested)
myboundsp = MyBoundsP()
def print_fun(x, f, accepted):
    """basinhopping callback: log each local minimum and whether it was accepted."""
    message = "at minimum %.4f accepted %d" % (f, int(accepted))
    print(message)
res = basinhopping(cumulative_returns_obj, [.25,0.25,0.25,0.25], minimizer_kwargs={"method": "BFGS"},
niter=10,accept_test=myboundsp,callback=print_fun , T=-1.0, stepsize=-500000)
print(">>> basinhopping max: "+str(res.x)+" - max_value:"+str(-1 * cumulative_returns_obj(res.x)))
res
def average_daily_return_obj(alloc,dr=dr):
    """Scaled negative average daily return of the portfolio (to be minimised)."""
    weights = np.array(alloc) if type(alloc) is list else alloc
    # Scale by -100 so the minimiser effectively maximises the (tiny) mean return.
    return -100 * np.dot(dr.mean(), weights)
cons = ({'type': 'ineq','fun' : lambda x: np.array(1-np.sum(np.array(x))),'jac' : lambda x: np.array([-1,-1,-1,-1])})
bnds = ((0,1),(0,1),(0,1),(0,1))
mins = spo.minimize(average_daily_return_obj,[0.25, 0.25, 0.25, 0.25],method='SLSQP',bounds=bnds,constraints=cons)
min_df = pd.DataFrame({'Asset' : df.columns,
'allocation' : mins.x ,
'portfolio avg daily ret': -1 * average_daily_return_obj(mins.x)/100
}).set_index('Asset')
min_df
dr.mean()
def std_daily_return_obj(alloc,dr=dr):
    """Scaled portfolio risk proxy: weighted sum of per-asset daily-return std devs."""
    weights = np.array(alloc) if type(alloc) is list else alloc
    # Large scale factor keeps the objective well above the solver's tolerance.
    return 10000000 * np.dot(dr.std(), weights)
cons = ({'type': 'eq','fun' : lambda x: np.array(1-np.sum(np.array(x))),'jac' : lambda x: np.array([-1,-1,-1,-1])})
bnds = ((0,1),(0,1),(0,1),(0,1))
mins = spo.minimize(std_daily_return_obj,[0.25, 0.25, 0.25, 0.25],method='SLSQP',bounds=bnds,constraints=cons)
min_df = pd.DataFrame({'Asset' : df.columns,
'allocation' : mins.x ,
'portfolio risk': std_daily_return_obj(mins.x) / 10000000
}).set_index('Asset')
min_df
dr.std()
def sharpe_ratio_obj(alloc,dr=dr,sample_freq='d',risk_free_rate=0.0):
    """Negative annualised Sharpe ratio of the portfolio (to be minimised).

    ``sample_freq`` picks the annualisation factor: 'd' (252 trading days),
    'w' (52 weeks) or 'm' (12 months).
    """
    if type(alloc) is list:
        alloc = np.array(alloc)
    # Hoist the weighted portfolio daily-return series (was computed twice).
    portfolio_daily = np.sum(alloc * dr, axis=1)
    sr = (portfolio_daily - risk_free_rate).mean() / portfolio_daily.std()
    if sample_freq == 'd':
        sr = sr * np.sqrt(252)
    elif sample_freq == 'w':
        sr = sr * np.sqrt(52)
    elif sample_freq == 'm':
        sr = sr * np.sqrt(12)
    else:
        # Typo fixed: the message previously read "unkown".
        raise Exception('unknown sample frequency :'+str(sample_freq))
    return -1*sr
cons = ({'type': 'ineq','fun' : lambda x: np.array(1-np.sum(np.array(x))),'jac' : lambda x: np.array([-1,-1,-1,-1])})
bnds = ((0,1),(0,1),(0,1),(0,1))
mins = spo.minimize(sharpe_ratio_obj,[0.25, 0.25, 0.25, 0.25],method='SLSQP',bounds=bnds,constraints=cons)
min_df = pd.DataFrame({'Asset' : df.columns,
'max_point' : mins.x,
'max_value': -1 * average_daily_return_obj(mins.x)
}).set_index('Asset')
min_df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Non-Convex example
Step2: The problem with such function is the fact it is not convex. Hence, starting from initial guess
Step3: We can see that Basin Hopping is able to find the global minimum instead of previous methods.
Step4: We can see that all optimizers find similar minima, although Powell does a better job this time.
Step5: Pretty close the other ones.
Step6: Bounds and contraints
Step7: We know that the global minima is (1,1,1,1) so we should not have sub-optimal solution requiring the constraints that
Step8: We can see that no one handled this constraint properly.
Step9: This is a very interesting and disastrous result. The constraints lead us to a sub-optimal solution even
Step10: Better ... even if not the optimal solution (1,1,1,1,1). This is pretty important for the use case of
Step11: Finally, let's see how Basin Hopping behaves best working with bonds and constraints.
Step15: Optimizing a portfolio
Step16: 1. Cumulative return
Step17: According to this allocation all the money should be put on GOOG as it is expected as GOOG has the
Step18: As excercise, let's check if Basin Hopping gets the same result.
Step19: This behaviour is very disappointing. It turns out that each iteration of basinhopping rejected the
Step20: According to this allocation all the money should be put on GOOG as it is expected as
Step21: 3. Rsk (Standard deviation of daily return)
Step22: In order to make the optimizer get the right solution we had to multiply
Step23: 4. Sharpe ratio
|
6,997
|
<ASSISTANT_TASK:>
Python Code:
# Start by importing torch
import torch
# Construct a bunch of ones
some_ones = torch.ones(2, 2)
print(some_ones)
# Construct a bunch of zeros
some_zeros = torch.zeros(2, 2)
print(some_zeros)
# Construct some normally distributed values
some_normals = torch.randn(2, 2)
print(some_normals)
torch_tensor = torch.randn(5, 5)
numpy_ndarray = torch_tensor.numpy()
back_to_torch = torch.from_numpy(numpy_ndarray)
# Create two tensors
a = torch.randn(5, 5)
b = torch.randn(5, 5)
print(a)
print(b)
# Indexing by i,j
another_tensor = a[2, 2]
print(another_tensor)
# The above returns a tensor type! To get the python value:
python_value = a[2, 2].item()
print(python_value)
# Getting a whole row or column or range
first_row = a[0, :]
first_column = a[:, 0]
combo = a[2:4, 2:4]
print(combo)
# Addition
c = a + b
# Elementwise multiplication: c_ij = a_ij * b_ij
c = a * b
# Matrix multiplication: c_ik = a_ij * b_jk
c = a.mm(b)
# Matrix vector multiplication
c = a.matmul(b[:, 0])
a = torch.randn(5, 5)
print(a.size())
vec = a[:, 0]
print(vec.size())
# Matrix multiple 5x5 * 5x5 --> 5x5
aa = a.mm(a)
# matrix vector 5x5 * 5 --> 5
v1 = a.matmul(vec)
print(v1)
print(1, vec)
vec_as_matrix = vec.view(5, 1)
print(2, vec_as_matrix)
v2 = a.mm(vec_as_matrix)
print(v2)
# Add one to all elements
a.add_(1)
# Divide all elements by 2
a.div_(2)
# Set all elements to 0
a.zero_()
# Add a dummy dimension, e.g. (n, m) --> (n, m, 1)
a = torch.randn(10, 10)
# At the end
print(a.unsqueeze(-1).size())
# At the beginning
print(a.unsqueeze(0).size())
# In the middle
print(a.unsqueeze(1).size())
# What you give you can take away
print(a.unsqueeze(0).squeeze(0).size())
# View things differently, i.e. flat
print(a.view(100, 1).size())
# Or not flat
print(a.view(50, 2).size())
# Copy data across a new dummy dimension!
a = torch.randn(2)
a = a.unsqueeze(-1)
print(a)
print(a.expand(2, 3))
# Check if you have it
do_i_have_cuda = torch.cuda.is_available()
if do_i_have_cuda:
print('Using fancy GPUs')
# One way
a = a.cuda()
a = a.cpu()
# Another way
device = torch.device('cuda')
a = a.to(device)
device = torch.device('cpu')
a = a.to(device)
else:
print('CPU it is!')
# Batched matrix multiply
a = torch.randn(10, 5, 5)
b = torch.randn(10, 5, 5)
# The same as for i in 1 ... 10, c_i = a[i].mm(b[i])
c = a.bmm(b)
print(c.size())
# A tensor that will remember gradients
x = torch.randn(1, requires_grad=True)
print(x)
print(x.grad)
y = x.exp()
print(y.grad)
y.backward()
print(y.grad)
print(x.grad)
import math
print(x.grad, y, math.exp(x[0]))
# Compute another thingy with x.
z = x * 2
z.backward()
print(x.grad) # Should be 2! But it will be 2 + e^x.
x_a = torch.randn(1, requires_grad=True)
x_b = torch.randn(1, requires_grad=True)
x = x_a * x_b
x1 = x ** 2
x2 = 1 / x1
x3 = x2.exp()
x4 = 1 + x3
x5 = x4.log()
x6 = x5 ** (1/3)
x6.backward()
print(x_a.grad)
print(x_b.grad)
x = torch.randn(1, requires_grad=True)
y = torch.tanh(x)
y.backward()
print(x.grad)
# Set our random seeds
import random
import numpy as np
def set_seed(seed):
    """Seed every RNG in use (python, numpy, torch, and CUDA when available)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
# Get ourselves a simple dataset
from sklearn.datasets import make_classification
set_seed(7)
X, Y = make_classification(n_features=2, n_redundant=0, n_informative=1, n_clusters_per_class=1)
print('Number of examples: %d' % X.shape[0])
print('Number of features: %d' % X.shape[1])
# Take a peak
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.scatter(X[:, 0], X[:, 1], marker='o', c=Y, s=25, edgecolor='k')
plt.show()
# Convert data to PyTorch
X, Y = torch.from_numpy(X), torch.from_numpy(Y)
# Gotcha: "Expected object of scalar type Float but got scalar type Double"
# If you see this it's because numpy defaults to Doubles whereas pytorch has floats.
X, Y = X.float(), Y.float()
# Define dimensions
num_feats = 2
hidden_size = 100
num_outputs = 1
# Learning rate
eta = 0.1
num_steps = 1000
# Input to hidden weights
W1 = torch.randn(hidden_size, num_feats, requires_grad=True)
b1 = torch.zeros(hidden_size, requires_grad=True)
# Hidden to output
W2 = torch.randn(num_outputs, hidden_size, requires_grad=True)
b2 = torch.zeros(num_outputs, requires_grad=True)
# Group parameters
parameters = [W1, b1, W2, b2]
# Get random order
indices = torch.randperm(X.size(0))
print(indices)
# Keep running average losses for a learning curve?
avg_loss = []
# Run!
for step in range(num_steps):
# Get example
i = indices[step % indices.size(0)]
x_i, y_i = X[i], Y[i]
# Run example
hidden = torch.relu(W1.matmul(x_i) + b1)
y_hat = torch.sigmoid(W2.matmul(hidden) + b2)
# Compute loss binary cross entropy: -(y_i * log(y_hat) + (1 - y_i) * log(1 - y_hat))
# Epsilon for numerical stability
eps = 1e-6
loss = -(y_i * (y_hat + eps).log() + (1 - y_i) * (1 - y_hat + eps).log())
# Add to our running average learning curve. Don't forget .item()!
if step == 0:
avg_loss.append(loss.item())
else:
old_avg = avg_loss[-1]
new_avg = (loss.item() + old_avg * len(avg_loss)) / (len(avg_loss) + 1)
avg_loss.append(new_avg)
# Zero out all previous gradients
for param in parameters:
# It might start out as None
if param.grad is not None:
# In place
param.grad.zero_()
# Backward pass
loss.backward()
# Update parameters
for param in parameters:
# In place!
param.data = param.data - eta * param.grad
plt.plot(range(num_steps), avg_loss)
plt.ylabel('Loss')
plt.xlabel('Step')
plt.show()
import torch.nn as nn
# Linear layer: in_features, out_features
linear = nn.Linear(10, 10)
print(linear)
# Convolution layer: in_channels, out_channels, kernel_size, stride
conv = nn.Conv2d(1, 20, 5, 1)
print(conv)
# RNN: num_inputs, num_hidden, num_layers
rnn = nn.RNN(10, 10, 1)
print(rnn)
print(linear.weight)
print([k for k,v in conv.named_parameters()])
# Make our own model!
# Make our own model!
class Net(nn.Module):
    """LeNet-style CNN for 28x28 single-channel images, emitting log-probs over 10 classes."""
    def __init__(self):
        super(Net, self).__init__()
        # Two conv stages (each followed by 2x2 max-pooling in forward).
        self.conv1 = nn.Conv2d(1, 20, 5, 1)   # 1 -> 20 maps, 5x5 kernel, stride 1
        self.conv2 = nn.Conv2d(20, 50, 5, 1)  # 20 -> 50 maps, 5x5 kernel, stride 1
        # Classifier head over the flattened 4x4x50 feature volume.
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)
    def forward(self, x):
        features = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        features = F.max_pool2d(F.relu(self.conv2(features)), 2, 2)
        flat = features.view(-1, 4*4*50)
        hidden = F.relu(self.fc1(flat))
        return F.log_softmax(self.fc2(hidden), dim=1)
# Initialize it
model = Net()
import torch.optim as optim
# Initialize with model parameters
optimizer = optim.SGD(model.parameters(), lr=0.01)
import tqdm
import torch.nn.functional as F
def train(model, train_loader, optimizer, epoch):
    """Run one training epoch over ``train_loader`` and print the mean NLL loss."""
    model.train()  # enable training-only behaviour (e.g. dropout)
    running_loss = 0
    for batch, labels in tqdm.tqdm(train_loader):
        optimizer.zero_grad()  # gradients accumulate by default; reset per step
        log_probs = model(batch)
        # Negative log likelihood loss function
        loss = F.nll_loss(log_probs, labels)
        loss.backward()  # backward pass
        running_loss += loss.item()
        optimizer.step()  # parameter update
    # Print average loss
    print("Train Epoch: {}\t Loss: {:.6f}".format(epoch, running_loss / len(train_loader)))
def test(model, test_loader):
    """Evaluate ``model`` on ``test_loader``; print average loss and accuracy."""
    model.eval()  # switch off training-only behaviour (e.g. dropout)
    total_loss = 0
    num_correct = 0
    with torch.no_grad():  # no gradient bookkeeping needed for evaluation
        for batch, labels in test_loader:
            log_probs = model(batch)
            # Sum (not mean) the batch losses so we can average over the dataset.
            total_loss += F.nll_loss(log_probs, labels, reduction='sum').item()
            guesses = log_probs.argmax(dim=1, keepdim=True)  # index of the max log-probability
            num_correct += guesses.eq(labels.view_as(guesses)).sum().item()
    total_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        total_loss, num_correct, len(test_loader.dataset),
        100. * num_correct / len(test_loader.dataset)))
from torchvision import datasets, transforms
# See the torch DataLoader for more details.
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=32, shuffle=True)
for epoch in range(1, 10 + 1):
train(model, train_loader, optimizer, epoch)
test(model, test_loader)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tensors
Step2: PyTorch tensors and NumPy ndarrays even share the same memory handles, so you can switch between the two types essentially for free
Step3: Like NumPy, there are a zillion different operations you can do with tensors. Best thing to do is to go to https
Step4: In-place operations exist to, generally denoted by a trailing '' (e.g. my_tensor.my_inplace_function).
Step5: Manipulate dimensions...
Step6: If you have a GPU...
Step7: And many more!
Step8: Autograd
Step9: At first the 'grad' parameter is None
Step10: Let's do an operation. Take y = e^x.
Step11: To run the gradient computing magic, call '.backward()' on a variable.
Step12: For all dependent variables {x_1, ..., x_n} that were used to compute y, dy/x_i is computed and stored in the x_i.grad field.
Step13: Important! Remember to zero gradients before subsequent calls to backward().
Step14: Also important! Under the hood PyTorch stores all the stuff required to compute gradients (call stack, cached values, etc). If you want to save a variable just to keep it around (say for logging or plotting) remember to call .item() to get the python value and free the PyTorch machinery memory.
Step15: We'll train a one layer neural net to classify this dataset. Let's define the parameter sizes
Step16: And now run a few steps of SGD!
Step17: torch.nn
Step18: A note on convolution sizes
Step19: Updating is now as easy as
Step20: Testing loops are similar.
Step21: MNIST
|
6,998
|
<ASSISTANT_TASK:>
Python Code:
import json
import traceback

from pyspark import SparkContext
from pyspark.ml.feature import VectorAssembler
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.sql import SQLContext
from pyspark.sql.functions import explode
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils

#custom modules
import MySQLConnection
# IMPORTANT: MUST use class paths when using spark-submit:
#   $SPARK_HOME/bin/spark-submit --packages org.apache.spark:spark-streaming-kafka_2.10:1.6.2,mysql:mysql-connector-java:5.1.28 ProcessSensorReadings.py
def writeLumbarReadings(time, rdd):
    """Score one micro-batch of lumbar sensor readings and persist it to MySQL.

    Flattens the nested actual/setPoint orientation JSON, predicts the posture
    (positionID) with the pre-trained random forest using the actual pitch as
    the single feature, and appends the joined rows to biosensor.SensorReadings.
    Empty batches are skipped.
    """
    try:
        # Convert RDDs of the words DStream to DataFrame and run SQL query
        connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
        sqlContext = SQLContext(rdd.context)
        if rdd.isEmpty() == False:
            lumbarReadings = sqlContext.jsonRDD(rdd)
            lumbarReadingsIntermediate = lumbarReadings.selectExpr("readingID","readingTime","deviceID","metricTypeID","uomID","actual.y AS actualYaw","actual.p AS actualPitch","actual.r AS actualRoll","setPoints.y AS setPointYaw","setPoints.p AS setPointPitch","setPoints.r AS setPointRoll")
            assembler = VectorAssembler(
                    inputCols=["actualPitch"], # Must be in same order as what was used to train the model. Testing using only pitch since model has limited dataset.
                    outputCol="features")
            lumbarReadingsIntermediate = assembler.transform(lumbarReadingsIntermediate)
            predictions = loadedModel.predict(lumbarReadingsIntermediate.map(lambda x: x.features))
            predictionsDF = lumbarReadingsIntermediate.map(lambda x: x.readingID).zip(predictions).toDF(["readingID","positionID"])
            combinedDF = lumbarReadingsIntermediate.join(predictionsDF, lumbarReadingsIntermediate.readingID == predictionsDF.readingID).drop(predictionsDF.readingID)
            combinedDF = combinedDF.drop("features")
            combinedDF.show()
            combinedDF.write.jdbc("jdbc:mysql://localhost/biosensor", "SensorReadings", properties=connectionProperties)
    except Exception:
        # Bug fix: a bare `except: pass` swallowed every error (including
        # SystemExit/KeyboardInterrupt) and made failures invisible.  Stay
        # best-effort per batch, but log the problem.
        traceback.print_exc()
def writeLumbarTrainingReadings(time, rddTraining):
    """Persist one micro-batch of labelled training readings to MySQL.

    Training readings already carry the true positionID, so no prediction is
    needed; rows are flattened and appended to biosensor.SensorTrainingReadings.
    """
    try:
        # Convert RDDs of the words DStream to DataFrame and run SQL query
        connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
        sqlContext = SQLContext(rddTraining.context)
        if rddTraining.isEmpty() == False:
            lumbarTrainingReading = sqlContext.jsonRDD(rddTraining)
            lumbarTrainingReadingFinal = lumbarTrainingReading.selectExpr("deviceID","metricTypeID","uomID","positionID","actual.y AS actualYaw","actual.p AS actualPitch","actual.r AS actualRoll","setPoints.y AS setPointYaw","setPoints.p AS setPointPitch","setPoints.r AS setPointRoll")
            lumbarTrainingReadingFinal.write.jdbc("jdbc:mysql://localhost/biosensor", "SensorTrainingReadings", properties=connectionProperties)
    except Exception:
        # Bug fix: bare `except: pass` hid all failures; log instead while
        # keeping the per-batch best-effort behaviour.
        traceback.print_exc()
if __name__ == "__main__":
    # Driver entry point: one SparkContext plus a 2-second micro-batch streaming context.
    sc = SparkContext(appName="Process Lumbar Sensor Readings")
    ssc = StreamingContext(sc, 2) # 2 second batches
    # Pre-trained posture classifier used by writeLumbarReadings.
    loadedModel = RandomForestModel.load(sc, "../machine_learning/models/IoTBackBraceRandomForest.model")
    #Process Readings
    streamLumbarSensor = KafkaUtils.createDirectStream(ssc, ["LumbarSensorReadings"], {"metadata.broker.list": "localhost:9092"})
    lineSensorReading = streamLumbarSensor.map(lambda x: x[1])  # keep only the Kafka message value (JSON payload)
    lineSensorReading.foreachRDD(writeLumbarReadings)
    #Process Training Readings
    streamLumbarSensorTraining = KafkaUtils.createDirectStream(ssc, ["LumbarSensorTrainingReadings"], {"metadata.broker.list": "localhost:9092"})
    lineSensorTrainingReading = streamLumbarSensorTraining.map(lambda x: x[1])  # keep only the Kafka message value
    lineSensorTrainingReading.foreachRDD(writeLumbarTrainingReadings)
    # Run and then wait for termination signal
    ssc.start()
    ssc.awaitTermination()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Script to Process the Sensor Readings - ProcessSensorReadings.py
Step2: The "writeLumbarReadings" method takes the rdd received from Spark Streaming as an input. It then extracts the JSON data and converts to a SQLContext dataframe.
Step3: The "writeLumbarTrainingReadings" method also accepts an RDD from Spark Streaming but does not need to do any machine learning processing since we already know the posture from the JSON data.
Step4: In the main part of the script the machine learning model is loaded and then two Spark StreamingContexts are created to listen for either actual device readings or training readings. The appropriate methods are then called upon receipt.
|
6,999
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns #sets up styles and gives us more plotting options
URL = "tmrw.co" # User-entered website
# Time period 17th Jan - 16th April (arbitrary )
# API credentials
# Email address 705762800217-compute@developer.gserviceaccount.com
# Key IDs 948ee8e2a420ef14a5d5a29bd35104fe2f1e6ed4
# open file. It is requested via API explorer using request parameters:
#Account: TMRW Tech Hub
#Property: TMRW
#View: All Web Site Data
#ids: ga:123303369
#start-date: 2017-01-15
#end-date: yesterday
#metrics
#ga:sessions
#ga:percentNewSessions
#ga:bounceRate
#ga:pageviewsPerSession
#ga:avgSessionDuration
#ga:goal1ConversionRate
#ga:goal1Completions
#dimensions
#ga:city
#ga:userAgeBracket
#sort
#ga:goal1ConversionRate
# Open file
TMRW_users= pd.read_csv("files/TMRW_geo_loc_API.csv")
#TMRW_users[TMRW_users.Age=='55-64']
TMRW_users
# rename columns
TMRW_users.columns=['city','age', 'new_sessions','sessions','bounce_rate','asd','goal1','goal1CR','PPS']
TMRW_users=TMRW_users.sort_values('goal1CR')
TMRW_users_filter = TMRW_users[TMRW_users.sessions > 80]
TMRW_users_filter
TMRW_users_filter.describe()
# will need to convert dnumbers into tim
#TMRW_users_filter_new =pd.to_datetime(TMRW_users_filter['asd'], format='%H:%M')
# Check if the number of sessions is enough for analysis
if TMRW_users.sessions.sum() < 80:
print("Error")
#if sessions are too small remove
#if one conversion - remove
# 1. Too small sessions
TMRW_users.describe().loc['mean', 'sessions']
#algo for bucketing into varios secsions:
#buckets by goal1CR
TMRW_users_goal1CR = TMRW_users_filter.nlargest(3, 'goal1CR')
TMRW_users_goal1CR_gCity = TMRW_users_goal1CR.groupby(['city']).mean() #this is not too correct- average of proportion, but ok for now
TMRW_users_goal1CR_gAge = TMRW_users_goal1CR.groupby(['age']).mean()
TMRW_users_goal1CR_gCity
# Inspect the top-converting buckets.
# Bug fixes: `TMRW_users_goal1CR.loc[]` and `TMRW_users_goal1CR.[: , 'goal1CR']`
# were syntax errors, and `print(+"...")` applied unary plus to a string
# (TypeError).  Reconstructed into a coherent report + histogram.
TMRW_users_goal1CR.loc[11, 'age']
TMRW_users_goal1CR_gAge.loc[:, 'goal1']
# The most converting audience
top_age = TMRW_users_goal1CR_gAge['goal1CR'].idxmax()  # age bracket with the best conversion rate
print(str(top_age) + " is most converting Demographic category")
y = TMRW_users_goal1CR.loc[:, 'goal1CR']
plt.hist(y)
plt.title("Top converting buckets")
plt.xlabel("Conv rate")
plt.ylabel("Frequency")
plt.show()
TMRW_users_bounce_rate = TMRW_users_filter.nlargest(3, 'bounce_rate')
TMRW_users_bounce_rate
#if the_largest traffic source = ('not_set') then output error ''
TMRW_users_PPS = TMRW_users_filter.nlargest(3, 'PPS')
TMRW_users_PPS
TMRW_users_agcities=TMRW_users.groupby(["city"]).sum()
TMRW_users_agcities
selected=TMRW_users_agcities.loc[:,'Goal 1 Completions']
selected
labels = selected.index
sizes = selected
# colours are taken from http://tools.medialab.sciences-po.fr/iwanthue/
colors = ['#1f394d','#2a7585', '#163c45', '#004a6e']
explode = (0, 0, 0, 0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=False, startangle=90)
plt.axis('equal')
plt.title('Cities by conversion')
plt.show()
# Conversion traffic
# Generate text
print (" segment converts best")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
input_mob = pd.read_csv('files/TMRW_mob.csv')
input_mob.columns=['device','sessions','%news', 'new_users','bounce_rate','PPS', 'ASD', 'goal1CR','goal1']
input_mob = input_mob.set_index('device')
def p2f(x):
    """Convert a percentage string such as '45%' into its fractional value (0.45)."""
    numeric_part = x.strip('%')
    return float(numeric_part) / 100
input_mob
conv_increase = float(input_mob.loc['mobile','sessions']) * p2f(input_mob.loc['desktop','goal1CR']) - float(input_mob.loc['mobile','goal1'])
conv_increase=int(conv_increase)
# Check if data is valid
is_valid = True
output_chart_data = input_mob.loc['mobile',['sessions','goal1']]
# convert percentages into float
mob_CR = p2f(input_mob.loc['desktop','goal1CR'])
desk_CR = p2f(input_mob.loc['mobile','goal1CR'])
def funnel_cart():
    """Placeholder for a future funnel chart; currently does nothing."""
    return None
# calculate difference
mob_dif = mob_CR / desk_CR
# if mobile goal1CR is less by more than 20% then reult is failed
if mob_dif > 1.5:
#funnel_chart()
output_chart_data
else:
is_valid = False
# if not then build chart
# http://stackoverflow.com/questions/21397549/stack-bar-plot-in-matplotlib-and-add-label-to-each-section-and-suggestions
# http://pandas.pydata.org/pandas-docs/stable/visualization.html#bar-plots
#mob_chart = output_chart_data
x = {1}
y = {100,10}
#ou = pd.DataFrame([100,10], columns=['sessions', 'goal1'])
output_chart_data.plot.barh(stacked=True)
#plt.barh(x,y,'stacked')
mob_conv = input_mob.loc['mobile','goal1CR']
plt.xlabel('Visits')
plt.title('Only %s of all mobile visitors end up completing a conversion' % mob_conv)
#need to specify analytics time perdiod
print("Have %s more conversions per month by optmiising mobile UX" % conv_increase)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Settings
Step2: Skipped step
Step3: Mobile analytics
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.