| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, 67–124k chars, nullable ⌀) | cleaned_prompt (string, 168–30.3k chars, nullable ⌀) |
|---|---|---|
3,800
|
<ASSISTANT_TASK:>
Python Code:
# beam search
from math import log

def beam_search_decoder(data, k):
sequences = [[list(), 0.0]]
# walk over each step in sequence
for row in data:
all_candidates = list()
# expand each current candidate
for i in range(len(sequences)):
seq, score = sequences[i]
for j in range(len(row)):
candidate = [seq + [j], score - log(row[j])]
all_candidates.append(candidate)
# order all candidates by score
ordered = sorted(all_candidates, key=lambda tup:tup[1])
# select k best
sequences = ordered[:k]
return sequences
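# Usage sketch (illustrative, not part of the original task): each row of
# `data` holds per-symbol probabilities for one timestep, and k is the beam
# width. Scores are summed negative log-probabilities, so smaller is better.
data = [[0.1, 0.2, 0.7],
        [0.6, 0.2, 0.2],
        [0.3, 0.4, 0.3]]
for seq, score in beam_search_decoder(data, 2):
    print(seq, score)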
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
3,801
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-3', 'sandbox-3', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
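# Illustrative example (hypothetical value, not from the original notebook):
# a free-text property such as this overview would be filled in as
#     DOC.set_value("One-paragraph overview of the atmosphere component.")
# The same set_id / set_value pattern repeats for every property below.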
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
3,802
|
<ASSISTANT_TASK:>
Python Code:
1 / 2
1. / 2
4. + 4.0**(3.0/2)
4 + 3j
import math
math?
math.sqrt(2.0)
math.sin(math.pi / 2.0)
from math import *
sin(pi)
num_students = 80
room_capacity = 85
(room_capacity - num_students) / room_capacity * 100.0
float(room_capacity - num_students) / float(room_capacity) * 100.0
x = 4
if x > 5:
print "x is greater than 5"
elif x < 5:
print "x is less than 5"
else:
print "x is equal to 5"
for i in range(5):
print i
for i in range(3,7):
print i
for animal in ['cat', 'dog', 'chinchilla']:
print animal
for n in range(2, 10):
is_prime = True
for x in range(2, n):
if n % x == 0:
print n, 'equals', x, '*', n / x
is_prime = False
break
if is_prime:
print "%s is a prime number" % (n)
def my_print_function(x):
print x
my_print_function(3)
def my_add_function(a, b):
return a + b, b
my_add_function(3.0, 5.0)
def my_crazy_function(a, b, c=1.0):
d = a + b**c
return d
my_crazy_function(2.0, 3.0), my_crazy_function(2.0, 3.0, 2.0), my_crazy_function(2.0, 3.0, c=2.0)
def my_other_function(a, b, c=1.0):
return a + b, a + b**c, a + b**(3.0 / 7.0)
my_other_function(2.0, 3.0, c=2.0)
def fibonacci(n):
    """Return a list of the Fibonacci sequence up to n"""
values = [0, 1]
while values[-1] <= n:
values.append(values[-1] + values[-2])
print values
return values
fibonacci(100)
fibonacci?
import numpy
my_array = numpy.array([[1, 2], [3, 4]])
print my_array
numpy.linspace(-1, 1, 10)
numpy.zeros([3, 3])
numpy.ones([2, 3, 2])
numpy.empty([2,3])
my_array[0, 1]
my_array[:,0]
my_vec = numpy.array([[1], [2]])
print my_vec
numpy.dot(my_array, my_vec)
numpy.cross?
my_array * my_vec
A = numpy.array([[1, 2, 3], [4, 5, 6]])
print "A Shape = ", A.shape
print A
B = A.reshape((6,1))
print "A Shape = ", A.shape
print "B Shape = ", B.shape
print B
numpy.tile(A, (2,2))
A.transpose()
A = numpy.array([[1,2,3],[4,5,6],[7,8,9]])
print A
print A.shape
B = numpy.arange(1,10)
print B
print B.reshape((3,3))
B.reshape?
C = B.reshape((3,3))
print A * C
numpy.dot(A, C)
x = numpy.linspace(-2.0 * numpy.pi, 2.0 * numpy.pi, 62)
y = numpy.sin(x)
print y
x = numpy.linspace(-1, 1, 20)
numpy.sqrt(x)
x = numpy.linspace(-1, 1, 20, dtype=complex)
numpy.sqrt(x)
numpy.linalg.norm(x)
numpy.linalg.norm?
M = numpy.array([[0,2],[8,0]])
b = numpy.array([1,2])
print M
print b
x = numpy.linalg.solve(M,b)
print x
lamda,V = numpy.linalg.eig(M)
print lamda
print V
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python returns the floor of the 1 / 2 because we gave it integers to divide. It then interprets the result as also needing to be an integer. If one of the numbers was a decimal number we would have a decimal number as a result (really these are floating point numbers float).
Step2: In compound statements it can become more difficult to figure out where possible rounding might occur so be careful when you evaluate statements.
Step3: Python also understands imaginary numbers
Step4: Some of the more advanced mathematical functions are stored in modules. In order to use these functions we must first import them into our notebook and then use them.
Step5: Variables
Step6: Note that we do not get what we expect from this expression as we expected from above. What would we have to change to get this to work?
Step7: Control Flow
Step8: for allows us to repeat tasks over a range of values or objects.
Step10: Functions
Step11: NumPy
Step12: Constructors
Step13: Access
Step14: Manipulations
Step15: Mathematical Functions
Step16: Linear Algebra
|
3,803
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed from earlier notebook cells (not shown in this extract):
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
# `rinexobs` (the RINEX observation reader) and `fmt` (a matplotlib date
# formatter for the time axis) are assumed to come from earlier cells.
f1, f2 = 1575.42E6, 1227.6E6  # GPS L1/L2 frequencies (Hz); used below before their later redefinition
files = glob("/home/greg/Documents/Summer Research/rinex files/ma*")
poop=rinexobs(files[6])
plt.figure(figsize=(14,14))
ax1 = plt.subplot(211)
ax1.xaxis.set_major_formatter(fmt)
plt.plot(2.85*(poop[:,23,'P2','data']*1.0E9/3.0E8-poop[:,23,'C1','data']*1.0E9/3.0E8)[10:],
'.',markersize=3,label='pr tec')
plt.plot(2.85E9*((poop[:,23,'L1','data'])/f1-(poop[:,23,'L2','data'])/f2)[10:],
'.',markersize=3,label='ph tec')
plt.title('mah13 sv23, biased')
plt.xlabel('time')
plt.ylabel('TECu')
plt.legend()
plt.grid()
plt.show()
sl=200
plt.figure(figsize=(15,15))
ax1=plt.subplot(211)
ax1.xaxis.set_major_formatter(fmt)
plt.plot(2.85E9*(poop[:,23,'P2','data']/3.0E8
-poop[:,23,'C1','data']/3.0E8),'b.',label='prtec',markersize=3)
for i in range(int(len(poop[:,23,'L1','data'])/sl)):
phtec = 2.85E9*(poop[poop.labels[sl*i:sl*(i+1)],23,'L1','data']/f1
-poop[poop.labels[sl*i:sl*(i+1)],23,'L2','data']/f2)
prtec = 2.85E9*(poop[poop.labels[sl*i:sl*(i+1)],23,'P2','data']/3.0E8
-poop[poop.labels[sl*i:sl*(i+1)],23,'C1','data']/3.0E8)
b = np.average((phtec-prtec)[np.logical_not(np.isnan(phtec-prtec))])
plt.plot(phtec-b,'r-',linewidth=3,label='')
plt.axis([poop.labels[10],poop.labels[10000],-50,50])
plt.title('bias corrected phase data')
plt.xlabel('time')
plt.ylabel('TECu')
plt.grid()
plt.legend()
plt.show()
f1 = 1575.42E6
f2 = 1227.6E6
svn = 23
L1 = -1*3.0E8*poop[:,svn,'L1','data']/f1 #(1a)
L2 = -1*3.0E8*poop[:,svn,'L2','data']/f2 #(1b)
P1 = poop[:,svn,'C1','data'] #(1c)
P2 = poop[:,svn,'P2','data'] #(1d)
#wide lane combination
wld = 3.0E8/(f1-f2)
Ld = (f1*L1-f2*L2)/(f1-f2) #(3)
prd = (f1*P1+f2*P2)/(f1+f2) #(4)
bd = (Ld-prd)/wld #(5)
#wide lane cycle slip detection
bdmean = bd[1]
rms = 0
nans_in_a_row = 0
bdindex=2
ph_con_arc=0
avb=[]
rmsb=[]
lowers=[]
uppers=[]
inarc=False
while(nans_in_a_row<10):
if not np.isnan(bd[bdindex]):
        # use the while-loop index bdindex here (the original used a stale
        # `i` left over from the plotting loop above)
        if(bd[bdindex]>4*np.sqrt(rms)):
            avb.append(bdmean)
            rmsb.append(np.sqrt(rms))
            bdmean=bd[bdindex+1]
rms=0
ph_con_arc=0
uppers.append(bdindex)
inarc=False
else:
if not inarc:
inarc=True
lowers.append(bdindex)
ph_con_arc+=1
nans_in_a_row=0
rms = rms+((bd[bdindex]-bdmean)**2-rms)/(ph_con_arc) #(8b)
bdmean = bdmean+(bd[bdindex]-bdmean)/(ph_con_arc) #(8a)
else:
if inarc:
avb.append(bdmean)
rmsb.append(rms)
ph_con_arc=0
bdmean=bd[bdindex+1]
rms=0
inarc=False
uppers.append(bdindex)
nans_in_a_row += 1
bdindex+=1
print('ended at index {}'.format(bdindex-nans_in_a_row))
lengths=[a-b for a,b in zip(uppers,lowers)]
eim = [np.sqrt(a)/np.sqrt(b-1) for a,b in zip(rmsb,lengths)]
#ionospheric combination
LI = L1-L2 #(6)
PI = P2-P1 #(7)
#ionospheric cycle slip detection
# get x and y vectors
mask=~np.isnan(PI)
x = np.arange(len(PI[mask]))
y = PI[mask]
# calculate polynomial
z = np.polyfit(x, y, 6)
f = np.poly1d(z)
# calculate new x's and y's
x_new = np.linspace(x[0], x[-1], len(x))
Q = f(x_new)
residual = LI[mask]-Q
print(lowers,uppers)
plt.figure(figsize=(16,16))
plt.subplot(211).xaxis.set_major_formatter(fmt)
plt.plot(2.85E9*(poop[:,23,'P2','data']/3.0E8
-poop[:,23,'C1','data']/3.0E8),'b.',label='prtec',markersize=3)
plt.plot(2.85E9*((poop[:,23,'L1','data'])/f1-(poop[:,23,'L2','data'])/f2)[10:],
'g.',markersize=3,label='ph tec')
for i in range(len(lowers)):
phtec = 2.85E9*(poop[poop.labels[lowers[i]:uppers[i]],23,'L1','data']/f1
-poop[poop.labels[lowers[i]:uppers[i]],23,'L2','data']/f2)
prtec = 2.85E9*(poop[poop.labels[lowers[i]:uppers[i]],23,'P2','data']/3.0E8
-poop[poop.labels[lowers[i]:uppers[i]],23,'C1','data']/3.0E8)
b = np.average((phtec-prtec)[np.logical_not(np.isnan(phtec-prtec))])
plt.plot(phtec-b,'r-',linewidth=2,label='')
plt.xlim([poop.labels[1],poop.labels[8000]])
plt.legend()
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This plot is uncorrected; it is a remake of the plot in Anthea's email on Wed, Jun 15, 2016 at 7
Step2: Try out some of the techniques from "An Automatic Editing Algorithm for GPS Data" by Blewitt (a small sketch of the wide-lane screening follows below)
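As a quick illustration of the Blewitt-style wide-lane screening used above, here is a self-contained sketch on synthetic data (all observation values are made up; only the combination formulas (3)-(5) come from the code):
import numpy as np

f1, f2 = 1575.42E6, 1227.6E6  # GPS L1/L2 carrier frequencies (Hz)
c = 3.0E8
rng = np.random.default_rng(0)

# synthetic phase (m) and pseudorange (m) observations with a fake cycle slip
n = 100
L1 = rng.normal(0, 0.01, n); L2 = rng.normal(0, 0.01, n)
P1 = rng.normal(0, 1.0, n);  P2 = rng.normal(0, 1.0, n)
L1[60:] += c / f1 * 5  # inject a 5-cycle slip on L1

wld = c / (f1 - f2)                    # wide-lane wavelength
Ld = (f1 * L1 - f2 * L2) / (f1 - f2)   # wide-lane phase combination (3)
prd = (f1 * P1 + f2 * P2) / (f1 + f2)  # narrow-lane pseudorange combination (4)
bd = (Ld - prd) / wld                  # wide-lane bias in cycles (5)

# a jump of several cycles in bd flags the slip
print(np.round(bd[58:63], 1))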
|
3,804
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
import getpass
import pdvega
# for configuring connection
from configobj import ConfigObj
import os
%matplotlib inline
# Create a database connection using settings from config file
config='../db/config.ini'
# connection info
conn_info = dict()
if os.path.isfile(config):
config = ConfigObj(config)
conn_info["sqluser"] = config['username']
conn_info["sqlpass"] = config['password']
conn_info["sqlhost"] = config['host']
conn_info["sqlport"] = config['port']
conn_info["dbname"] = config['dbname']
conn_info["schema_name"] = config['schema_name']
else:
conn_info["sqluser"] = 'postgres'
conn_info["sqlpass"] = ''
conn_info["sqlhost"] = 'localhost'
conn_info["sqlport"] = 5432
conn_info["dbname"] = 'eicu'
conn_info["schema_name"] = 'public,eicu_crd'
# Connect to the eICU database
print('Database: {}'.format(conn_info['dbname']))
print('Username: {}'.format(conn_info["sqluser"]))
if conn_info["sqlpass"] == '':
# try connecting without password, i.e. peer or OS authentication
try:
        if (conn_info["sqlhost"] == 'localhost') & (str(conn_info["sqlport"]) == '5432'):  # compare as string; the fallback port is an int
con = psycopg2.connect(dbname=conn_info["dbname"],
user=conn_info["sqluser"])
else:
con = psycopg2.connect(dbname=conn_info["dbname"],
host=conn_info["sqlhost"],
port=conn_info["sqlport"],
user=conn_info["sqluser"])
except:
conn_info["sqlpass"] = getpass.getpass('Password: ')
con = psycopg2.connect(dbname=conn_info["dbname"],
host=conn_info["sqlhost"],
port=conn_info["sqlport"],
user=conn_info["sqluser"],
password=conn_info["sqlpass"])
query_schema = 'set search_path to ' + conn_info['schema_name'] + ';'
patientunitstayid = 141168
query = query_schema + """
select *
from pasthistory
where patientunitstayid = {}
order by pasthistoryoffset
""".format(patientunitstayid)
df = pd.read_sql_query(query, con)
df.head()
dx = 'COPD'
query = query_schema + """
select
  pasthistoryvalue, count(*) as n
from pasthistory
where pasthistoryvalue ilike '%{}%'
group by pasthistoryvalue
""".format(dx)
df_copd = pd.read_sql_query(query, con)
df_copd
dx = 'COPD'
query = query_schema + """
select
  patientunitstayid, count(*) as n
from pasthistory
where pasthistoryvalue ilike '%{}%'
group by patientunitstayid
""".format(dx)
df_copd = pd.read_sql_query(query, con)
print('{} unit stays with {}.'.format(df_copd.shape[0], dx))
query = query_schema + """
with t as
(
select distinct patientunitstayid
from pasthistory
)
select
  pt.hospitalid
  , count(distinct pt.patientunitstayid) as number_of_patients
  , count(distinct t.patientunitstayid) as number_of_patients_with_tbl
from patient pt
left join t
  on pt.patientunitstayid = t.patientunitstayid
group by pt.hospitalid
"""
# the original appended .format(patientunitstayid), but this query has no
# placeholder, so that call was a no-op and is dropped here
df = pd.read_sql_query(query, con)
df['data completion'] = df['number_of_patients_with_tbl'] / df['number_of_patients'] * 100.0
df.sort_values('number_of_patients_with_tbl', ascending=False, inplace=True)
df.head(n=10)
df[['data completion']].vgplot.hist(bins=10,
var_name='Number of hospitals',
value_name='Percent of patients with data')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Examine a single patient
Step5: We can make a few observations
Step7: Hospitals with data available
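One note on the queries above: building SQL with .format() is fine for exploration, but a hedged sketch of the safer parameterized form (using pandas' params argument, which hands values to the psycopg2 driver) looks like this — the diagnosis string is just an example:
# Sketch only: assumes `con` and `query_schema` from the cells above.
dx = 'COPD'
query = query_schema + """
select pasthistoryvalue, count(*) as n
from pasthistory
where pasthistoryvalue ilike %(dx)s
group by pasthistoryvalue
"""
df_copd = pd.read_sql_query(query, con, params={'dx': '%{}%'.format(dx)})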
|
3,805
|
<ASSISTANT_TASK:>
Python Code:
from os import path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.simulation import simulate_evoked
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()
evoked.crop(0.07, 0.08)
# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4])
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False)
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, -pred_evoked], weights='equal')
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2], **plot_params)
plt.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's localize the N100m (using MEG only)
Step2: Calculate and visualise magnetic field predicted by dipole with maximum GOF
Step3: Estimate the time course of a single dipole with fixed position and orientation
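To put numbers on these steps, the fitted Dipole object can be inspected directly; a small sketch using names from the code above (dip, best_idx):
# Inspect the best fit found above.
print('position (m):', dip.pos[best_idx])
print('amplitude (Am):', dip.amplitude[best_idx])
print('GOF (%):', dip.gof[best_idx])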
|
3,806
|
<ASSISTANT_TASK:>
Python Code:
# Assumed context (defined in earlier cells of the original notebook):
# `tableau20` (a color palette list) and the color indices `i_leu`,
# `i_delta`, `i_heu`; bare `legend()`/`savefig()` calls imply `%pylab inline`.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
dir = 'data/testing/cyan_tests/'
man_E1 = dir + 'run-man.csv'
man_R1 = dir + 'run-man-R1.csv'
raw_man_E1 = pd.read_csv(man_E1)
#raw_man_E1 = pd.read_csv(man_R1)
#cyan_16 = dir + 'run-1-fac-16.dat'
#cyan_17 = dir + 'run-1-fac-17.dat'
#cyan_18 = dir + 'run-1-fac-18.dat'
cyan_16 = dir + 'run-1-fac-16-R1.dat'
cyan_17 = dir + 'run-1-fac-17-R1.dat'
cyan_18 = dir + 'run-1-fac-18-R1.dat'
raw_cyan_16 = pd.read_csv(cyan_16,sep='\s+')
raw_cyan_17 = pd.read_csv(cyan_17,sep='\s+')
raw_cyan_18 = pd.read_csv(cyan_18,sep='\s+')
man_LEU_E1 = raw_man_E1[raw_man_E1['Prototype'] == "LEU"].reset_index(drop=True)
man_delta_LEU_E1 = raw_man_E1[raw_man_E1['Prototype'] == "delta_LEU"].reset_index(drop=True)
man_covert_HEU_E1 = raw_man_E1[raw_man_E1['Prototype'] == "covert_HEU"].reset_index(drop=True)
cyan_LEU_tp = raw_cyan_16['Quantity'] - raw_cyan_16['Quantity'].shift(1)
cyan_delta_LEU_tp = raw_cyan_17['Quantity'] - raw_cyan_17['Quantity'].shift(1)
cyan_covert_HEU_tp = raw_cyan_18['Quantity'] - raw_cyan_18['Quantity'].shift(1)
man_LEU_tp_E1 =(man_LEU_E1[' Quantity']- man_LEU_E1[' Quantity'].shift(1))
man_delta_LEU_tp_E1 = man_delta_LEU_E1[' Quantity']- man_delta_LEU_E1[' Quantity'].shift(1)
man_covert_HEU_tp_E1 = man_covert_HEU_E1[' Quantity']- man_covert_HEU_E1[' Quantity'].shift(1)
plt.plot(man_LEU_E1['Time'],(man_LEU_tp_E1), label='Net LEU (manual)', color=tableau20[i_leu])
plt.plot(raw_cyan_16['Time'],(cyan_LEU_tp), label='Net LEU (cyan)', color=tableau20[i_leu+2])
plt.legend(loc = 'best')
plt.xlabel('Time')
plt.ylabel('Throughput (kg)')
plt.title("Regular Diversion, v1.2")
plt.plot(man_delta_LEU_E1['Time'],(man_delta_LEU_tp_E1), label='Delta LEU (man)', color=tableau20[i_delta])
plt.plot(raw_cyan_17['Time'],(cyan_delta_LEU_tp), label='Delta LEU (cyan)', color=tableau20[i_delta+2])
plt.legend(loc = 'best')
plt.xlabel('Time')
plt.ylabel('Throughput (kg)')
plt.title("Regular Diversion, v1.2")
plt.plot(man_covert_HEU_E1['Time'],(man_covert_HEU_tp_E1), label='Covert HEU (man)', color=tableau20[i_heu])
plt.plot(raw_cyan_18['Time'],(cyan_covert_HEU_tp), label='Covert HEU (cyan)', color=tableau20[i_heu+2])
plt.legend(loc = 'best')
plt.xlabel('Time')
plt.ylabel('Throughput (kg)')
plt.title("Regular Diversion, v1.2")
no_seed_16 = dir + '16_time_seed_no.sqlite.dat'
no_seed_18 = dir + '18_time_seed_no.sqlite.dat'
L_seed_16 = dir + '16_time_seed_L1.sqlite.dat'
L_seed_18 = dir + '18_time_seed_L1.sqlite.dat'
H_seed_16 = dir + '16_time_seed_H1.sqlite.dat'
H_seed_18 = dir + '18_time_seed_H1.sqlite.dat'
all_seed_16 = dir + '16_time_seed_yes.sqlite.dat'
all_seed_18 = dir + '18_time_seed_yes.sqlite.dat'
raw_no_seed_16 = pd.read_csv(no_seed_16,sep='\s+')
raw_no_seed_18 = pd.read_csv(no_seed_18,sep='\s+')
raw_L_seed_16 = pd.read_csv(L_seed_16,sep='\s+')
raw_L_seed_18 = pd.read_csv(L_seed_18,sep='\s+')
raw_H_seed_16 = pd.read_csv(H_seed_16,sep='\s+')
raw_H_seed_18 = pd.read_csv(H_seed_18,sep='\s+')
raw_all_seed_16 = pd.read_csv(all_seed_16,sep='\s+')
raw_all_seed_18 = pd.read_csv(all_seed_18,sep='\s+')
no_seed_LEU_tp = raw_no_seed_16['Quantity'] - raw_no_seed_16['Quantity'].shift(1)
L_seed_LEU_tp = raw_L_seed_16['Quantity'] - raw_L_seed_16['Quantity'].shift(1)
H_seed_LEU_tp = raw_H_seed_16['Quantity'] - raw_H_seed_16['Quantity'].shift(1)
all_seed_LEU_tp = raw_all_seed_16['Quantity'] - raw_all_seed_16['Quantity'].shift(1)
#plt.plot(raw_no_seed_16['Time'],(no_seed_LEU_tp), label='Net LEU (no seed)', color=tableau20[i_leu])
plt.plot(raw_L_seed_16['Time'],(L_seed_LEU_tp), label='Net LEU (L seed)', color=tableau20[i_leu+2])
plt.plot(raw_H_seed_16['Time'],(H_seed_LEU_tp), label='Net LEU (H seed)', color=tableau20[i_leu+4])
#plt.plot(raw_all_seed_16['Time'],(all_seed_LEU_tp), label='Net LEU (all seed)', color=tableau20[i_leu+6])
plt.legend(loc = 'best')
plt.xlabel('Time')
plt.ylabel('Throughput (kg)')
plt.title("v1.2")
no_seed_HEU_tp = raw_no_seed_18['Quantity'] - raw_no_seed_18['Quantity'].shift(1)
L_seed_HEU_tp = raw_L_seed_18['Quantity'] - raw_L_seed_18['Quantity'].shift(1)
H_seed_HEU_tp = raw_H_seed_18['Quantity'] - raw_H_seed_18['Quantity'].shift(1)
all_seed_HEU_tp = raw_all_seed_18['Quantity'] - raw_all_seed_18['Quantity'].shift(1)
#plt.plot(raw_no_seed_16['Time'],(no_seed_HEU_tp), label='Net LEU (no seed)', color=tableau20[i_leu])
plt.plot(raw_L_seed_16['Time'],(L_seed_HEU_tp), label='Net LEU (L seed)', color=tableau20[i_leu+2])
plt.plot(raw_H_seed_16['Time'],(H_seed_HEU_tp), label='Net LEU (H seed)', color=tableau20[i_leu+4])
#plt.plot(raw_all_seed_16['Time'],(all_seed_HEU_tp), label='Net LEU (all seed)', color=tableau20[i_leu+6])
plt.legend(loc = 'best')
plt.xlabel('Time')
plt.ylabel('Throughput (kg)')
plt.title("v1.2")
UM_dir = 'data/UM_data/initial_tests_v1.2/'
LEU_file = UM_dir + 'run-1-fac-16.dat'
ylab = "Throughput (kg)"
plot_dir = UM_dir + 'png/'
ps =1
from manip_data import trunc_flt
from manip_data import import_data_cyan
time, LEU_tp = import_data_cyan(LEU_file)
precision=2
bins=30
xmin=31
xmax=35
pre_hist = LEU_tp[:999].plot(kind='hist', bins=bins,color=tableau20[i_delta+1],
alpha=1.0,range=(xmin,xmax),normed=1,label='baseline LEU')
pre_mean = np.mean(LEU_tp[:999])
pre_variance = np.var(LEU_tp[:999])
pre_sigma = np.sqrt(pre_variance)
x = np.linspace(xmin,xmax,100)
plt.plot(x,mlab.normpdf(x,pre_mean,pre_sigma),color=tableau20[i_delta],
label='$\mu$=' + trunc_flt(pre_mean, precision) + ', $\sigma_{\mu}$=' + trunc_flt(pre_sigma, precision))
legend()
plt.xlabel(ylab)
plt.title("Baseline Declared LEU")
if ps == 1:
savefig(plot_dir + 'baseline_LEU.png')
else:
plt.title("Baseline Declared LEU")
post_hist = LEU_tp[1000:].plot(kind='hist', bins=bins,color=tableau20[i_leu+1],
alpha=1.0,range=(xmin,xmax),normed=1,label='declared LEU')
post_mean = np.mean(LEU_tp[1000:])
post_variance = np.var(LEU_tp[1000:])
post_sigma = np.sqrt(post_variance)
x = np.linspace(xmin,xmax,100)
plt.plot(x,mlab.normpdf(x,pre_mean,pre_sigma),color=tableau20[i_delta],
label='$\mu$=' + trunc_flt(pre_mean, precision) + ', $\sigma_{\mu}$=' + trunc_flt(pre_sigma, precision))
plt.plot(x,mlab.normpdf(x,post_mean,post_sigma),color=tableau20[i_leu],
label='$\mu$=' + trunc_flt(post_mean, precision) + ', $\sigma_{\mu}$=' + trunc_flt(post_sigma, precision))
plt.xlabel(ylab)
legend()
plt.xlabel(ylab)
plt.title("Suspect Declared LEU")
if ps == 1:
savefig(plot_dir + 'suspect_LEU.png')
else:
plt.title("Suspect Declared LEU")
det_dir = 'data/testing/v1.3_merge/'
#here 'no_seed' means that HEU does not trade - so should be 'natural LEU demand'
no_seed_16 = det_dir + '16_baseline.sqlite.dat'
no_seed_17 = det_dir + '17_baseline.sqlite.dat'
no_seed_18 = det_dir + '18_baseline.sqlite.dat'
# 'all_seed' means that HEU does trade (using a fixed seed) - so includes effects of diversion
all_seed_16 = det_dir + '16_heu.sqlite.dat'
all_seed_17 = det_dir + '17_heu.sqlite.dat'
all_seed_18 = det_dir + '18_heu.sqlite.dat'
time_no, LEU_no_tp = import_data_cyan(no_seed_16)
time_no, delta_no_tp = import_data_cyan(no_seed_17)
time_yes, LEU_yes_tp = import_data_cyan(all_seed_16)
time_yes, delta_yes_tp = import_data_cyan(all_seed_17)
time_HEU_yes, HEU_yes_tp = import_data_cyan(all_seed_18)
plt.plot(time_no,LEU_no_tp, label='Net LEU (no diversion)', color=tableau20[i_leu])
plt.plot(time_yes,LEU_yes_tp, label='Net LEU (w/diversion)', color=tableau20[i_leu+1])
plt.plot(time_HEU_yes, (HEU_yes_tp)*2000, label='Net LEU (w/diversion)', color=tableau20[i_heu+1])
plt.plot(time_no,delta_no_tp, label='delta LEU (no diversion)', color=tableau20[i_delta])
plt.plot(time_yes,delta_yes_tp, label='delta LEU (w/diversion)', color=tableau20[i_delta+1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check Seeding Random on Time
Step2: For the LEU signal, 'H seed' should match 'no seed' because both of them have a deterministic NormalDist.
Step3: For the HEU signal, 'no seed' should match 'L seed' because both have a deterministic HEU frequency (EveryRandomX); a small throughput helper is sketched after this list.
Step4: Make a histogram of one example plot from dataset sent to UM
Step5: 27-July-2015
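The cells above repeatedly compute throughput as Quantity - Quantity.shift(1); pandas' diff() is the idiomatic equivalent, sketched here:
def throughput(raw):
    """Per-timestep throughput from a cumulative Quantity column."""
    # identical to raw['Quantity'] - raw['Quantity'].shift(1)
    return raw['Quantity'].diff()

# e.g. cyan_LEU_tp = throughput(raw_cyan_16)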
|
3,807
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# You can ignore the pink warning that appears
import itertools
import math
import nltk
import string
nltk.data.path = ['../nltk_data']
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.feature_extraction.text import TfidfVectorizer
# code example from Building Machine Learning Systems with Python (Richert & Coelho)
# - modified slightly by Lynn
import math
def tfidf(t, d, D):
tf = float(d.count(t)) / sum(d.count(w) for w in set(d)) # normalized
# Note his version doesn't use +1 in denominator.
idf = math.log( float(len(D)) / (len([doc for doc in D if t in doc])))
return tf * idf
a, abb, abc = ["a"], ["a", "b", "b"], ["a", "b", "c"] # try adding another c to the last doc!
D = [a, abb, abc]
print(tfidf("a", a, D)) # a is in all of them
print(tfidf("a", abc, D)) # a is in all of them
print(tfidf("b", abc, D)) # b occurs only once here, but in 2 docs
print(tfidf("b", abb, D)) # b occurs more frequently in this doc
print(tfidf("c", abc, D)) # c is unique in the doc set
filelist = !ls ../data/movie_reviews/positive/*
filelist
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
import collections
def clean_tokens(tokens, stopwords):
import string
    """Lowercases, takes out punct and stopwords and short strings"""
return [token.lower() for token in tokens if (token not in string.punctuation)and (token.lower() not in stopwords) and len(token) > 2]
def makeText(filename, stopwords):
from nltk import Text
with open(filename) as handle:
text = handle.read()
return Text(clean_tokens(nltk.word_tokenize(text.decode('ascii', 'ignore')), stopwords))
def makeTextCollection(files, stopwords=stopwords):
from nltk import TextCollection
texts= [makeText(filename, stopwords) for filename in files]
collection = TextCollection(texts)
return collection, texts
# use the data for the vocab in a single doc for a wordcloud, for instance
def compute_tfidf_by_doc(coll, texts, filenames):
tfidf_by_doc = collections.defaultdict(list)
for i, text in enumerate(texts):
for word in set(text.tokens): # just use the words in this text
tfidfscore = coll.tf_idf(word, text)
tf = coll.tf(word, text) # is actually count / len(text)
count = text.count(word)
if tfidfscore:
tfidf_by_doc[filenames[i]].append({
"word": word,
"tfidf": tfidfscore,
"tf": tf,
"count": count
})
return tfidf_by_doc
# We need to make the text collection, then use it to compute the tf-idf for the words in the docs.
res = makeTextCollection(filelist)
coll = res[0]
texts = res[1]
texts[0]
coll.tf_idf("woman", texts[3])
tfidfs = compute_tfidf_by_doc(coll, texts, filelist)
tfidfs[tfidfs.keys()[0]] # the first filename is the first key... it contains a list of words and scores
import json
jsonified = json.dumps(tfidfs)
with open('../outputdata/pos_movies_tfidf.json', 'w') as handle:
handle.write(jsonified)
!ls -al ../outputdata/pos_movies_tfidf.json
# Load in the docs... again. We're going to make TF-IDF vectors with sklearn (scikit-learn) because it's faster.
def load_texts(filenames, dirpath):
    """filenames are the leaves, dirpath is the path to them with the /"""
loaded_text = {}
for filen in filenames:
with open(dirpath + filen) as handle:
loaded_text[filen] = handle.read()
return loaded_text
texts = load_texts(filelist, "")
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer().fit_transform([text.decode('ascii', 'ignore') for text in texts.values()])
vectors = tfidf.toarray()
vectors[0]
#If you haven't managed to load sklearn, you can try the slower version of the vectorizing:
def get_unique_terms_for_all(collection):
#get a list of unique terms
unique_terms = list(set(collection))
print "Unique terms found: ", len(unique_terms)
return unique_terms
# Function to create a TF*IDF vector for one document. For each of
# our unique words, we have a feature which is the td*idf for that word
# in the current document
def TFIDF(document, unique_terms_in_all):
word_tfidf = []
for word in unique_terms_in_all:
word_tfidf.append(collection.tf_idf(word,document))
return word_tfidf
def tfidf_doc_vectors(texts, coll):
uniques = get_unique_terms_for_all(coll)
vectors = [numpy.array(TFIDF(f, uniques)) for f in texts]
print "Vectors created."
return vectors
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
vectors
dist = pdist(vectors, metric='cosine') # look at the manpage and pick a different measure to try
linkage(dist)
# this is a base diagram, using defaults...
dendrogram(linkage(dist)) # this plotting function has a ton of things you can manipulate if you look at the docs.
texts[texts.keys()[14]]
def make_dend(data, labels=None, height=8):
from pylab import rcParams
dist = pdist(data, metric='cosine')
link = linkage(dist, method='complete')
rcParams['figure.figsize'] = 6, height
rcParams['axes.labelsize'] = 5
if not labels:
dend = dendrogram(link, orientation='right') #labels=names)
else:
dend = dendrogram(link, orientation='right', labels=[str(i) + label for i, label in enumerate(labels)])
return dist
dist = make_dend(vectors, height=20, labels=texts.keys())
texts.keys()[23]
texts[texts.keys()[23]]
texts[texts.keys()[4]]
# Code borrowed from: http://nbviewer.ipython.org/github/OxanaSachenkova/hclust-python/blob/master/hclust.ipynb
def make_heatmap_matrix(dist, method='complete'):
    """Pass in the distance matrix; method options are complete or single"""
# Compute and plot first dendrogram.
fig = plt.figure(figsize=(10,10))
# x ywidth height
ax1 = fig.add_axes([0.05,0.1,0.2,0.6])
Y = linkage(dist, method=method)
Z1 = dendrogram(Y, orientation='right') # adding/removing the axes
ax1.set_xticks([])
# Compute and plot second dendrogram.
ax2 = fig.add_axes([0.3,0.71,0.6,0.2])
Z2 = dendrogram(Y)
ax2.set_xticks([])
ax2.set_yticks([])
#Compute and plot the heatmap
axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = squareform(dist)
D = D[idx1,:]
D = D[:,idx2]
im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)
axmatrix.set_xticks([])
axmatrix.set_yticks([])
# Plot colorbar.
axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
plt.colorbar(im, cax=axcolor)
make_heatmap_matrix(dist, method='complete')
## clustering in NLTK:
import numpy
from nltk.cluster import KMeansClusterer, GAAClusterer, euclidean_distance,cosine_distance
import nltk.corpus
import nltk.stem
stemmer_func = nltk.stem.snowball.SnowballStemmer("english").stem
stopwords = set(nltk.corpus.stopwords.words('english'))
cluster = KMeansClusterer(5, cosine_distance)
cluster.cluster(vectors)
classified_examples = [cluster.classify(vec) for vec in vectors]
for i,val in enumerate(classified_examples):
print val, texts.keys()[i]
texts['../data/movie_reviews/positive/cv673_tok-6552.txt']
texts['../data/movie_reviews/positive/cv680_tok-18142.txt']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TF-IDF (Term Frequency, Inverse Document Frequency)
Step2: What if you change some of those docs, or add another one? Add another c in the last doc, e.g.
Step4: Some Utilities to Make a File We Can Save
Step6: Now we can look at these reviews as little wordclouds, using different measures to size our words. Let's work with word_clouds_tfidf.html and we can compare how our clouds look using regular word counts, term frequencies (which is count / length of the document), and tfidf across all the documents.
Step7: This gets us the input we need for clustering and making dendrograms.
Step8: SciPy's pdist computes pairwise distances (a tiny cosine-distance check follows after this list) - see http
Step9: Let's do this with a nicer layout now...
Step10: Let's inspect a pair that are grouped closely in the cosine-similarity tree -- 23, 4
Step12: What do you notice about them both?
Step13: Relevant links
Step14: Let's look at the items in cluster 0
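Since the clustering above hinges on cosine distance between TF-IDF vectors, here is a tiny self-contained check of what pdist(..., metric='cosine') computes:
import numpy as np
from scipy.spatial.distance import pdist

v = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
# cosine distance = 1 - cos(angle); identical direction -> 0, orthogonal -> 1
print(pdist(v, metric='cosine'))  # approximately [0.293, 1.0, 0.293]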
|
3,808
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import liboptpy.unconstr_solvers.fo as fo
import liboptpy.step_size as ss
import liboptpy.base_optimizer as base
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
n = 100
A = np.random.randn(n, n)
A = A.T.dot(A)
A_eigvals = np.linalg.eigvalsh(A)
mu = np.min(A_eigvals)
A = A - mu * np.eye(n)
x_true = np.random.randn(n)
b = A.dot(x_true)
f = lambda x: 0.5 * x.dot(A.dot(x)) - b.dot(x)
grad = lambda x: A.dot(x) - b
A_eigvals = np.linalg.eigvalsh(A)
L = np.max(A_eigvals)
mu = np.min(A_eigvals)
print(L, mu)
class HeavyBall(base.LineSearchOptimizer):
def __init__(self, f, grad, step_size, beta, **kwargs):
super().__init__(f, grad, step_size, **kwargs)
self._beta = beta
def get_direction(self, x):
self._current_grad = self._grad(x)
return -self._current_grad
def _f_update_x_next(self, x, alpha, h):
if len(self.convergence) < 2:
return x + alpha * h
else:
return x + alpha * h + self._beta * (x - self.convergence[-2])
def get_stepsize(self):
return self._step_size.get_stepsize(self._grad_mem[-1], self.convergence[-1], len(self.convergence))
beta_test = 0.8
methods = {
"GD fixed": fo.GradientDescent(f, grad, ss.ConstantStepSize(1 / L)),
"GD Armijo": fo.GradientDescent(f, grad,
ss.Backtracking("Armijo", rho=0.5, beta=0.1, init_alpha=1.)),
r"HB, $\beta = {}$".format(beta_test): HeavyBall(f, grad, ss.ConstantStepSize(1 / L), beta=beta_test),
"Nesterov": fo.AcceleratedGD(f, grad, ss.ConstantStepSize(1 / L)),
}
x0 = np.random.randn(n)
max_iter = 4000
tol = 1e-6
for m in methods:
_ = methods[m].solve(x0=x0, max_iter=max_iter, tol=tol)
figsize = (10, 8)
fontsize = 26
plt.figure(figsize=figsize)
for m in methods:
plt.semilogy([np.linalg.norm(grad(x)) for x in methods[m].get_convergence()], label=m)
plt.legend(fontsize=fontsize, loc="best")
plt.xlabel("Number of iteration, $k$", fontsize=fontsize)
plt.ylabel(r"$\| f'(x_k)\|_2$", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
_ = plt.yticks(fontsize=fontsize)
for m in methods:
print(m)
%timeit methods[m].solve(x0=x0, max_iter=max_iter, tol=tol)
beta_test = 0.9
methods = {
"GD Armijo": fo.GradientDescent(f, grad,
ss.Backtracking("Armijo", rho=0.5, beta=0.1, init_alpha=1.)),
r"HB, $\beta = {}$".format(beta_test): HeavyBall(f, grad, ss.ConstantStepSize(1 / L), beta=beta_test),
"Nesterov": fo.AcceleratedGD(f, grad, ss.ConstantStepSize(1 / L)),
"Nesterov adaptive": fo.AcceleratedGD(f, grad, ss.Backtracking(rule_type="Lipschitz", rho=0.5, init_alpha=1)),
}
x0 = np.zeros(n)
max_iter = 2000
tol = 1e-6
for m in methods:
_ = methods[m].solve(x0=x0, max_iter=max_iter, tol=tol)
figsize = (10, 8)
fontsize = 26
plt.figure(figsize=figsize)
for m in methods:
plt.semilogy([np.linalg.norm(grad(x)) for x in methods[m].get_convergence()], label=m)
plt.legend(fontsize=fontsize, loc="best")
plt.xlabel("Number of iteration, $k$", fontsize=fontsize)
plt.ylabel(r"$\| f'(x_k)\|_2$", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
_ = plt.yticks(fontsize=fontsize)
for m in methods:
print(m)
%timeit methods[m].solve(x0=x0, max_iter=max_iter, tol=tol)
import jax
import jax.numpy as jnp
from jax.config import config
config.update("jax_enable_x64", True)
import sklearn.datasets as skldata
n = 300
m = 1000
X, y = skldata.make_classification(n_classes=2, n_features=n, n_samples=m, n_informative=n//3, random_state=42)
C = 1
@jax.jit
def f(w):
return jnp.linalg.norm(w)**2 / 2 + C * jnp.mean(jnp.logaddexp(jnp.zeros(X.shape[0]), -y * (X @ w)))
# def grad(w):
# denom = scspec.expit(-y * X.dot(w))
# return w - C * X.T.dot(y * denom) / X.shape[0]
autograd_f = jax.jit(jax.grad(f))
x0 = jnp.ones(n)
print("Initial function value = {}".format(f(x0)))
print("Initial gradient norm = {}".format(jnp.linalg.norm(autograd_f(x0))))
beta_test = 0.9
L_trial = (1 + C*100)
methods = {
"GD Armijo": fo.GradientDescent(f, autograd_f,
ss.Backtracking("Armijo", rho=0.5, beta=0.01, init_alpha=1.)),
r"HB, $\beta = {}$".format(beta_test): HeavyBall(f, autograd_f, ss.ConstantStepSize(1 / L_trial), beta=beta_test),
"Nesterov": fo.AcceleratedGD(f, autograd_f, ss.ConstantStepSize(1 / L_trial)),
"Nesterov adaptive": fo.AcceleratedGD(f, autograd_f, ss.Backtracking(rule_type="Lipschitz", rho=0.5, init_alpha=1)),
}
max_iter = 2000
tol = 1e-6
for m in methods:
_ = methods[m].solve(x0=x0, max_iter=max_iter, tol=tol)
figsize = (10, 8)
fontsize = 26
plt.figure(figsize=figsize)
for m in methods:
plt.semilogy([np.linalg.norm(autograd_f(x)) for x in methods[m].get_convergence()], label=m)
plt.legend(fontsize=fontsize, loc="best")
plt.xlabel("Number of iteration, $k$", fontsize=fontsize)
plt.ylabel(r"$\| f'(x_k)\|_2$", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
_ = plt.yticks(fontsize=fontsize)
for m in methods:
print(m)
%timeit methods[m].solve(x0=x0, max_iter=max_iter, tol=tol)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Key facts about Nesterov's accelerated method (a plain-NumPy sketch of the heavy-ball momentum update follows below)
Step2: An experiment on a non-quadratic problem
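For reference, the update implemented by the HeavyBall class above reduces, with a constant step size, to the two-term recursion below — a minimal NumPy sketch on a small quadratic (all names and numbers are illustrative):
import numpy as np

def heavy_ball(grad, x0, alpha, beta, n_iter=100):
    """x_{k+1} = x_k - alpha * grad(x_k) + beta * (x_k - x_{k-1})."""
    x_prev, x = x0.copy(), x0.copy()
    for _ in range(n_iter):
        x_next = x - alpha * grad(x) + beta * (x - x_prev)
        x_prev, x = x, x_next
    return x

# quadratic example: f(x) = 0.5 x^T A x - b^T x, so grad f(x) = A x - b
A = np.diag([1.0, 10.0]); b = np.ones(2)
x_star = heavy_ball(lambda x: A @ x - b, np.zeros(2), alpha=1/10., beta=0.8)
print(x_star)  # should approach the exact solution [1.0, 0.1]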
|
3,809
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
df = pd.DataFrame({'a': np.arange(1E6), 'b': np.arange(1E6)})
%timeit -o df.c = df.a + df.b
a = df.a.values
b = df.b.values
%%timeit
c = a + b
df = pd.DataFrame({'a': np.arange(100), 'b': np.arange(100)})
%%timeit
df.c = df.a + df.b
a = df.a.values
b = df.b.values
%%timeit
c = a + b
from IPython.display import Image
Image(filename='_static/snakeviz_add.png')
from gtable import Table
tb = Table({'a': np.arange(1E6), 'b': np.arange(1E6)})
tb
%%timeit
tb.c = tb.a + tb.b
tb = Table({'a': np.arange(100), 'b': np.arange(100)})
%%timeit
tb.c = tb.a + tb.b
%%timeit
tb['a'] = tb['a'] + tb['b']
tb = Table({'a': pd.date_range('2000-01-01', freq='M', periods=10),
'b': np.random.randn(10)})
tb
tb.add_column('schedule', np.array(['first ']))
tb
tb
tb1 = tb.copy()
tb1.schedule.values[0] = 'second'
tb.stitch(tb1)
tb
tb.data
tb.keys
tb.index
b_col = tb.b
b_col
tb.sum_b = b_col.values.cumsum()
tb.sum_b.values
tb['sum_b']
%load_ext snakeviz
%%snakeviz
tb.a > tb.b
col = tb.a >= tb.b
col.values
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We have just created a relatively large dataframe with some dummy data, enough to prove my initial point. Let's see how much time it takes to add the two columns and to insert the result into the third one.
Step2: Is that fast or slow? Well, let's try to make the very same computation in a slightly different manner
Step3: If we compare how fast it is to a simple sum of two numpy arrays, it is pretty fast. But we are adding two relatively large arrays. Let's try the exact same thing with smaller arrays.
Step4: Now things have changed quite a lot. Just adding two arrays takes two orders of magnitude less time than adding from the Pandas DataFrame. But this comparison is not fair at all. Those 145µs are not spent waiting. Pandas does lots of things with the value of the Series resulting from the sum before it inserts it into the dataframe. If we profile the execution of that simple sum, we'll see that almost a fifth of the time is spent in a function called _sanitize_array.
Step5: The most important characteristic of Pandas is that it always does what it is supposed to do with data regardless of how dirty, heterogeneous, sparse (you name it) your data is. And it does an amazing job with that. But the price we have to pay is those two orders of magnitude in time.
Step6: You can see that for large arrays, the computation time shadows the overhead. Let's see how well it does with smaller arrays
Step7: We have improved by a factor of 7, which is crucial if that's the difference between running on one or seven servers. We can still improve the computation a little bit more if we fall back into some kind of I know what I am doing mode, where we want to reuse memory to avoid allocations
Step8: Now the performance of arithmetic operations with gtable is closer to operating with plain arrays than to the overhead-driven performance of Pandas. You can seriously break the table if you really don't know what you are doing. But for obvious reasons, having these kinds of performance tricks available is key to us.
Step9: I have been able to concatenate a full column in the horizontal direction with a single value, and it's part of the information that the printed value of the table gives. Storing the data and the indexes separately is a nice and efficient way of dealing with sparse data. We can visualize the table by converting it to a pandas Dataframe
Step10: Gtable is not designed as a general tool for data analysis, but as an efficient data container. We can also concatenate data in the vertical direction efficiently, also keeping a single copy of data when necessary
Step11: If you care a little about how it is done: the internal storage is just a list of arrays and a bitmap index (a toy sketch follows after this list). The bitmap index is interesting because some computations, like sorting or filtering, only involve the index. The storage of the table lives in the _data, _keys and _index attributes
Step12: We can take some advantage of knowing the internal representation of the data to insert data into the table in an efficient way. Every attribute of the table corresponds to a column, and each column stores the data as a numpy array in values and a piece of the index in index.
Step13: This means that it is relatively simple to make efficient computations with a whole column, or to add yet another column
Step14: We'll see where it will go from here
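To make the bitmap-index idea in Step11 concrete, here is a toy sketch of the concept (illustrative only — not gtable's actual internal code):
import numpy as np

# column present only in rows 0, 2 and 3 of a 5-row table
values = np.array([1.0, 2.0, 3.0])                  # dense payload
index = np.array([1, 0, 1, 1, 0], dtype=np.uint8)   # 1 = row has a value

# filtering can work on the bitmap alone, without touching the payload
n_present = index.sum()                 # 3 rows carry data
rows_with_data = np.flatnonzero(index)  # array([0, 2, 3])
print(n_present, rows_with_data)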
|
3,810
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Adam Li <adam2392@gmail.com>
# Alex Rockhill <aprockhill@mailbox.org>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import mne
from mne.datasets import fetch_fsaverage
# paths to mne datasets - sample sEEG and FreeSurfer's fsaverage subject
# which is in MNI space
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subjects_dir = op.join(sample_path, 'subjects')
# use mne-python's fsaverage data
fetch_fsaverage(subjects_dir=subjects_dir, verbose=True) # downloads if needed
raw = mne.io.read_raw(op.join(misc_path, 'seeg', 'sample_seeg_ieeg.fif'))
events, event_id = mne.events_from_annotations(raw)
epochs = mne.Epochs(raw, events, event_id, detrend=1, baseline=None)
epochs = epochs['Response'][0] # just process one epoch of data for speed
montage = epochs.get_montage()
# first we need a head to mri transform since the data is stored in "head"
# coordinates, let's load the mri to head transform and invert it
this_subject_dir = op.join(misc_path, 'seeg')
head_mri_t = mne.coreg.estimate_head_mri_t('sample_seeg', this_subject_dir)
# apply the transform to our montage
montage.apply_trans(head_mri_t)
# now let's load our Talairach transform and apply it
mri_mni_t = mne.read_talxfm('sample_seeg', op.join(misc_path, 'seeg'))
montage.apply_trans(mri_mni_t) # mri to mni_tal (MNI Taliarach)
# for fsaverage, "mri" and "mni_tal" are equivalent and, since
# we want to plot in fsaverage "mri" space, we need use an identity
# transform to equate these coordinate frames
montage.apply_trans(
mne.transforms.Transform(fro='mni_tal', to='mri', trans=np.eye(4)))
epochs.set_montage(montage)
# compute the transform to head for plotting
trans = mne.channels.compute_native_head_t(montage)
# note that this is the same as:
# ``mne.transforms.invert_transform(
# mne.transforms.combine_transforms(head_mri_t, mri_mni_t))``
fig = mne.viz.plot_alignment(epochs.info, trans, 'fsaverage',
subjects_dir=subjects_dir, show_axes=True,
surfaces=['pial', 'head'], coord_frame='mri')
aseg = 'aparc+aseg' # parcellation/anatomical segmentation atlas
labels, colors = mne.get_montage_volume_labels(
montage, 'fsaverage', subjects_dir=subjects_dir, aseg=aseg)
# separate by electrodes which have names like LAMY 1
electrodes = set([''.join([lttr for lttr in ch_name
if not lttr.isdigit() and lttr != ' '])
for ch_name in montage.ch_names])
print(f'Electrodes in the dataset: {electrodes}')
electrodes = ('LPM', 'LSMA') # choose two for this example
for elec in electrodes:
picks = [ch_name for ch_name in epochs.ch_names if elec in ch_name]
fig, ax = mne.viz.plot_channel_labels_circle(labels, colors, picks=picks)
fig.text(0.3, 0.9, 'Anatomical Labels', color='white')
picks = [ii for ii, ch_name in enumerate(epochs.ch_names) if
any([elec in ch_name for elec in electrodes])]
labels = ('ctx-lh-caudalmiddlefrontal', 'ctx-lh-precentral',
'ctx-lh-superiorfrontal', 'Left-Putamen')
fig = mne.viz.plot_alignment(mne.pick_info(epochs.info, picks), trans,
'fsaverage', subjects_dir=subjects_dir,
surfaces=[], coord_frame='mri')
brain = mne.viz.Brain('fsaverage', alpha=0.1, cortex='low_contrast',
subjects_dir=subjects_dir, units='m', figure=fig)
brain.add_volume_labels(aseg='aparc+aseg', labels=labels)
brain.show_view(azimuth=120, elevation=90, distance=0.25)
epochs.plot()
# get standard fsaverage volume (5mm grid) source space
fname_src = op.join(subjects_dir, 'fsaverage', 'bem',
'fsaverage-vol-5-src.fif')
vol_src = mne.read_source_spaces(fname_src)
evoked = epochs.average()
stc = mne.stc_near_sensors(
evoked, trans, 'fsaverage', subjects_dir=subjects_dir, src=vol_src,
verbose='error') # ignore missing electrode warnings
stc = abs(stc) # just look at magnitude
clim = dict(kind='value', lims=np.percentile(abs(evoked.data), [10, 50, 75]))
brain = stc.plot_3d(
src=vol_src, subjects_dir=subjects_dir,
view_layout='horizontal', views=['axial', 'coronal', 'sagittal'],
size=(800, 300), show_traces=0.4, clim=clim,
add_data_kwargs=dict(colorbar_kwargs=dict(label_font_size=8)))
# You can save a movie like the one on our documentation website with:
# brain.save_movie(time_dilation=3, interpolation='linear', framerate=5,
# time_viewer=True, filename='./mne-test-seeg.m4')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load some sEEG data with channel locations and make epochs.
Step2: Let's use the Talairach transform computed in the FreeSurfer recon-all
Step3: Let's check to make sure everything is aligned.
Step4: Let's also look at which regions of interest are nearby our electrode
Step5: Now, let's plot the electrodes and a few regions of interest that the contacts are near
Step6: Next, we'll get the epoch data and plot its amplitude over time.
Step7: We can visualize this raw data on the fsaverage brain (in MNI space) as a source estimate
Step8: Plot 3D source (brain region) visualization
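A side note on the montage transforms above: each montage.apply_trans(...) call just applies a 4x4 affine to the contact positions, and mne.transforms.apply_trans exposes the same operation directly. A small sketch (the example point is arbitrary):
# Apply the head->MRI affine from above to an arbitrary point (meters).
import numpy as np
point_head = np.array([[0.01, 0.02, 0.03]])
point_mri = mne.transforms.apply_trans(head_mri_t, point_head)
print(point_mri)  # same point expressed in MRI (surface RAS) coordinates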
|
3,811
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import antipackage
import github.ellisonbg.misc.vizarray as va
def checkerboard(size):
    """Return a 2d checkerboard of 0.0 and 1.0 as a NumPy array"""
a = np.zeros((size,size))
x=0
while x < size:
a[x,0] = 1
x += 2
for x in range(0,size):
for y in range(1,size):
a[x,y]= (-1)**(x+y) + a[x,y-1]
return a
checkerboard(4)
a = checkerboard(4)
assert a[0,0]==1.0
assert a.sum()==8.0
assert a.dtype==np.dtype(float)
assert np.all(a[0,0:5:2]==1.0)
assert np.all(a[1,0:5:2]==0.0)
b = checkerboard(5)
assert b[0,0]==1.0
assert b.sum()==13.0
assert np.all(b.ravel()[0:26:2]==1.0)
assert np.all(b.ravel()[1:25:2]==0.0)
# YOUR CODE HERE
va.enable()
va.set_block_size(10)
checkerboard(20)
assert True
# YOUR CODE HERE
va.set_block_size(5)
checkerboard(27)
assert True
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Checkerboard
Step3: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.
Step4: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.
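For comparison with the loop-based checkerboard above, a vectorized alternative that satisfies the same asserts (the helper name is just illustrative):
def checkerboard_fast(size):
    """Vectorized checkerboard: 1.0 where row+column is even."""
    return ((np.indices((size, size)).sum(axis=0) + 1) % 2).astype(float)

assert np.array_equal(checkerboard_fast(4), checkerboard(4))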
|
3,812
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
# If you're running this in Colab, first install the adanet package:
!pip install adanet
import functools
import json
import os
import time
import adanet
from google.colab import auth
import tensorflow.compat.v1 as tf
BUCKET = '' #@param {type: 'string'}
MODEL_DIR = 'gs://{}/{}'.format(
BUCKET, time.strftime('adanet-tpu-estimator/%Y-%m-%d-%H-%M-%S'))
MASTER = ''
if 'COLAB_TPU_ADDR' in os.environ:
auth.authenticate_user()
MASTER = 'grpc://' + os.environ['COLAB_TPU_ADDR']
# Authenticate TPU to use GCS Bucket.
with tf.Session(MASTER) as sess:
with open('/content/adc.json', 'r') as file_:
auth_info = json.load(file_)
tf.contrib.cloud.configure_gcs(sess, credentials=auth_info)
# The random seed to use.
RANDOM_SEED = 42
(x_train, y_train), (x_test, y_test) = (
tf.keras.datasets.fashion_mnist.load_data())
FEATURES_KEY = "images"
def generator(images, labels):
  """Returns a generator that returns image-label pairs."""
def _gen():
for image, label in zip(images, labels):
yield image, label
return _gen
def preprocess_image(image, label):
  """Preprocesses an image for an `Estimator`."""
image = image / 255.
image = tf.reshape(image, [28, 28, 1])
features = {FEATURES_KEY: image}
return features, label
def input_fn(partition, training, batch_size):
  """Generate an input_fn for the Estimator."""
def _input_fn(params): # TPU: specify `params` argument.
# TPU: get the TPU set `batch_size`, if available.
batch_size_ = params.get("batch_size", batch_size)
if partition == "train":
dataset = tf.data.Dataset.from_generator(
generator(x_train, y_train), (tf.float32, tf.int32), ((28, 28), ()))
elif partition == "predict":
dataset = tf.data.Dataset.from_generator(
generator(x_test[:10], y_test[:10]), (tf.float32, tf.int32),
((28, 28), ()))
else:
dataset = tf.data.Dataset.from_generator(
generator(x_test, y_test), (tf.float32, tf.int32), ((28, 28), ()))
if training:
dataset = dataset.shuffle(10 * batch_size_, seed=RANDOM_SEED).repeat()
# TPU: drop the remainder batch when training on TPU.
dataset = dataset.map(preprocess_image).batch(
batch_size_, drop_remainder=training)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
return _input_fn
#@title Define the Builder and Generator
class SimpleCNNBuilder(adanet.subnetwork.Builder):
  """Builds a CNN subnetwork for AdaNet."""
def __init__(self, learning_rate, max_iteration_steps, seed):
    """Initializes a `SimpleCNNBuilder`.

    Args:
      learning_rate: The float learning rate to use.
      max_iteration_steps: The number of steps per iteration.
      seed: The random seed.

    Returns:
      An instance of `SimpleCNNBuilder`.
    """
self._learning_rate = learning_rate
self._max_iteration_steps = max_iteration_steps
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
images = list(features.values())[0]
kernel_initializer = tf.keras.initializers.he_normal(seed=self._seed)
x = tf.keras.layers.Conv2D(
filters=16,
kernel_size=3,
padding="same",
activation="relu",
kernel_initializer=kernel_initializer)(
images)
x = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(
units=64, activation="relu", kernel_initializer=kernel_initializer)(
x)
logits = tf.keras.layers.Dense(
units=10, activation=None, kernel_initializer=kernel_initializer)(
x)
complexity = tf.constant(1)
return adanet.Subnetwork(
last_layer=x,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
learning_rate = tf.train.cosine_decay(
learning_rate=self._learning_rate,
global_step=iteration_step,
decay_steps=self._max_iteration_steps)
optimizer = tf.train.MomentumOptimizer(learning_rate, .9)
# TPU: wrap the optimizer in a CrossShardOptimizer.
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
    """See `adanet.subnetwork.Builder`."""
return tf.no_op("mixture_weights_train_op")
@property
def name(self):
    """See `adanet.subnetwork.Builder`."""
return "simple_cnn"
class SimpleCNNGenerator(adanet.subnetwork.Generator):
  """Generates a `SimpleCNN` at each iteration."""
def __init__(self, learning_rate, max_iteration_steps, seed=None):
    """Initializes a `Generator` that builds `SimpleCNNs`.

    Args:
      learning_rate: The float learning rate to use.
      max_iteration_steps: The number of steps per iteration.
      seed: The random seed.

    Returns:
      An instance of `Generator`.
    """
self._seed = seed
self._dnn_builder_fn = functools.partial(
SimpleCNNBuilder,
learning_rate=learning_rate,
max_iteration_steps=max_iteration_steps)
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports):
    """See `adanet.subnetwork.Generator`."""
seed = self._seed
# Change the seed according to the iteration so that each subnetwork
# learns something different.
if seed is not None:
seed += iteration_number
return [self._dnn_builder_fn(seed=seed)]
#@test {"skip": true}
get_ipython().system_raw(
'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'
.format(MODEL_DIR)
)
# Install ngrok binary.
! wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
! unzip ngrok-stable-linux-amd64.zip
print("Follow this link to open TensorBoard in a new tab.")
get_ipython().system_raw('./ngrok http 6006 &')
! curl -s http://localhost:4040/api/tunnels | python3 -c \
"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"
#@title AdaNet Parameters
LEARNING_RATE = 0.25 #@param {type:"number"}
TRAIN_STEPS = 1000 #@param {type:"integer"}
BATCH_SIZE = 512 #@param {type:"integer"}
ADANET_ITERATIONS = 2 #@param {type:"integer"}
# TPU: switch `tf.estimator.RunConfig` to `tf.contrib.tpu.RunConfig`.
# The main required changes are specifying `tpu_config` and `master`.
config = tf.contrib.tpu.RunConfig(
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=200),
master=MASTER,
save_checkpoints_steps=200,
save_summary_steps=200,
tf_random_seed=RANDOM_SEED)
head = tf.contrib.estimator.multi_class_head(
n_classes=10, loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
max_iteration_steps = TRAIN_STEPS // ADANET_ITERATIONS
# TPU: switch `adanet.Estimator` to `adanet.TPUEstimator`.
try:
estimator = adanet.TPUEstimator(
head=head,
subnetwork_generator=SimpleCNNGenerator(
learning_rate=LEARNING_RATE,
max_iteration_steps=max_iteration_steps,
seed=RANDOM_SEED),
max_iteration_steps=max_iteration_steps,
evaluator=adanet.Evaluator(
input_fn=input_fn("train", training=False, batch_size=BATCH_SIZE),
steps=None),
adanet_loss_decay=.99,
config=config,
model_dir=MODEL_DIR,
# TPU: specify `use_tpu` and the batch_size parameters.
use_tpu=True,
# We evaluate on CPU since train_and_evaluate() will shut the TPU down
# after evaluating the first time. However, AdaNet fully supports
# evaluating on TPU.
eval_on_tpu=False,
train_batch_size=BATCH_SIZE,
eval_batch_size=32)
except tf.errors.InvalidArgumentError as e:
raise Exception(
"Invalid GCS Bucket: you must provide a valid GCS bucket in the "
"`BUCKET` form field of the first cell.") from e
results, _ = tf.estimator.train_and_evaluate(
estimator,
train_spec=tf.estimator.TrainSpec(
input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
max_steps=TRAIN_STEPS),
eval_spec=tf.estimator.EvalSpec(
input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
steps=None,
start_delay_secs=1,
throttle_secs=1,
))
print("Accuracy:", results["accuracy"])
print("Loss:", results["average_loss"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: AdaNet on TPU
Step2: Fashion MNIST
Step6: input_fn Changes
Step16: model_fn Changes
Step17: Launch TensorBoard
Step18: Using adanet.TPUEstimator to Train and Evaluate
|
3,813
|
<ASSISTANT_TASK:>
Python Code:
# Assumed context: Python 2 (hence the bare `reload`); `repo_path` points to
# the pynoddy repository and is defined in an earlier cell.
import os
import pynoddy
import pynoddy.history
import pynoddy.output
import pynoddy.experiment.uncertainty_analysis
reload(pynoddy.history)
reload(pynoddy.output)
reload(pynoddy.experiment.uncertainty_analysis)
reload(pynoddy)
from pynoddy.experiment.uncertainty_analysis import UncertaintyAnalysis
# the model itself is now part of the repository, in the examples directory:
history_file = os.path.join(repo_path, "examples/fold_dyke_fault.his")
#this file defines the statistical distributions to sample from
params = os.path.join(repo_path, "examples/fold_dyke_fault.csv")
uc_experiment = UncertaintyAnalysis(history_file,params)
#plot the intial model
uc_experiment.change_cube_size(55)
uc_experiment.plot_section(direction='y',position='center')
uc_experiment.estimate_uncertainty(10,verbose=False)
uc_experiment.plot_probability(4, direction='y',position='center')
uc_experiment.plot_entropy(direction='y',position='center')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The next step is to perform the Monte Carlo perturbation of this initial model, and use this to estimate uncertainty. This sampling is wrapped into the estimate_uncertainty function - all that is required from us is the number of trials to produce. Realistically, several thousand samples are typically necessary before sampling can be considered representative. However, in order to speed things up a bit we'll produce 10 model samples.
Step2: Now, a quick description of what we have done... the estimate_uncertainty function generates the specified amount (10) of randomly varying models using the MonteCarlo class. It then loads the output and loops calculates the lithology present at each voxel in each model. This information is used to calculate probability maps for each lithology at each point in the model! This can be seen if we plot the probability of observing lithology 3
Step3: These probability maps can then be used to calculate the information entropy of each cell. These can then be plotted as follows
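Step3 refers to information entropy; for reference, the per-cell Shannon entropy computed from lithology probabilities is H = -sum(p_i * log2(p_i)). A minimal sketch of that formula (the standard definition, not pynoddy's internal code):
import numpy as np

def cell_entropy(p):
    """Shannon entropy of a probability vector (0 log 0 treated as 0)."""
    p = np.asarray(p, dtype=float)
    nz = p > 0
    return -np.sum(p[nz] * np.log2(p[nz]))

print(cell_entropy([0.5, 0.5]))  # 1.0 bit: maximally uncertain between 2 units
print(cell_entropy([1.0, 0.0]))  # 0.0: no uncertainty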
|
3,814
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def soliton(x, t, c, a):
    """Return phi(x, t) for a soliton wave with constants c and a."""
    # isinstance is the correct check here; the original
    # `type(x) or type(t) == np.array` was always truthy
    if isinstance(x, np.ndarray) or isinstance(t, np.ndarray):
        answer = np.array(0.5 * c/(np.cosh(np.sqrt(c)/2*(x-c*t-a)))**2)
    else:
        answer = 0.5 * c/(np.cosh(np.sqrt(c)/2*(x-c*t-a)))**2  # sech^2 in the scalar branch too
return answer
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)
c = 1.0
a = 0.0
phi = np.ndarray(shape=(xpoints,tpoints), dtype = float)
for i in range(xpoints):       # loop over indices, not the float grid values
    for j in range(tpoints):
        phi[i,j] = soliton(x[i],t[j],c,a)
assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
def plot_soliton_data(i=0):
    """Plot the soliton data at t[i] versus x."""
    plt.plot(x, soliton(x,t[i],c,a))  # plot versus x, as the docstring says
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.axes.get_yaxis().tick_left()
plt.title('Soliton Wave')
plt.xlabel('X')
plt.ylabel('Psi(x,t)')
plot_soliton_data(0)
assert True # leave this for grading the plot_soliton_data function
interact(plot_soliton_data,i = (0,50))
assert True # leave this for grading the interact with plot_soliton_data cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Using interact for animation with data
Step3: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays
Step4: Compute a 2d NumPy array called phi
Step6: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
Step7: Use interact to animate the plot_soliton_data function versus time.
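For reference, the wave animated above is the standard KdV soliton, matching the soliton function defined in the code:

$\phi(x, t) = \frac{c}{2}\,\mathrm{sech}^2\!\left(\frac{\sqrt{c}}{2}(x - c t - a)\right)$

so the peak height is $c/2$ and the crest moves to the right with speed $c$ from the initial offset $a$.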
|
3,815
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from functools import partial
from sklearn.manifold import TSNE
import sklearn.metrics as metrics
from scRNA.simulation import generate_toy_data, split_source_target
from scRNA.nmf_clustering import NmfClustering_initW, NmfClustering, DaNmfClustering
from scRNA.sc3_clustering_impl import data_transformation_log2, cell_filter, gene_filter
n_genes = 1000
n_cells = 2000
cluster_spec = [1, 2, 3, [4, 5], [6, [7, 8]]]
np.random.seed(42)
data, labels = generate_toy_data(num_genes=n_genes,
num_cells=n_cells,
cluster_spec=cluster_spec)
print(data.shape)
model = TSNE(n_components=2, random_state=0, init='pca', method='exact', metric='euclidean', perplexity=30)
ret = model.fit_transform(data.T)
plt.title('tSNE'.format())
plt.scatter(ret[:, 0], ret[:, 1], 10, labels)
plt.xticks([])
plt.yticks([])
plt.figure(0)
inds = np.argsort(labels)
plt.pcolor(data[:, inds] / np.max(data), cmap='Greys')
plt.clim(0.,+1.)
plt.xticks([])
plt.yticks([])
for i in range(len(labels)):
plt.vlines(i, 0, n_genes, colors='C{0}'.format(labels[inds[i]]), alpha=0.07)
plt.title('Read counts')
plt.xlabel('Cells')
plt.ylabel('Genes')
n_trg = 100
n_src = 400
np.random.seed(2)
data_source, data_target, true_labels_source, true_labels_target = \
split_source_target(
data,
labels,
target_ncells = n_trg,
source_ncells = n_src,
source_clusters = [1,2,3,4,5,6,7,8],
mode = 6,
common = 0,
cluster_spec = cluster_spec
)
trg_labels = np.unique(true_labels_target)
src_labels = np.unique(true_labels_source)
print('Source cluster: ', np.unique(true_labels_source))
print('Target cluster: ', np.unique(true_labels_target))
np.random.seed(1)
nmf = NmfClustering(data_source.copy(), np.arange(n_genes), labels=None, num_cluster=src_labels.size)
nmf.apply(alpha=1., l1=0.75, rel_err=1e-8)
score = metrics.adjusted_rand_score(true_labels_source, nmf.cluster_labels)
print('Adjusted Rand Score w/o labels: ', score)
np.random.seed(1)
nmf = NmfClustering_initW(data_source.copy(), np.arange(n_genes), labels=true_labels_source, num_cluster=src_labels.size)
nmf.apply(alpha=1., l1=0.75, rel_err=1e-8)
score = metrics.adjusted_rand_score(true_labels_source, nmf.cluster_labels)
print('Adjusted Rand Score w/ labels: ', score)
cell_filter_fun = partial(cell_filter, num_expr_genes=0, non_zero_threshold=-1)
gene_filter_fun = partial(gene_filter, perc_consensus_genes=1, non_zero_threshold=-1)
data_transf_fun = partial(data_transformation_log2)
np.random.seed(1)
nmf_transf = NmfClustering_initW(data_source.copy(), np.arange(n_genes), labels=true_labels_source, num_cluster=src_labels.size)
nmf_transf.add_cell_filter(cell_filter_fun)
nmf_transf.add_gene_filter(gene_filter_fun)
nmf_transf.set_data_transformation(data_transf_fun)
nmf_transf.apply(alpha=1., l1=0.75, rel_err=1e-8)
# nmf.print_reconstruction_error(data_source, nmf.dictionary, nmf.data_matrix)
score = metrics.adjusted_rand_score(true_labels_source, nmf_transf.cluster_labels)
print('Adjusted Rand Score: ', score)
print('(Iteration) adjusted Rand score:')
da_nmf_target = DaNmfClustering(nmf, data_target.copy(), np.arange(n_genes), num_cluster=trg_labels.size)
thetas = np.linspace(0, 1, 20)
res = np.zeros(thetas.size)
for i in range(thetas.size):
da_nmf_target.apply(mix=thetas[i], alpha=1., l1=0.75, rel_err=1e-8, calc_transferability=False)
# print(da_nmf_target.cluster_labels)
res[i] = metrics.adjusted_rand_score(true_labels_target, da_nmf_target.cluster_labels)
print('(', i,')', res[i])
plt.figure(0)
plt.bar(thetas, res)
plt.xticks([])
plt.yticks([0., 1.])
plt.xlabel('theta')
plt.ylabel('adjusted Rand score')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Simulating scRNA read count data
Step2: Let's have a tSNE plot on the simulated data. We see that the clusters are nicely separated
Step3: Plotting the read counts as a matrix reveals that many entries are zero, or close to zero
Step4: 2. Splitting data into source and target
Step5: 3. Clustering source data w/ and w/o labels
Step6: We can transform and filter any data using sc3 inspired methods
Step7: 4. Transfer learning
|
3,816
|
<ASSISTANT_TASK:>
Python Code:
import pyensae
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import pyensae
import os
from pyensae.finance import StockPrices
cache = os.path.abspath("cache")
if not os.path.exists(cache):
os.mkdir(cache)
source = 'yahoo_new'
tick = 'MSFT'
stock = StockPrices(tick, folder=cache, url=source)
stock.head()
stock.tail()
stock.dataframe.columns
import pandas
data = [{"Date":"2014-04-01", "Close":105.6}, {"Date":"2014-04-02", "Close":104.6},
{"Date":"2014-04-03", "Close":105.8}, ]
df = pandas.DataFrame(data)
stock = StockPrices("donnees",df)
stock.head()
import datetime
stock = StockPrices(tick, folder=cache, url=source)
ax = StockPrices.draw(stock, figsize=(12,6))
stock = StockPrices(tick, folder=cache, url=source)
StockPrices.draw(stock, figsize=(12,6));
stock = StockPrices("MSFT", folder=cache, url='yahoo')
StockPrices.draw(stock, field=["Open", "Close"], figsize=(12,6));
stock.head()
stock = StockPrices(tick)
ret = stock.returns()["2019-01-04":"2019-02-02"]
ret.dataframe.loc["2019-01-11":"2019-01-18","Close"] = 0 # zero out some values
ax = stock.plot(figsize=(16, 5))
ret.plot(axis=2, ax=ax, label_prefix="r", color='blue');
os.listdir(cache)
stock.tail()
ret = stock.returns()
ret.tail()
StockPrices.draw(ret, figsize=(12,6), begin="2013-12-01", date_format="%Y-%m");
from datetime import datetime, timedelta
dt = datetime.strptime("2014-03-31","%Y-%m-%d")
dt
delta = timedelta(1)
dt = dt + delta
dt
s = dt.strftime("%Y-%m-%d")
s
tick2 = 'GOOGL'
stock = StockPrices(tick2, folder=cache, url=source)
df = stock.dataframe
print("A", df["2005-01-04":"2005-01-06"])
print("D", df.loc["2005-01-04","Close"])
print("G", df.index.get_loc("2005-01-06")) # retourne la position de cette date
stock = StockPrices(tick2, folder=cache, url=source)
stock.dataframe.to_csv("donnees.txt", sep="\t")
[_ for _ in os.listdir(".") if "donnees" in _]
import pandas
df = pandas.read_csv("donnees.txt", sep="\t")
df.head()
with open("donnees.txt","r") as f:
text = f.read()
print(text[:400])
stock = StockPrices(tick2, folder=cache, url=source)
stock.dataframe.to_csv("donnees.txt", sep="\t", index=False)
df = pandas.read_csv("donnees.txt",sep="\t")
df.head()
with open("donnees.txt", "r") as f:
text = f.read()
print(text[:400])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialization
Step2: Create a StockPrices object
Step3: The <tt>StockPrices</tt> class contains an object <a href="http
Step4: In the same way, a <tt>StockPrices</tt> object can be created from a DataFrame
Step5: A few plots
Step6: The same thing happens on another financial series but not on the same date. We now plot the Open series (Adj Close defined
Step7: This type of series does not always show the price jumps that occur, for example the <a href="http
Step8: A few operations
Step9: We display the last rows.
Step10: We retrieve the series of returns.
Step11: We plot the series of returns for the last few months.
Step12: A few notions about dates
Step13: We add one day
Step14: Then we convert in the other direction
Step15: Walking through the index
Step16: Saving the tables
Step17: The file is saved. To load it back with pandas
Step18: The dates appear twice.
Step19: This is because the dates are both a column and serve as the index. To avoid keeping them twice, we explicitly request that the index not be added to the file
Step20: Then we load the data back
Step21: We check the file on the hard disk
|
3,817
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def random_line(m, b, sigma, size=10):
Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
x = np.linspace(-1, 1, size)
n = np.random.randn(size)
y = np.zeros(size)
for a in range(size):
y[a] = m*x[a] + b + (sigma * n[a])
# formula for the normal distribution found on SciPy.org
return x, y
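# Note: the loop above is equivalent to the vectorized one-liner
# y = m * x + b + sigma * n
# which draws the same y = m*x + b + N(0, sigma^2) without an explicit loop.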
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
def ticks_out(ax):
Move the ticks to the outside of the box.
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
Plot a random line with slope m, intercept b and size points.
x, y = random_line(m, b, sigma, size)
plt.scatter(x,y,color=color)
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
interact(plot_random_line, m=(-10.0,10.0,0.1),b=(-5.0,5.0,.1),sigma=(0.0,5.0,.01),size=(10,100,10),color = ['red','green','blue']);
# assert True # use this cell to grade the plot_random_line interact
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Line with Gaussian noise
Step5: Write a function named plot_random_line that takes the same arguments as random_line and creates a random line using random_line and then plots the x and y points using Matplotlib's scatter function
Step6: Use interact to explore the plot_random_line function using
|
3,818
|
<ASSISTANT_TASK:>
Python Code:
from ndreg import *
import matplotlib
import ndio.remote.neurodata as neurodata
server = "dev.neurodata.io"
userToken = txtRead("userToken.pem").strip()
refToken = "ara3"
refImg = imgDownload(refToken, channel="average", server=server, userToken=userToken)
refThreshold = imgPercentile(refImg, 0.99)
print(refThreshold)
imgShow(refImg, vmax=refThreshold)
refAnnoImg = imgDownload(refToken, channel="annotation", server=server, userToken=userToken)
refAnnoImgOrig = refAnnoImg[:,:,:]
imgShow(refAnnoImg, vmax=1000)
randValues = np.random.rand(1000,3)
randValues = np.concatenate(([[0,0,0]],randValues))
randCmap = matplotlib.colors.ListedColormap (randValues)
imgShow(refAnnoImg, vmax=1000, cmap=randCmap)
imgShow(refImg, vmax=refThreshold, newFig=False)
imgShow(refAnnoImg, vmax=1000, cmap=randCmap, alpha=0.3, newFig=False)
plt.show()
inToken = "s275"
nd = neurodata(hostname="dev.neurodata.io", user_token=userToken)
print(nd.get_metadata(inToken)['dataset']['imagesize'])
inImg = imgDownload(inToken, resolution=1, userToken=userToken, server=server)
inImg.SetSpacing(np.array(inImg.GetSpacing())*1000)
inImgOrig = inImg[:,:,:]
inImg = inImgOrig[:,:,:]
inThreshold = imgPercentile(inImg, 0.95)
imgShow(inImg, vmax=inThreshold)
imgShow(refImg, vmax=refThreshold)
imgShow(inImg, vmax=inThreshold)
inOrient = "IAL"
refOrient = "RSA"
inImg = imgReorient(inImg, inOrient, refOrient)
imgShow(inImg, vmax=inThreshold)
inImgSize_reorient = inImg.GetSize()
inImgSpacing_reorient= inImg.GetSpacing()
spacing = [0.1,0.1, 0.1]
inImg_ds = imgResample(inImg, spacing)
imgShow(inImg_ds, vmax=inThreshold)
refImg_ds = imgResample(refImg, spacing)
imgShow(refImg_ds, vmax=refThreshold)
imgWrite(inImg_ds,"/cis/project/clarity/data/ailey/s275_ch0_rsa_100um.img")
roiStart = [5.4, 1.2, 2.1]
roiSize = [4.5,6.5,7.5]
roiStartVoxel = (roiStart / np.array(spacing)).astype('uint16').tolist()
print(roiStartVoxel)
roiSizeVoxel = (roiSize / np.array(spacing)).astype('uint16').tolist()
print(roiSizeVoxel)
roiImg = sitk.Image(roiSizeVoxel,sitk.sitkUInt8)
roiImg += 255
emptyImg = sitk.Image(refImg_ds.GetSize(),sitk.sitkUInt8) # Create an empty image
emptyImg.CopyInformation(refImg_ds) # Copy spacing, origin and direction from reference image
refMask = sitk.Paste(emptyImg, roiImg, roiSizeVoxel, [0,0,0], roiStartVoxel)
imgShow(refMask, vmax=255)
refImg_ds = sitk.Mask(refImg_ds, refMask)
imgShow(refImg_ds, vmax=refThreshold)
imgShow(inImg_ds, vmax=inThreshold)
threshold = imgPercentile(inImg_ds,0.95)
inMask_ds = sitk.BinaryThreshold(inImg_ds, 0, threshold, 255, 0)
imgShow(inMask_ds, vmax=255)
translation = -np.array(roiStart)
inAffine = [1.2,0,0,0,1.2,0,0,0,1]+translation.tolist()
print(inAffine)
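# inAffine lists the 3x3 matrix entries in row-major order followed by the
# translation vector: here a uniform 1.2x scaling plus a shift to the ROI start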
imgShow(imgApplyAffine(inImg_ds, inAffine, size=refImg_ds.GetSize()),vmax = inThreshold)
affine = imgAffineComposite(inImg_ds, refImg_ds, inMask=inMask_ds, iterations=100, useMI=True, verbose=True, inAffine=inAffine)
inImg_affine = imgApplyAffine(inImg, affine, size=refImg.GetSize(), spacing=refImg.GetSpacing())
imgShow(inImg_affine, vmax=inThreshold)
inImg_ds = imgResample(inImg_affine, spacing=spacing, size=refImg_ds.GetSize())
imgShow(imgChecker(inImg_ds, refImg_ds), vmax=refThreshold)
inMask_ds = imgApplyAffine(inMask_ds, affine, useNearest=True, size=refImg_ds.GetSize())
imgShow(inMask_ds, vmax=255)
imgShow(inImg_ds, vmax=inThreshold)
inImg_ds = imgResample(inImg_affine, spacing=spacing, size=refImg_ds.GetSize())
(field, invField) = imgMetamorphosisComposite(inImg_ds, refImg_ds, inMask=inMask_ds, alphaList=[0.1, 0.05,0.02],
scaleList = [1.0, 1.0,1.0], useMI=True, iterations=100, verbose=True)
inImg_lddmm = imgApplyField(inImg_affine, field, size=refImg.GetSize())
imgShow(inImg_lddmm, vmax=inThreshold)
inImg_ds = imgResample(inImg_lddmm, spacing=spacing, size=refImg_ds.GetSize())
imgShow(imgChecker(inImg_ds, refImg_ds), vmax=refThreshold)
imgShow(inImg_lddmm, vmax=inThreshold, newFig=False)
imgShow(refAnnoImg, vmax=1000, cmap=randCmap, alpha=0.2, newFig=False)
plt.show()
outToken = inToken + "_to_" + refToken
imgUpload(inImg_lddmm, outToken, server=server, userToken=userToken)
### imgWrite(inImg_lddmm, "/cis/project/clarity/data/ailey/"+outToken+"_new.img")
spacing_ds = invField.GetSpacing()
size_ds = np.ceil(np.array(refAnnoImg.GetSize())*np.array(refAnnoImg.GetSpacing())/np.array(spacing_ds))
size_ds = list(size_ds.astype(int))
invAffine = affineInverse(affine)
invAffineField = affineToField(invAffine, size_ds, spacing_ds)
invField2 = fieldApplyField(invAffineField, invField)
inAnnoImg = imgApplyField(refAnnoImg, invField2,useNearest=True, size=inImgSize_reorient, spacing=inImgSpacing_reorient)
inAnnoThreshold = imgPercentile(inAnnoImg,0.99)
imgShow(inAnnoImg, vmax=inAnnoThreshold)
inAnnoImg = imgReorient(inAnnoImg, refOrient, inOrient)
imgShow(inAnnoImg, vmax=inAnnoThreshold)
outToken = "ara3_to_AutA"
outChannel = "annotation_draft"
imgUpload(inAnnoImg, outToken, outChannel, resolution=5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We define the server and our user token
Step2: First we'll download the atlas image
Step3: Next we'll visualize the image. To ensure that the visualization has good contrast we'll only show intensity values below the 99th percentile.
Step4: When given an image volume imgShow displays it as a set of evenly spaced z-slices (1st column), y-slices (2nd column) and x-slices (3rd column).
Step5: Now we download the corresponding annotations
Step6: It's usually better to visualize a set of annotations in color. Let's display the atlas annotations using a random colormap. We begin by creating a 1000x3 array of random values. Since we always want the background (label 0) to be dark, we make the 0th row [0,0,0].
Step7: Now we can display the annotations.
Step8: Let's overlay these annotations on the atlas image using alpha transparency. To do this we must set newFig to False so that matplotlib appends the annotation data to the current figure instead of creating new one.
Step9: Downloading input image
Step10: Clearly resolution 1 is a resonable resolution. So we'll download the image at that resolution. Depending on your internet connection downloading may take several minutes.
Step11: We'll only use intensity values below the 95th percentile in the visualization.
Step12: Reorienting input image
Step13: By examining the z-slices in the first column it's clear that the x-axis goes from the Right to the Left side of the brain. The y-axis varies from Superior to Inferior and the z-axis goes from Anterior to Posterior. Thus it is in RSA orientation. Looking at the input brain...
Step14: ...we see that the x-axis goes from Inferior to Superior, the y-axis goes from Anterior to Posterior and the z-axis goes from Left to Right. Thus it's in IAL orientation. Therefore we reorient the input image from IAL to RSA
Step15: Compare the above to the Atlas. The slices should now correspond with the atlas. Let's make a copy of the reoriented image size and spacing because we'll need it later.
Step16: Downsample images
Step17: Create and apply mask of reference image
Step18: We then convert these values from mm to voxels
Step19: We create a Region Of Interest of value 255
Step20: We then paste this into an empty image to create a mask of the reference image
Step21: Now we apply this mask to our downsampled reference image
Step22: Create input image mask
Step23: This will likly interfere with the registration. Therefore we create a registration mask which excludes those regions
Step24: Affine Registration
Step25: Now we compute the affine transform. Unlike in the basic registration tutiorial we'll use the imgAffineComposite instead of imgAffine. imgAffine simply computes the affine transform between the input and reference images. imgAffineComposite computes a translation then a rigid then an affine transformation. It's output is the composition of those three transforms. We use Mutual Information since the input and reference images have very differnt intensity profiles. We also enable the verbose option so that each iteration is printed.
Step26: Now we apply the affine transform to the input image and mask
Step27: We can evaluate the affine registration by generating a checkerboard of the reference and input images. In this method the input image is placed on the black squares of the metaphorical checkerboard while the reference image is placed on the red squares. Idealy anatomical sturctures should line up across squares.
Step28: We also apply the affine to the input mask
Step29: LDDMM registration
Step30: Now we apply the displacement field
Step31: Evaluating the LDDMM registration
Step32: We can also evaluate the registration by overlaying the atlas annotations over the deformed input image.
Step33: Uploading Results
Step34: Uploading deformed atlas annotations
Step35: Before we can overlay the atlas annotations on the downloaded image we must transfom them to its space. Fortunatly this can be done since all spatial transforms in this tutorial are invertable. First we construct an inverse displacement field which transforms the annotations from the reference space to the input space before registration.
Step36: We're not done yet. We still need to reorient these annotations to their original
Step37: We can upload these annotations at the lowest possible resolution.
|
3,819
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
np.random.randint(low=1, high=20+1, size=1)
rolls = np.random.randint(low=1, high=20+1, size=(5, 2))
rolls
rolls.max(axis=1)
def expected_value_advantage(n_rolls: int=100, n_sides: int=20) -> float:
rolls = np.random.randint(low=1, high=n_sides+1, size=(n_rolls, 2))
return rolls.max(axis=1).mean()
expected_value_advantage(n_rolls=10**6, n_sides=20)
def expected_value_disadvantage(n_rolls: int=100, n_sides: int=20) -> float:
rolls = np.random.randint(low=1, high=n_sides+1, size=(n_rolls, 2))
return rolls.min(axis=1).mean()
expected_value_disadvantage(n_rolls=10**6, n_sides=20)
def expected_value_roll(n_rolls: int=100, n_sides: int=20) -> float:
rolls = np.random.randint(low=1, high=n_sides+1, size=n_rolls)
return rolls.mean()
expected_value_roll(n_rolls=10**6, n_sides=20)
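# Closed-form sanity check for a fair n-sided die (not in the original run):
# E[roll] = (n+1)/2, P(max of two = k) = (2k-1)/n^2, P(min of two = k) = (2(n-k)+1)/n^2,
# which gives E[max] = 13.825 and E[min] = 7.175 for n = 20.
n = 20
k = np.arange(1, n + 1)
print((n + 1) / 2, (k * (2 * k - 1)).sum() / n**2, (k * (2 * (n - k) + 1)).sum() / n**2)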
rolls = np.random.randint(low=1, high=20+1, size=(5, 2, 2))
rolls
rolls[0]
rolls[0, 0]
rolls.min(axis=2)
rolls.min(axis=2).max(axis=1)
def expected_value_advantage_of_disadvantage(n_rolls: int=100, n_sides: int=20) -> float:
rolls = np.random.randint(low=1, high=n_sides+1, size=(n_rolls, 2, 2))
return rolls.min(axis=2).max(axis=1).mean()
expected_value_advantage_of_disadvantage(n_rolls=10**6, n_sides=20)
def expected_value_disadvantage_of_advantage(n_rolls: int=100, n_sides: int=20) -> float:
rolls = np.random.randint(low=1, high=n_sides+1, size=(n_rolls, 2, 2))
return rolls.max(axis=2).min(axis=1).mean()
expected_value_disadvantage_of_advantage(n_rolls=10**6, n_sides=20)
def dist_roll(n_rolls: int=100, n_sides: int=20) -> pd.DataFrame:
rolls = np.random.randint(low=1, high=n_sides+1, size=n_rolls)
hist, bins = np.histogram(rolls, bins=n_sides, range=(1, n_sides+1), density=True)
df = pd.DataFrame(data={'roll': hist}, index=(int(x) for x in bins[:-1]))
df['roll_or_higher'] = df['roll'][::-1].cumsum()
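# the reversed cumulative sum above turns the per-face density into the survival function P(roll >= k)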
return df
dist_roll(n_rolls=10**6, n_sides=20)
def dist_advantage_of_disadvantage(n_rolls: int=100, n_sides: int=20) -> pd.DataFrame:
rolls = np.random.randint(low=1, high=n_sides+1, size=(n_rolls, 2, 2))
values = rolls.min(axis=2).max(axis=1)
hist, bins = np.histogram(values, bins=n_sides, range=(1, n_sides+1), density=True)
df = pd.DataFrame(data={'aod': hist}, index=(int(x) for x in bins[:-1]))
df['aod_or_higher'] = df['aod'][::-1].cumsum()
return df
dist_advantage_of_disadvantage(n_rolls=10**6, n_sides=20)
def dist_disadvantage_of_advantage(n_rolls: int=100, n_sides: int=20) -> pd.DataFrame:
rolls = np.random.randint(low=1, high=n_sides+1, size=(n_rolls, 2, 2))
values = rolls.max(axis=2).min(axis=1)
hist, bins = np.histogram(values, bins=n_sides, range=(1, n_sides+1), density=True)
df = pd.DataFrame(data={'doa': hist}, index=(int(x) for x in bins[:-1]))
df['doa_or_higher'] = df['doa'][::-1].cumsum()
return df
dist_disadvantage_of_advantage(n_rolls=10**6, n_sides=20)
def plot_strategies(n_rolls=100, n_sides=20):
df_roll = dist_roll(n_rolls=n_rolls, n_sides=n_sides)
df_aod = dist_advantage_of_disadvantage(n_rolls=n_rolls, n_sides=n_sides)
df_doa = dist_disadvantage_of_advantage(n_rolls=n_rolls, n_sides=n_sides)
df = pd.concat([df_roll, df_aod, df_doa], axis=1)
ax = df.plot.line(y=['roll_or_higher', 'aod_or_higher', 'doa_or_higher'], figsize=(10,6))
ax.set_title('Comparative Strategies')
ax.set_ylim(top=1.0, bottom=0.0)
ax.set_xlim(left=1, right=n_sides)
ax.set_xticks(range(1, n_sides+1))
ax.grid(True, axis='x', alpha=0.5)
plot_strategies(n_rolls=10**6, n_sides=20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Expected value of "advantage" and "disadvantage"
Step2: And if we want to simulate two rolls of the die, repeated 5 times, we pass a shape of (5, 2).
Step3: For "advantage", we take the max of the two rolls. Running this along axis=1 (running max along the columns) should give us what we want.
Step4: And finally we can take the mean. With a large enough sample size, this will approximate the expected value.
Step5: We can do something similar for "disadvantage".
Step6: And as a comparison, we should make sure we understand the expected value of a straight up roll.
Step7: Expected value of "advantage of disadvantage"
Step8: So again, we want to apply min (for "disadvantage") across the last dimension.
Step9: And for "advantage" of these pairs of ("disadvantage") rolls, we apply max across the last dimension.
Step10: Now we can roll it up in a method and run the simulation.
Step11: So we see that our intuition was correct and that "disadvantage of advantage" is a better strategy than "advantage of disadvantage".
|
3,820
|
<ASSISTANT_TASK:>
Python Code:
from arboretum.datasets import load_diabetes
xtr, ytr, xte, yte = load_diabetes()
xtr.shape, xte.shape
from sklearn.metrics import mean_squared_error as mse
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from arboretum import SmoothRegressionTree
from sklearn.model_selection import GridSearchCV
dtr = DecisionTreeRegressor(min_samples_leaf=5)
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=5)
mytree = SmoothRegressionTree(vss= 5, min_leaf=5)
dtr.fit(xtr, ytr)
pred = dtr.predict(xte)
mse(yte, pred)
mytree.fit(xtr, ytr)
pred = mytree.predict(xte)
mse(yte, pred)
rf.fit(xtr, ytr)
pred = rf.predict(xte)
mse(yte, pred)
params = {'min_samples_leaf':[5, 10, 20, 50, 100], 'max_depth':[2,4,8,16, None]}
gcv = GridSearchCV(dtr, params, 'neg_mean_squared_error')
gcv.fit(xtr, ytr)
gcv.best_score_, gcv.best_params_
pred = gcv.predict(xte)
mse(yte, pred)
myparams = {'min_leaf':[5, 10, 20, 50, 100], 'vss':[5, 10, 20, 50, 100]}
gcv = GridSearchCV(mytree, myparams, 'neg_mean_squared_error')
gcv.fit(xtr, ytr)
mypred = gcv.predict(xte)
mse(yte, mypred), gcv.best_score_, gcv.best_params_
myparams = {'max_depth':[2,4,8,16, None], 'vss':[5, 10, 20, 50, 100]}
gcv = GridSearchCV(mytree, myparams, 'neg_mean_squared_error')
gcv.fit(xtr, ytr)
mypred = gcv.predict(xte)
mse(yte, mypred), gcv.best_score_, gcv.best_params_
from arboretum.datasets import load_als
xtr, ytr, xte, yte = load_als()
xtr.shape, xte.shape
mse(yte, 0 * yte + ytr.mean())
dtr.fit(xtr, ytr)
pred = dtr.predict(xte)
mse(yte, pred)
mytree.fit(xtr, ytr)
pred = mytree.predict(xte)
mse(yte, pred)
rf.n_estimators = 100
rf.fit(xtr, ytr)
pred = rf.predict(xte)
mse(yte, pred)
mytree.vss = 100
pred = mytree.predict(xte)
mse(yte, pred)
params = {'min_samples_leaf':[5, 10, 20, 50, 100, 200, 400], 'max_depth':[2,4,8,16, None]}
gcv = GridSearchCV(dtr, params, 'neg_mean_squared_error')
gcv.fit(xtr, ytr)
pred = gcv.predict(xte)
mse(yte, pred), gcv.best_score_, gcv.best_params_
myparams = {'min_leaf':[5, 10, 20, 50, 100], 'vss':[5, 10, 20, 50, 100, 200, 400]}
gcv = GridSearchCV(mytree, myparams, 'neg_mean_squared_error')
gcv.fit(xtr, ytr)
mypred = gcv.predict(xte)
mse(yte, mypred), gcv.best_score_, gcv.best_params_
myparams = {'max_depth':[2,4,8,16, None], 'vss':[5, 10, 20, 50, 100, 200, 400]}
gcv = GridSearchCV(mytree, myparams, 'neg_mean_squared_error')
gcv.fit(xtr, ytr)
mypred = gcv.predict(xte)
mse(yte, mypred), gcv.best_score_, gcv.best_params_
from arboretum import RFRegressor
myrf = RFRegressor()
myrf.base_estimator = mytree
myrf.fit(xtr[:10], ytr[:10])
myrf.n_trees = 100
myrf.fit(xtr, ytr)
pred = myrf.predict(xte)
mse(yte, pred)
rf.fit(xtr, ytr)
pred = rf.predict(xte)
mse(yte, pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will compare a smoothed regression tree from arboretum to a regression tree and a random forest from scikit-learn. First, we'll just run the models once, then we will investigate their performance in more detail.
Step2: So, off-hand, it looks like the smoothed regression tree is in-between one tree and a random forest in terms of accuracy, but closer to one tree. However, it has more free parameters, so we need to investigate more. We'll give the regular tree one more control parameter so that both models have two.
Step3: Which is better than the naively set smoothing tree. Note that scikit-learn uses negative mean squared error as a scoring function because GridSearchCV maximizes the scoring function. And on the test set, that estimator gets
Step4: So how does the smoothing tree do? We'll try it two ways, once with vss and min_leaf set and then with vss and max_depth.
Step5: That's about the same. Next we'll try it with the other parameter set
Step6: And that's worse. So the initial impression that we had, that the smoothing tree was moderately better was due to better model capacity control from the extra parameter. It disappeared when we conducted a search over equal numbers of parameters.
Step7: The constant model gets mse of about 0.32, which is better than both the tree and smoothed tree (at these parameters), but worse than the RF model. The 0.26 of the RF model is a good score on this data.
Step8: The $v_{ss}$ parameter can be changed without refitting on an arboretum.SmoothRegressionTree. For this noisy data, much higher smoothing values are better.
Step9: Once again, it looks like smoothing trees are in between the results for a single tree and an RF. Like before, we'll compare the smoothing tree with vss and one other control parameter to a regular tree with two control parameters.
Step10: So once again, on closer examination, the smoothing tree is not better than a well-tuned regular tree.
Step11: From before we got
|
3,821
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
import numpy as np
from ipywidgets import interact
import matplotlib.pyplot as plt
%matplotlib nbagg
def potential(x, V0=20.0):
if x < 0 or x > 1:
return V0
else:
return 0
# Smallest position for which we will calculate psi
x_min = -1
# Largest position for which we will calculate psi
x_max = 2
# Number of points
n_points = 1000
# Create a list of all points at which we will calculate psi, and the spacing
# between those points.
all_x, delta_x = np.linspace(x_min, x_max, num=n_points, retstep=True)
def wave_function(E, all_x, delta_x, psi_0=0.0001, dpsi_dx_0=0):
Calculate the wave function at the positions all_x.
Parameters
----------
E : float
Energy to use in calculating psi. Most Energies do *not* lead to physical
solutions.
all_x : numpy array
Positions at which psi should be calculated.
delta_x: float
Spacing between those positions.
psi_0: float, optional
Value of the wave function at the first position.
dpsi_dx_0: float, optional
Value of the derivative of psi at the first position.
Returns
-------
psi: numpy array
Wave function at all positions.
# Integrate psi''(x) = 2*(V(x) - E)*psi(x) with a simple Euler scheme
# (sketch of the missing step; assumes units with hbar = m = 1, consistent
# with the dimensionless well depth V0 used in potential())
psi = [psi_0]
dpsi_dx = dpsi_dx_0
for x in all_x[:-1]:
d2psi_dx2 = 2 * (potential(x) - E) * psi[-1]
psi.append(psi[-1] + dpsi_dx * delta_x)
dpsi_dx = dpsi_dx + d2psi_dx2 * delta_x
return np.array(psi)
plt.figure(figsize=(12,10))
E_min = 2.0
E_max = 10.0
for E in np.linspace(E_min, E_max, num=7):
plt.plot(all_x, wave_function(E, all_x), label=str(E))
plt.legend(loc='lower left')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The cell below defines the potential
Step2: Double click here to edit, and describe this potential in words
Step4: YOU NEED TO ADD THE CODE BELOW TO CALCULATE PSI
Step5: The cell below calculates $\psi$ and graphs it
|
3,822
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Text Dataset type
DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml"
# Text Labeling type
LABEL_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_sentiment_io_format_1.0.0.yaml"
# Text Training task
TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_sentiment_1.0.0.yaml"
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
client = aip.DatasetServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_pipeline_client():
client = aip.PipelineServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
TIMEOUT = 90
def create_dataset(name, schema, labels=None, timeout=TIMEOUT):
start_time = time.time()
try:
dataset = aip.Dataset(
display_name=name, metadata_schema_uri=schema, labels=labels
)
operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
print("Long running operation:", operation.operation.name)
result = operation.result(timeout=TIMEOUT)
print("time:", time.time() - start_time)
print("response")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" metadata_schema_uri:", result.metadata_schema_uri)
print(" metadata:", dict(result.metadata))
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
print(" etag:", result.etag)
print(" labels:", dict(result.labels))
return result
except Exception as e:
print("exception:", e)
return None
result = create_dataset("claritin-" + TIMESTAMP, DATA_SCHEMA)
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
IMPORT_FILE = "gs://cloud-samples-data/language/claritin.csv"
SENTIMENT_MAX = 4
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
def import_data(dataset, gcs_sources, schema):
config = [{"gcs_source": {"uris": gcs_sources}, "import_schema_uri": schema}]
print("dataset:", dataset_id)
start_time = time.time()
try:
operation = clients["dataset"].import_data(
name=dataset_id, import_configs=config
)
print("Long running operation:", operation.operation.name)
result = operation.result()
print("result:", result)
print("time:", int(time.time() - start_time), "secs")
print("error:", operation.exception())
print("meta :", operation.metadata)
print(
"after: running:",
operation.running(),
"done:",
operation.done(),
"cancelled:",
operation.cancelled(),
)
return operation
except Exception as e:
print("exception:", e)
return None
import_data(dataset_id, [IMPORT_FILE], LABEL_SCHEMA)
def create_pipeline(pipeline_name, model_name, dataset, schema, task):
dataset_id = dataset.split("/")[-1]
input_config = {
"dataset_id": dataset_id,
"fraction_split": {
"training_fraction": 0.8,
"validation_fraction": 0.1,
"test_fraction": 0.1,
},
}
training_pipeline = {
"display_name": pipeline_name,
"training_task_definition": schema,
"training_task_inputs": task,
"input_data_config": input_config,
"model_to_upload": {"display_name": model_name},
}
try:
pipeline = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
print(pipeline)
except Exception as e:
print("exception:", e)
return None
return pipeline
PIPE_NAME = "claritin_pipe-" + TIMESTAMP
MODEL_NAME = "claritin_model-" + TIMESTAMP
task = json_format.ParseDict(
{
"sentiment_max": SENTIMENT_MAX,
},
Value(),
)
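# sentiment_max sets the top of the integer sentiment scale, so predicted
# scores will range over 0..SENTIMENT_MAX (here 0..4)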
response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)
# The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id)
def get_training_pipeline(name, silent=False):
response = clients["pipeline"].get_training_pipeline(name=name)
if silent:
return response
print("pipeline")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" state:", response.state)
print(" training_task_definition:", response.training_task_definition)
print(" training_task_inputs:", dict(response.training_task_inputs))
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", dict(response.labels))
return response
response = get_training_pipeline(pipeline_id)
while True:
response = get_training_pipeline(pipeline_id, True)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_id = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
raise Exception("Training Job Failed")
else:
model_to_deploy = response.model_to_upload
model_to_deploy_id = model_to_deploy.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
print("model to deploy:", model_to_deploy_id)
def list_model_evaluations(name):
response = clients["model"].list_model_evaluations(parent=name)
for evaluation in response:
print("model_evaluation")
print(" name:", evaluation.name)
print(" metrics_schema_uri:", evaluation.metrics_schema_uri)
metrics = json_format.MessageToDict(evaluation._pb.metrics)
for metric in metrics.keys():
print(metric)
print("meanAbsoluteError", metrics["meanAbsoluteError"])
print("precision", metrics["precision"])
return evaluation.name
last_evaluation = list_model_evaluations(model_to_deploy_id)
ENDPOINT_NAME = "claritin_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "claritin_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"automatic_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
},
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
test_item = ! gsutil cat $IMPORT_FILE | head -n1
if len(str(test_item[0]).split(",")) == 4:
_, test_item, test_label, max = str(test_item[0]).split(",")
else:
test_item, test_label, max = str(test_item[0]).split(",")
print(test_item, test_label)
def predict_item(data, endpoint, parameters_dict):
parameters = json_format.ParseDict(parameters_dict, Value())
# The format of each instance should conform to the deployed model's prediction input schema.
instances_list = [{"content": data}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
response = clients["prediction"].predict(
endpoint=endpoint, instances=instances, parameters=parameters
)
print("response")
print(" deployed_model_id:", response.deployed_model_id)
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", dict(prediction))
return response
response = predict_item(test_item, endpoint_id, None)
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Set up variables
Step8: Vertex constants
Step9: AutoML constants
Step10: Tutorial
Step11: Dataset
Step12: Now save the unique dataset identifier for the Dataset resource instance you created.
Step13: Data preparation
Step14: Quick peek at your data
Step15: Import data
Step16: Train the model
Step17: Construct the task requirements
Step18: Now save the unique identifier of the training pipeline you created.
Step19: Get information on a training pipeline
Step20: Deployment
Step21: Model information
Step22: Deploy the Model resource
Step23: Now get the unique identifier for the Endpoint resource you created.
Step24: Compute instance scaling
Step25: Deploy Model resource to the Endpoint resource
Step26: Make a online prediction request
Step27: Make a prediction
Step28: Undeploy the Model resource
Step29: Cleaning up
|
3,823
|
<ASSISTANT_TASK:>
Python Code:
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# use 24 hour period for consideration
index = pd.date_range("2016-01-01 00:00", "2016-01-01 23:00", freq="H")
# consumption pattern of BEV
bev_usage = pd.Series([0.0] * 7 + [9.0] * 2 + [0.0] * 8 + [9.0] * 2 + [0.0] * 5, index)
# solar PV panel generation per unit of capacity
pv_pu = pd.Series(
[0.0] * 7
+ [0.2, 0.4, 0.6, 0.75, 0.85, 0.9, 0.85, 0.75, 0.6, 0.4, 0.2, 0.1]
+ [0.0] * 5,
index,
)
# availability of charging - i.e. only when parked at office
charger_p_max_pu = pd.Series(0, index=index)
charger_p_max_pu["2016-01-01 09:00":"2016-01-01 16:00"] = 1.0
df = pd.concat({"BEV": bev_usage, "PV": pv_pu, "Charger": charger_p_max_pu}, axis=1)
df.plot.area(subplots=True, figsize=(10, 7))
plt.tight_layout()
network = pypsa.Network()
network.set_snapshots(index)
network.add("Bus", "place of work", carrier="AC")
network.add("Bus", "battery", carrier="Li-ion")
network.add(
"Generator",
"PV panel",
bus="place of work",
p_nom_extendable=True,
p_max_pu=pv_pu,
capital_cost=1000.0,
)
network.add("Load", "driving", bus="battery", p_set=bev_usage)
network.add(
"Link",
"charger",
bus0="place of work",
bus1="battery",
p_nom=120, # super-charger with 120 kW
p_max_pu=charger_p_max_pu,
efficiency=0.9,
)
network.add("Store", "battery storage", bus="battery", e_cyclic=True, e_nom=100.0)
network.lopf()
print("Objective:", network.objective)
network.generators.p_nom_opt["PV panel"]
network.generators_t.p.plot.area(figsize=(9, 4))
plt.tight_layout()
df = pd.DataFrame(
{attr: network.stores_t[attr]["battery storage"] for attr in ["p", "e"]}
)
df.plot(grid=True, figsize=(10, 5))
plt.legend(labels=["Energy output", "State of charge"])
plt.tight_layout()
(
network.generators_t.p.loc[:, "PV panel"].sum()
- network.loads_t.p.loc[:, "driving"].sum()
)
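# Equivalent sketch of the same quantity, relying on PyPSA's sign convention
# that a link's losses equal p0 + p1 (the withdrawals at its two ends):
# network.links_t.p0.sum().sum() + network.links_t.p1.sum().sum()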
network.links_t.p0.plot.area(figsize=(9, 5))
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize the network
Step2: The optimal panel size in kW is
Step3: The losses in kWh per day are
|
3,824
|
<ASSISTANT_TASK:>
Python Code:
import healpix_util as hu
import astropy as ap
import numpy as np
from astropy.io import fits
from astropy.table import Table
import astropy.io.ascii as ascii
from astropy.io import fits
from astropy.constants import c
import matplotlib.pyplot as plt
import math as m
from math import pi
#from scipy.constants import c
import scipy.special as sp
from astroML.decorators import pickle_results
from scipy import integrate
import warnings
from sklearn.neighbors import BallTree
import pickle
import multiprocessing as mp
import time
from lcdmmetric import *
from progressbar import *
from tqdm import *
from functools import partial
import pymangle
from scipy.optimize import curve_fit
#from astroML.datasets import fetch_sdss_specgals
#from astroML.correlation import bootstrap_two_point_angular
%matplotlib inline
dr12q=fits.open("./input/DR12Q.fits")
dr12q
dr12qdat=dr12q[1].data
dr12qdat.columns
z=dr12qdat['Z_PIPE']
ra=dr12qdat['RA']
dec=dr12qdat['DEC']
fdata = open("./output/DR12Q.dat",'w')
fdata.write("z\t ra\t dec \n")
for i in range(0,len(ra)):
fdata.write("%f\t" %z[i])
fdata.write("%f\t" %ra[i])
fdata.write("%f\n" %dec[i])
fdata.close()
z
ra,dec
dat=np.array([z,ra,dec])
dat
dat=dat.transpose()
dat
# Saving the objects:
with open('datDR12Q.pkl', 'w') as f: # Python 3: open(..., 'wb')
pickle.dump(dat, f)
# Getting back the objects:
with open('datDR12Q.pkl') as f: # Python 3: open(..., 'rb')
dat = pickle.load(f)
dat
Ez = lambda x: 1/m.sqrt(0.3*(1+x)**3+0.7)
Ez = np.vectorize(Ez)
#Calculate the comoving distance of a data point from its redshift. This depends on the assumed cosmology: here a flat LCDM model (Om=0.3, OL=0.7) is used, matching Ez above. Note that the c/H0 prefactor cancels in the correlation estimator and is therefore omitted.
def DC_LCDM(z):
return integrate.quad(Ez, 0, z)[0]
DC_LCDM=np.vectorize(DC_LCDM)
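# quick sanity check: the dimensionless comoving distance D_C/(c/H0) should grow monotonically with z
print(DC_LCDM([0.5, 1.0, 2.0]))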
dr12f = open("./output/DR12Qsrarf.dat",'w')
dr12f.write("z\t ra\t dec\t s\t rar\t decr \n")
for i in range(0,len(z)):
dr12f.write("%f\t " %z[i])
dr12f.write("%f\t %f\t " %(ra[i],dec[i]))
dr12f.write("%f\t " %DC_LCDM(z[i]))
dr12f.write("%f\t %f\n " %(ra[i]*pi/180.0,dec[i]*pi/180.0))
dr12f.close()
dr12Qdat=ascii.read("./output/DR12Qsrarf.dat")
s=dr12Qdat['s']
rar=dr12Qdat['rar'] # use the radian columns, matching the random catalogue below
decr=dr12Qdat['decr']
dat=np.array([s,rar,decr])
dat
dat=dat.transpose()
dat
# Saving the objects:
with open('datDR12Q.pkl', 'w') as f: # Python 3: open(..., 'wb')
pickle.dump(dat, f)
# Getting back the objects:
with open('datDR12Q.pkl') as f: # Python 3: open(..., 'rb')
dat = pickle.load(f)
dat
bins=np.arange(0.,0.08,0.005)
print bins
binsq=bins**2
binsq
len(dat)
LCDMmetricsq(dat[0],dat[1])
%%time
BTD = BallTree(dat,metric='pyfunc',func=LCDMmetricsq,leaf_size=5)
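# note: a Python callable passed via metric='pyfunc' bypasses sklearn's compiled
# metrics, so building and querying the tree is far slower than with built-ins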
with open('BTDDR12QLCDM.pkl', 'w') as f:
pickle.dump(BTD,f)
with open('BTDDR12QLCDM.pkl') as f:
BTD = pickle.load(f)
BTD
%%time
start_time=time.time()
counts_DD=BTD.two_point_correlation(dat,binsq)
print counts_DD
end_time=time.time()
tottime=end_time-start_time
print "Total run time:"
print tottime
with open('BTD12QcDDLCDM.pkl', 'w') as f:
pickle.dump(counts_DD,f)
with open('BTD12QcDDLCDM.pkl') as f:
counts_DD = pickle.load(f)
counts_DD
DD=np.diff(counts_DD)
DD
plt.plot(bins[1:len(bins)],DD,'ro-')
dataR=ascii.read("./output/rand200kDR12Q.dat")
dataR
len(dataR)
len(dat)
rdr12f = open("./output/rDR12Qsrarf.dat",'w') # separate file so the data catalogue above is not overwritten
rdr12f.write("z\t ra\t dec\t s\t rar\t decr \n")
for i in range(0,len(dataR)):
rdr12f.write("%f\t " %dataR['z'][i])
rdr12f.write("%f\t %f\t " %(dataR['ra'][i],dataR['dec'][i]))
rdr12f.write("%f\t " %DC_LCDM(dataR['z'][i]))
rdr12f.write("%f\t %f\n " %((dataR['ra'][i]*pi)/180.0,(dataR['dec'][i]*pi)/180.0))
rdr12f.close()
datR=ascii.read("./output/rDR12Qsrarf.dat")
datR
rs=np.array(datR['s'])
rrar=np.array(datR['rar'])
rdecr=np.array(datR['decr'])
datR=np.array([rs,rrar,rdecr])
datR
datR.reshape(3,len(dataR))
datR=datR.transpose()
datR
# Saving the objects:
with open('./output/rDR12Qsrarf.pkl', 'w') as f: # Python 3: open(..., 'wb')
pickle.dump(datR, f)
# Getting back the objects:
with open('./output/rDR12Qsrarf.pkl') as f: # Python 3: open(..., 'rb')
datR = pickle.load(f)
datR
%%time
BT_R2 = BallTree(datR,metric='pyfunc',func=LCDMmetricsq,leaf_size=5)
with open('./output/BTRDR12QLCDM.pkl', 'w') as f:
pickle.dump(BT_R2,f)
with open('./output/BTRDR12QLCDM.pkl') as f:
BTR = pickle.load(f)
BTR
%%time
start_time=time.time()
counts_RR=BTR.two_point_correlation(datR,bins)
print counts_RR
end_time=time.time()
tottime=end_time-start_time
print "Total run time:"
print tottime
with open('./output/BTRDR12QcRRLCDM.pkl', 'w') as f:
pickle.dump(counts_RR,f)
with open('./output/BTRDR12QcRRLCDM.pkl') as f:
counts_RR = pickle.load(f)
counts_RR
counts_RR
RR=np.diff(counts_RR)
RR
plt.plot(bins[1:len(bins)],RR,'bo-')
RR_zero = (RR == 0)
RR[RR_zero] = 1
%%time
start_time=time.time()
counts_DR=BTR.two_point_correlation(dat,binsq)
print counts_DR
end_time=time.time()
tottime=end_time-start_time
print "Total run time:"
print tottime
with open('./output/BTRDR12QcDRLCDM.pkl', 'w') as f:
pickle.dump(counts_DR,f)
with open('./output/BTRDR12QcDRLCDM.pkl') as f:
counts_DR = pickle.load(f)
counts_DR
DR=np.diff(counts_DR)
corrells=(4.0 * DD - 4.0 * DR + RR) / RR
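# Landy-Szalay estimator; the factors of 4 amount to the pair-count normalisation
# for a random catalogue with twice as many points as the data (nR/nD = 2)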
DR
corrells
plt.plot(bins[1:len(bins)],corrells,'go-')
plt.plot(bins[1:len(bins)],bins[1:len(bins)]*bins[1:len(bins)]*corrells*(c*1e-5)**2,'go-')
plt.plot(bins[2:len(bins)],bins[2:len(bins)]*bins[2:len(bins)]*corrells[1:len(bins)]*(c*1e-5)**2,'go-')
plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-')
plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-')
plt.savefig("correlDR12Qls.pdf")
plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'bo-')
plt.savefig("correl12Q1ls.pdf")
plt.yscale('log')
plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-')
plt.savefig("correllsfiglogDR12Q.pdf")
plt.yscale('log')
plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'ro-')
plt.savefig("correllslog2xDR12Q.pdf")
plt.yscale('log')
plt.xscale('log')
plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-')
plt.savefig("correllsloglogDR12Q.pdf")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the data file (taken from http
Step2: Read the data file (taken from http
Step3: BallTree.two_point_correlation works almost 10 times faster with leaf_size=5! Going with it to the random catalog
|
3,825
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
#load the files
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
data = pd.concat([train, test])
#size of training dataset
train_samples = train.shape[0]
#print some of them
data.head()
# remove the Id feature
data.drop(['Id'],1, inplace=True);
data.info()
print("Size training: {}".format(train.shape[0]))
print("Size testing: {}".format(test.shape[0]))
datanum = data.select_dtypes([np.number])
datanum.describe()
data.select_dtypes(exclude=[np.number]).head()
datanum.columns[datanum.isnull().any()].tolist()
#number of row without NaN
print(datanum.shape[0] - datanum.dropna().shape[0])
#list of columns with NaN
datanum.columns[datanum.isnull().any()].tolist()
#Filling with the mean
datanum_no_nan = datanum.fillna(datanum.mean())
#check
datanum_no_nan.columns[datanum_no_nan.isnull().any()].tolist()
import matplotlib.pyplot as plt
datanum_no_nan.drop(['SalePrice'], axis=1).head(15).plot()
plt.show()
#Squeeze the data to [0,1]
from sklearn import preprocessing
scaler = preprocessing.MinMaxScaler()
columns = datanum_no_nan.columns
columns = columns.drop('SalePrice')
print("Features: {}".format(columns))
data_norm = datanum_no_nan
data_norm[columns] = scaler.fit_transform(datanum_no_nan[columns])
print("Train shape: {}".format(data_norm.shape))
data_norm.drop(['SalePrice'], axis=1).head(15).plot()
plt.show()
data_norm.describe().T
#plotting distributions of numeric features
data_norm.hist(bins=50, figsize=(22,16))
plt.show()
data_norm['1stFlrSF'].hist()
plt.show()
#transform the data so it's closest to normal
from scipy import stats
data_gauss = data_norm.copy()
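# Box-Cox requires strictly positive inputs, hence the small +0.01 offset below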
for f in datanum.columns.tolist():
data_gauss[f], _ = stats.boxcox(data_gauss[f]+0.01)
#rescale again
std_scaler = preprocessing.StandardScaler()
data_gauss[columns] = std_scaler.fit_transform(data_gauss[columns])
data_gauss['1stFlrSF'].hist()
plt.show()
#plotting distributions of numeric features
data_gauss.hist(bins=50, figsize=(22,16))
plt.show()
#include no numbers columns
data.select_dtypes(exclude=[np.number]).head()
data_categorical = pd.get_dummies(data.select_dtypes(exclude=[np.number]))
data_all = pd.concat([data_norm, data_categorical], axis=1)
#data_norm.columns.tolist()
feat_list = ['1stFlrSF',
#'2ndFlrSF',
#'3SsnPorch',
'BedroomAbvGr',
'BsmtFinSF1',
#'BsmtFinSF2',
#'BsmtFullBath',
#'BsmtHalfBath',
'BsmtUnfSF',
#'EnclosedPorch',
#'Fireplaces',
#'FullBath',
'GarageArea',
'GarageCars',
'GarageYrBlt',
#'GrLivArea',
#'HalfBath',
#'KitchenAbvGr',
'LotArea',
'LotFrontage',
#'LowQualFinSF',
'MSSubClass',
'MasVnrArea',
#'MiscVal',
'MoSold',
'OpenPorchSF',
'OverallCond',
'OverallQual',
'PoolArea',
#'SalePrice',
#'ScreenPorch',
'TotRmsAbvGrd',
'TotalBsmtSF',
'WoodDeckSF',
'YearBuilt',
'YearRemodAdd']
#'YrSold']
%matplotlib inline
import seaborn as sns
fig = plt.figure(figsize=(14, 10))
sns.heatmap(data_norm[feat_list+['SalePrice']].corr())
#heatmap
fig = plt.figure(figsize=(14, 10))
sns.heatmap(data_norm.corr())
# Correlation features
data_norm.corr()['SalePrice'].sort_values().tail(13)
feat_low_corr = ['KitchenAbvGr',
'EnclosedPorch',
'MSSubClass',
'OverallCond',
'YrSold',
'LowQualFinSF',
'MiscVal',
'BsmtHalfBath',
'BsmtFinSF2',
'MoSold',
'3SsnPorch',
'PoolArea',
'ScreenPorch']
feat_high_corr = ['Fireplaces',
'MasVnrArea',
'YearRemodAdd',
'YearBuilt',
'TotRmsAbvGrd',
'FullBath',
'1stFlrSF',
'TotalBsmtSF',
'GarageArea',
'GarageCars',
'GrLivArea',
'OverallQual']
data_norm_low_corr = data_norm[feat_low_corr]
data_norm_high_corr = data_norm[feat_high_corr]
from sklearn.model_selection import KFold
y = np.array(data_all['SalePrice'])
X = np.array(data_norm_high_corr)
#split by idx
idx = train_samples
X_train, X_test = X[:idx], X[idx:]
y_train, y_test = y[:idx], y[idx:]
print("Shape X train: {}".format(X_train.shape))
print("Shape y train: {}".format(y_train.shape))
print("Shape X test: {}".format(X_test.shape))
print("Shape y test: {}".format(y_test.shape))
kf = KFold(n_splits=3, random_state=9, shuffle=True)
print(kf)
#plotting PCA
from sklearn.decomposition import PCA
def plotPCA(X, y):
pca = PCA(n_components=1)
X_r = pca.fit(X).transform(X)
plt.plot(X_r, y, 'x')
from sklearn.covariance import EllipticEnvelope
# fit the model
ee = EllipticEnvelope(contamination=0.05,
assume_centered=True,
random_state=9)
ee.fit(X_train)
pred = ee.predict(X_train)
X_train = X_train[pred == 1]
y_train = y_train[pred == 1]
print(X_train.shape)
print(y_train.shape)
#after removing anomalies
plotPCA(X_train, y_train)
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
rf = MLPRegressor(activation='relu',
solver='lbfgs',
#learning_rate_init=1e-2,
#learning_rate='adaptive',
#alpha=0.0001,
max_iter=400,
#shuffle=True,
hidden_layer_sizes=(64,64),
warm_start=True,
random_state=9,
verbose=False)
for e in range(1):
batch = 1;
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
rf.fit(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, rf.predict(X_t))**0.5
v_error = mean_squared_error(y_v, rf.predict(X_v))**0.5
print("{}-{}) Training error: {:.2f} Validation error: {:.2f}".format(e, batch, t_error, v_error))
batch += 1
#Scores
print("Training score: {:.4f}".format(rf.score(X_train, y_train)))
# Gradient boosting
from sklearn import ensemble
params = {'n_estimators': 100, 'max_depth': 50, 'min_samples_split': 5,
'learning_rate': 0.1, 'loss': 'ls', 'random_state':9, 'warm_start':True}
gbr = ensemble.GradientBoostingRegressor(**params)
batch = 0
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
gbr.fit(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, gbr.predict(X_t))**0.5
v_error = mean_squared_error(y_v, gbr.predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error))
batch += 1
#Scores
print("Training score: {:.4f}".format(gbr.score(X_train, y_train)))
# AdaBoost
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
abr = AdaBoostRegressor(DecisionTreeRegressor(max_depth=50),
n_estimators=100, random_state=9)
batch = 0
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
abr.fit(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, abr.predict(X_t))**0.5
v_error = mean_squared_error(y_v, abr.predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error))
batch += 1
#Scores
print("Training score: {:.4f}".format(abr.score(X_train, y_train)))
# Lasso
from sklearn.linear_model import Lasso
lr = Lasso()
batch = 0
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
lr.fit(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, lr.predict(X_t))**0.5
v_error = mean_squared_error(y_v, lr.predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error))
batch += 1
#Scores
print("Training score: {:.4f}".format(lr.score(X_train, y_train)))
### Testing
### Ada + mlp + gradient boosting -> level 1 predictions
### level 1 -> mlp -> level 2 predictions (final)
# Training
#mlp1 = MLPRegressor(activation='logistic',
# solver='sgd',
# hidden_layer_sizes=(5,5),
# learning_rate='adaptive',
# random_state=9,
# warm_start=True,
# verbose=False)
from sklearn.linear_model import LogisticRegression
mlp = LogisticRegression(random_state=9)
sclr = preprocessing.StandardScaler()
def stack_training(X, y):
X0 = rf.predict(X)
X1 = gbr.predict(X)
X2 = abr.predict(X)
X3 = lr.predict(X)
Xt = np.array([X0, X1, X2, X3]).T
#Xt = np.array([X0, X1, X2, X3, X1+X3, X2*X3, X0*X2*X3, X0/X2, X1/X3, X0/X3, (X0+X1+X2+X3)/4]).T
Xt = sclr.fit_transform(Xt)
mlp.fit(Xt, y)
def stack_predict(X, verbose=False):
X0 = rf.predict(X)
X1 = gbr.predict(X)
X2 = abr.predict(X)
X3 = lr.predict(X)
Xt = np.array([X0, X1, X2, X3]).T
#Xt = np.array([X0, X1, X2, X3, X1+X3, X2*X3, X0*X2*X3, X0/X2, X1/X3, X0/X3, (X0+X1+X2+X3)/4]).T
Xt = sclr.transform(Xt)
if verbose:
print("Training score: {:.4f}".format(mlp.score(Xt, y_train)))
plotPCA(Xt, y_train)
return mlp.predict(Xt)
#
batch = 0
kf = KFold(n_splits=10, random_state=9, shuffle=True)
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
stack_training(X_t, y_t)
    #calculate costs of the stacked model
    t_error = mean_squared_error(y_t, stack_predict(X_t))**0.5
    v_error = mean_squared_error(y_v, stack_predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error))
batch += 1
rmse = mean_squared_error(y_train, stack_predict(X_train, True))**0.5
print("RMSE: {:.4f}".format(rmse))
from sklearn.metrics import mean_squared_error
import random
RMSE_rf = mean_squared_error(y_train, rf.predict(X_train))**0.5
RMSE_gbr = mean_squared_error(y_train, gbr.predict(X_train))**0.5
RMSE_abr = mean_squared_error(y_train, abr.predict(X_train))**0.5
RMSE_lr = mean_squared_error(y_train, lr.predict(X_train))**0.5
RMSE_stack = mean_squared_error(y_train, stack_predict(X_train))**0.5
def avg_predict(X):
return (rf.predict(X) + gbr.predict(X) + abr.predict(X) + lr.predict(X))/4
predictions = avg_predict(X_train)
RMSE_total = mean_squared_error(y_train, predictions)**0.5
print("RMSE mlp: {:.3f}".format(RMSE_rf))
print("RMSE gbr: {:.3f}".format(RMSE_gbr))
print("RMSE abr: {:.3f}".format(RMSE_abr))
print("RMSE lr: {:.3f}".format(RMSE_lr))
print("====")
print("RMSE average: {:.3f}".format(RMSE_total))
print("RMSE stacked: {:.3f}".format(RMSE_stack))
import os
#predict = avg_predict(X_test)
predict = stack_predict(X_test)
file = "Id,SalePrice" + os.linesep
startId = 1461
for i in range(len(X_test)):
    file += "{},{}".format(startId, int(predict[i])) + os.linesep
startId += 1
#print(file)
# Save to file
with open('attempt.txt', 'w') as f:
f.write(file)
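# Editor's hedged addition (not in the original notebook): pandas' to_csv
# builds the same submission more robustly, assuming pandas was imported as
# pd earlier and reusing the starting Id of 1461.
submission = pd.DataFrame({'Id': np.arange(1461, 1461 + len(predict)),
                           'SalePrice': predict.astype(int)})
submission.to_csv('attempt.csv', index=False)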
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First problem
Step2: Selecting only numeric columns (by now)
Step3: Find if there's null values
Step4: Normalizing
Step5: Using Box-Cox
Step6: Splitting dataset in train and test (getting batches)
Step7: Selecting good features...
Step8: KFold
Step9: Anomaly Detection
Step10: Models
Step11: Stacked model
Step12: Evaluation
Step13: Get Predictions
|
3,826
|
<ASSISTANT_TASK:>
Python Code:
# --- INITIAL DEFINITIONS ---
from dscribe.descriptors import LMBTR
import numpy as np
from visualise import view
from ase import Atoms
import ase.data
import matplotlib.pyplot as mpl
# atomic positions as matrix
molxyz = np.load("./data/molecule.coords.npy")
# atom types
moltyp = np.load("./data/molecule.types.npy")
atoms_sys = Atoms(positions=molxyz, numbers=moltyp)
view(atoms_sys)
# Create the LMBTR descriptor for the system
mbtr = LMBTR(
species=['H', 'C', 'N', 'O', 'F'],
periodic=False,
k2={
"geometry": {"function": "distance"},
"grid": { "min": 0.0, "max": 2.0, "sigma": 0.1, "n": 100 },
"weighting": {"function": "unity"}
},
k3={
"geometry": {"function": "cosine"},
"grid": { "min": -1.0, "max": 1.0, "sigma": 0.05, "n": 100 },
"weighting": {"function": "unity"}
},
flatten=True,
sparse=False
)
print("Number of features: {}".format(mbtr.get_number_of_features()))
#Create Descriptor
desc = mbtr.create(atoms_sys, positions=[0])
print("shape of descriptor: ", desc.shape)
# Plot K2
x2 = mbtr.get_k2_axis() # this is the x axis of the histogram
# create some dictionaries to make atom Z <-- type index --> type name
imap = mbtr.index_to_atomic_number
smap = {}
for index, number in imap.items():
smap[index] = ase.data.chemical_symbols[number]
# make the plots
for i in range(1, mbtr.n_elements): # avoid showing type 0 = X (the central atom)
# this is the slice of the flattened MBTR tensor that contains the histogram
# for X-type_i - X is the central atom of the LMBTR expansion
slc = mbtr.get_location(('X',smap[i]))
# this is the slice
y2 = desc[0][slc]
mpl.plot(x2, y2, label="{}".format(smap[i]))
mpl.ylabel("$\phi$ (arbitrary units)", size=14)
mpl.xlabel("Distance (angstrom)", size=14)
mpl.title("Distance distribution", size=20)
mpl.legend()
mpl.show()
# Plot K3
x3 = mbtr.get_k3_axis()
for i in range(1, mbtr.n_elements):
for j in range(1, mbtr.n_elements):
if j <= i:
slc = mbtr.get_location(('X',smap[i],smap[j]))
mpl.plot(x3, desc[0][slc], label="{}, {}".format(smap[i], smap[j]))
mpl.xlim(left=-2)
mpl.ylabel("$\phi$ (arbitrary units)", size=14)
mpl.xlabel("cos(angle)", size=14)
mpl.title("Angle distribution", size=20)
mpl.legend(loc=3)
mpl.show()
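# Editor's hedged addition (not in the original notebook): the same create()
# call accepts a list of centers, so descriptors for every atom can be
# built at once.
desc_all = mbtr.create(atoms_sys, positions=list(range(len(atoms_sys))))
print("shape of all-atom descriptor: ", desc_all.shape)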
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Atom description
Step2: Setting LMBTR hyper-parameters
Step3: Calculate LMBTR
Step4: Plotting
|
3,827
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# Create TensorFlow object called tensor
hello_constant = tf.constant('Hello World!')
with tf.Session() as sess:
# Run the tf.constant operatin in the session
output = sess.run(hello_constant)
print(output)
# A is a 0-dimensional int32 tensor
A = tf.constant(1234)
# B is a 1-dimensional int32 tensor
B = tf.constant([123,456,789])
# C is a 2-dimensional int32 tensor
C = tf.constant([ [123,456,789], [222,333,444] ])
with tf.Session() as sess:
output = sess.run(hello_constant)
x = tf.placeholder(tf.string)
with tf.Session() as sess:
output = sess.run(x, feed_dict={x: 'Hello World'})
print(output)
x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)
with tf.Session() as sess:
output_x = sess.run(x, feed_dict={x: 'Test String', y: 123, z: 45.67})
output_y = sess.run(y, feed_dict={x: 'Test String', y: 123, z:45.67})
print(output_x)
print(output_y)
import tensorflow as tf
def run():
output = None
x = tf.placeholder(tf.int32)
with tf.Session() as sess:
# TODO: Feed the x tensor 123
output = sess.run(x, feed_dict={x: 123})
return output
run()
x = tf.add(5, 2) # 7
x = tf.subtract(10, 4) # 6
y = tf.multiply(2, 5) # 10
# tf.subtract(tf.constant(2.0), tf.constant(1)) fails with
# ValueError: Tensor conversion requested dtype float32 for Tensor with dtype int32,
# because the operands' types must match. Casting fixes the mismatch:
tf.subtract(tf.cast(tf.constant(2.0), tf.int32), tf.constant(1)) # 1
import tensorflow as tf
# TODO: Convert the following to TensorFlow:
x = tf.constant(10)
y = tf.constant(2)
z = tf.subtract(tf.divide(x, y), 1)
# TODO: Print z from a session
with tf.Session() as sess:
output = sess.run(z)
print(output)
x = tf.Variable(5)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
n_features = 120
n_labels = 5
weights = tf.Variable(tf.truncated_normal((n_features, n_labels)))
n_labels = 5
bias = tf.Variable(tf.zeros(n_labels))
x = tf.nn.softmax([2.0, 1.0, 0.2])
import tensorflow as tf
def run_2():
output = None
logit_data = [2.0, 1.0, 0.1]
logits = tf.placeholder(tf.float32)
# TODO: Calculate the softmax of the logits
    softmax = tf.nn.softmax(logits)
with tf.Session() as sess:
# TODO: Feed in the logit data
output = sess.run(softmax, feed_dict={logits: logit_data})
return output
print(run_2())
import numpy as np
from sklearn import preprocessing
# Example labels
labels = np.array([1,5,3,2,1,4,2,1,3])
# Create the encoder
lb = preprocessing.LabelBinarizer()
# Here the encoder finds the classes and assigns one-hot vectors
lb.fit(labels)
# And finally, transform the labels into one-hot encoded vectors
lb.transform(labels)
x = tf.reduce_sum([1, 2, 3, 4, 5]) # 15
l = tf.log(100) # 4.60517
import tensorflow as tf
softmax_data = [0.7, 0.2, 0.1]
one_hot_data = [1.0, 0.0, 0.0]
softmax = tf.placeholder(tf.float32)
one_hot = tf.placeholder(tf.float32)
# TODO: Print cross entropy from session
cross_entropy = -tf.reduce_sum(tf.multiply(one_hot, tf.log(softmax)))
with tf.Session() as session:
    output = session.run(cross_entropy, feed_dict={one_hot: one_hot_data, softmax: softmax_data})
print(output)
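# Editor's hedged addition (not in the original notebook): tf.one_hot is a
# TensorFlow-native alternative to sklearn's LabelBinarizer shown earlier.
one_hot_tensor = tf.one_hot([0, 2, 1], depth=3)
with tf.Session() as sess:
    print(sess.run(one_hot_tensor))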
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tensor
Step2: The tensor returned by tf.constant() is called a constant tensor, because the value of the tensor never changes.
Step3: The code has already created the tensor, hello_constant, from the previous lines. The next step is to evaluate the tensor in a session.
Step4: Use the feed_dict parameter in tf.session.run() to set the placeholder tensor. The above example shows the tensor x being set to the string "Hello, world". It's also possible to set more than one tensor using feed_dict as shown below
Step5: Note
Step6: TensorFlow Math
Step7: Let's start with the add function. The tf.add() function does exactly what you expect it to do. It takes in two numbers, two tensors, or one of each, and returns their sum as a tensor.
Step8: The x tensor will evaluate to 6, because 10 - 4 = 6. The y tensor will evaluate to 10, because 2 * 5 = 10. That was easy!
Step9: That's because the constant 1 is an integer but the constant 2.0 is a floating point value and subtract expects them to match.
Step10: Quiz
Step11: TensorFlow Linear Functions
Step12: The tf.Variable class creates a tensor with an initial value that can be modified, much like a normal Python variable. This tensor stores its state in the session, so you must initialize the state of the tensor manually. You'll use the tf.global_variables_initializer() function to initialize the state of all the Variable tensors
Step13: The tf.global_variables_initializer() call returns an operation that will initialize all TensorFlow variables from the graph. You call the operation using a session to initialize all the variables as shown above. Using the tf.Variable class allows us to change the weights and bias, but an initial value needs to be chosen.
Step14: The tf.truncated_normal() function returns a tensor with random values from a normal distribution whose magnitude is no more than 2 standard deviations from the mean.
Step15: The tf.zeros() function returns a tensor with all zeros.
Step16: Easy as that! tf.nn.softmax() implements the softmax function for you. It takes in logits and returns softmax activations.
Step17: One-Hot Encoding
Step18: TensorFlow Cross Entropy
Step19: The tf.reduce_sum() function takes an array of numbers and sums them together.
Step20: This function does exactly what you would expect it to do. tf.log() takes the natural log of a number.
|
3,828
|
<ASSISTANT_TASK:>
Python Code:
import os,json,glob,pickle
import numpy,pandas
import nibabel
import sklearn.multiclass
from sklearn.svm import SVC
import sklearn.metrics
import sklearn.model_selection
import sklearn.preprocessing
import scipy.stats,scipy.io
import random
import seaborn
%matplotlib inline
import matplotlib.pyplot as plt
datadir='data'
print('using data from %s'%datadir)
lv1_ts=scipy.io.loadmat(os.path.join(datadir,'lv1_tseries.mat'))['lv1']
rv1_ts=scipy.io.loadmat(os.path.join(datadir,'rv1_tseries.mat'))['rv1']
# scale the data so that we don't need to bother with intercept in the model
lv1_ts=sklearn.preprocessing.scale(lv1_ts.T)
rv1_ts=sklearn.preprocessing.scale(rv1_ts.T)
tsdata={'leftV1':lv1_ts,'rightV1':rv1_ts}
desmtx=scipy.io.loadmat(os.path.join(datadir,'design.mat'))['design']
labels=desmtx[:,0]
print(labels)
ntrials=desmtx.shape[0]
ntp,nvox=lv1_ts.shape
print(ntrials,'trials')
print(nvox,'voxels')
print(ntp,'timepoints')
lv1_ts.shape
# Reproduce the deconvolution analysis using an FIR model
# the onset times are in volumes, so we just use tr=1
# use 20-second window
def make_fir_model(onsets,tslength,hrflength=48,tr=1):
    """Generate an FIR model design matrix.

    This only works for a single condition.
    """
X=numpy.zeros((tslength,int(hrflength/tr)))
for i in range(hrflength):
for o in onsets:
try:
X[o+i,i]=1
except:
pass
return X
desmtx_df=pandas.DataFrame(desmtx,columns=['condition','onset'])
onsets={}
onsets['neutral']=desmtx_df.query('condition==0').onset.values
onsets['attendleft']=desmtx_df.query('condition==1').onset.values
onsets['attendright']=desmtx_df.query('condition==2').onset.values
left_fir=make_fir_model(onsets['attendleft'],ntp)
right_fir=make_fir_model(onsets['attendright'],ntp)
neutral_fir=make_fir_model(onsets['neutral'],ntp)
fir=numpy.hstack((left_fir,right_fir,neutral_fir))
# show the design matrix
plt.imshow(fir[:400,:])
plt.axis('auto')
print(fir.shape)
# estimate the model
beta_hat_left=numpy.linalg.inv(fir.T.dot(fir)).dot(fir.T).dot(lv1_ts)
beta_hat_right=numpy.linalg.inv(fir.T.dot(fir)).dot(fir.T).dot(rv1_ts)
plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plt.plot(beta_hat_left[:48].mean(1))
plt.plot(beta_hat_left[48:96].mean(1))
plt.plot(beta_hat_left[96:144].mean(1))
plt.legend(['attend left','attend right','neutral'])
plt.title('Left V1')
plt.subplot(1,2,2)
plt.plot(beta_hat_right[:48].mean(1))
plt.plot(beta_hat_right[48:96].mean(1))
plt.plot(beta_hat_right[96:144].mean(1))
plt.legend(['attend left','attend right','neutral'])
plt.title('Right V1')
pred_left=fir.dot(beta_hat_left)
# check fit of the model over first 500 timepoints
plt.figure(figsize=(14,4))
plt.plot(sklearn.preprocessing.scale(lv1_ts.mean(1)[:500]))
plt.plot(sklearn.preprocessing.scale(rv1_ts.mean(1)[:500]))
meanpred=sklearn.preprocessing.scale(pred_left.mean(1))
plt.plot(meanpred[:500])
pred_left.mean(1).shape
def run_classifier(data,labels, shuffle=False,nfolds=5,scale=True,
clf=None):
    """Run the classifier on a single dataset."""
features=data
if scale:
features=sklearn.preprocessing.scale(features)
if shuffle:
numpy.random.shuffle(labels)
if not clf:
        clf=sklearn.svm.SVC(C=1.0) # C was undefined in the original; 1.0 is sklearn's default
    skf = sklearn.model_selection.StratifiedKFold(nfolds, shuffle=True)
pred=numpy.zeros(labels.shape[0])
for train, test in skf.split(features,labels):
clf.fit(features[train,:],labels[train])
pred[test]=clf.predict(features[test,:])
acc=sklearn.metrics.accuracy_score(labels, pred)
return acc
def get_accuracy_timeseries(tsdata,labels_attend,onsets,shuffle=False,clf=None,window=40,
voxels=None):
    """Iterate over timepoints, training a classifier at each one."""
acc=numpy.zeros(window)
for tp in range(window):
# pull out data for each trial/timepoint
if voxels is None:
data=numpy.zeros((len(labels_attend),tsdata['leftV1'].shape[1] + tsdata['rightV1'].shape[1]))
else:
data=numpy.zeros((len(labels_attend),tsdata[voxels+'V1'].shape[1]))
ctr=0
for cond in ['attendleft','attendright']:
for ons in onsets[cond]:
if voxels is None:
data[ctr,:]=numpy.hstack((tsdata['leftV1'][ons+tp,:],tsdata['rightV1'][ons+tp,:]))
else:
data[ctr,:]=tsdata[voxels+'V1'][ons+tp,:]
ctr+=1
acc[tp]=run_classifier(data,labels_attend,clf=clf,shuffle=shuffle)
return acc
labels_attend=numpy.array([i for i in labels if i > 0])
#clf=sklearn.linear_model.LogisticRegressionCV(penalty='l1',solver='liblinear')
#clf=sklearn.svm.SVC(C=1)
tuned_parameters = [{'C': [0.0005,0.001,0.005,0.01,0.05, 0.1]}]
clf = sklearn.model_selection.GridSearchCV(sklearn.svm.LinearSVC(C=1), tuned_parameters, cv=5)
acc_all=get_accuracy_timeseries(tsdata,labels_attend,onsets,clf=clf)
acc_left=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='left',clf=clf)
acc_right=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='right',clf=clf)
plt.figure(figsize=(14,5))
plt.subplot(1,3,1)
plt.plot(numpy.arange(0,20,0.5),acc_all)
plt.axis([0,20,0,1])
plt.plot([0,20],[0.5,0.5],'k--')
plt.title('All voxels')
plt.xlabel('Time (seconds)')
plt.ylabel('Percent classification accuracy')
plt.subplot(1,3,2)
plt.plot(numpy.arange(0,20,0.5),acc_left)
plt.axis([0,20,0,1])
plt.plot([0,20],[0.5,0.5],'k--')
plt.title('Left V1')
plt.xlabel('Time (seconds)')
plt.ylabel('Percent classification accuracy')
plt.subplot(1,3,3)
plt.plot(numpy.arange(0,20,0.5),acc_right)
plt.axis([0,20,0,1])
plt.plot([0,20],[0.5,0.5],'k--')
plt.title('Right V1')
plt.xlabel('Time (seconds)')
plt.ylabel('Percent classification accuracy')
# if the saved results already exist then just reload them, to save time
if os.path.exists('shuffled_accuracy.pkl'):
print('loading existing shuffled data')
acc_all_rand,acc_left_rand,acc_right_rand,clf=pickle.load(open('shuffled_accuracy.pkl','rb'))
else:
acc_all_rand=numpy.zeros((100,40))
acc_left_rand=numpy.zeros((100,40))
acc_right_rand=numpy.zeros((100,40))
for i in range(100):
print(i)
acc_all_rand[i,:]=get_accuracy_timeseries(tsdata,labels_attend,onsets,shuffle=True,clf=clf)
acc_left_rand[i,:]=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='left',shuffle=True,clf=clf)
acc_right_rand[i,:]=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='right',shuffle=True,clf=clf)
pickle.dump((acc_all_rand,acc_left_rand,acc_right_rand,clf),open('shuffled_accuracy.pkl','wb'))
rand_percentile=(1 - 0.05/40)*100 # percent cutoff for randomization, bonferroni corrected
nrand=acc_all_rand.shape[0]
plt.figure(figsize=(14,5))
plt.subplot(1,3,1)
plt.plot(numpy.arange(0,20,0.5),acc_all)
plt.axis([0,20,0,1])
plt.plot([0,20],[0.5,0.5],'k--')
plt.title('All voxels')
plt.xlabel('Time (seconds)')
plt.ylabel('Percent classification accuracy')
for i in range(nrand):
plt.plot(numpy.arange(0,20,0.5),acc_all_rand[i,:],'r',linewidth=0.01)
cutoff=numpy.zeros(40)
for i in range(40):
cutoff[i]=scipy.stats.scoreatpercentile(acc_all_rand[:,i],rand_percentile)
if acc_all[i]>cutoff[i]:
plt.text(i/2,0.9,'*')
plt.plot(numpy.arange(0,20,0.5),cutoff,'g--')
plt.subplot(1,3,2)
plt.plot(numpy.arange(0,20,0.5),acc_left)
plt.axis([0,20,0,1])
plt.plot([0,20],[0.5,0.5],'k--')
plt.title('Left V1')
plt.xlabel('Time (seconds)')
plt.ylabel('Percent classification accuracy')
for i in range(nrand):
plt.plot(numpy.arange(0,20,0.5),acc_left_rand[i,:],'r',linewidth=0.01)
cutoff=numpy.zeros(40)
for i in range(40):
cutoff[i]=scipy.stats.scoreatpercentile(acc_left_rand[:,i],rand_percentile)
if acc_left[i]>cutoff[i]:
plt.text(i/2,0.9,'*')
plt.plot(numpy.arange(0,20,0.5),cutoff,'g--')
plt.subplot(1,3,3)
plt.plot(numpy.arange(0,20,0.5),acc_right)
plt.axis([0,20,0,1])
plt.plot([0,20],[0.5,0.5],'k--')
plt.title('Right V1')
plt.xlabel('Time (seconds)')
plt.ylabel('Percent classification accuracy')
for i in range(nrand):
plt.plot(numpy.arange(0,20,0.5),acc_right_rand[i,:],'r',linewidth=0.01)
cutoff=numpy.zeros(40)
for i in range(40):
cutoff[i]=scipy.stats.scoreatpercentile(acc_right_rand[:,i],rand_percentile)
if acc_right[i]>cutoff[i]:
plt.text(i/2,0.9,'*')
plt.plot(numpy.arange(0,20,0.5),cutoff,'g--')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this exercise we will decode orientation using data collected for the Cognitive Neuroscience module in 2017. The subject performed a task that manipulated whether attention was placed towards the left or right visual field, or with no attentional focus (control condition). The stimulus was two gabor patches left and right of fixation flickering at 5 Hz, with the following timing
Step4: Classification analysis
Step5: Plot the results
Step6: Now let's run it with the labels shuffled 100 times to see how good these results are compared to chance. This will take a little while to complete. For a real analysis one would want to do this many more times (up to ~5000) in order for the distribution of extreme values to stabilize.
Step7: Now we plot those results alongside the true classification results, adding an asterisk at the timepoints where the observed accuracy is greater than the 99th percentile of the random accuracies.
|
3,829
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
np.random.seed(1)
# Define a function that,
def f(x):
# Outputs x multiplied by a random number drawn from a normal distribution
return x * np.random.normal(size=1)[0]
# Create some values of x
xs = [1,2,3,4,5,6]
#Define argmin that
def argmin(f, xs):
# Applies f on all the x's
data = [f(x) for x in xs]
# Finds index of the smallest output of f(x)
index_of_min = data.index(min(data))
# Returns the x that produced that output
return xs[index_of_min]
# Run the argmin function
argmin(f, xs)
print('x','|', 'f(x)')
print('--------------')
for x in xs:
print(x,'|', f(x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define A Function, f(x)
Step2: Create Some Values Of x
Step3: Find The Argmin Of f(x)
Step4: Check Our Results
|
3,830
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
df = pd.read_csv("data/hanford.csv")
df
df['Exposure'].mean()
df['Exposure'].describe()
df.corr()
df.plot(kind='scatter', x='Exposure', y='Mortality')
lm = smf.ols(formula='Mortality~Exposure',data=df).fit()
lm.params
intercept, Exposure = lm.params
Mortality = Exposure*10+intercept
Mortality
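# Editor's hedged addition (not in the original): Step 6 asks for the fitted
# line and r^2, which the cells above stop short of; lm.rsquared and a quick
# overlay cover both.
print(lm.rsquared)
ax = df.plot(kind='scatter', x='Exposure', y='Mortality')
ax.plot(df['Exposure'], lm.predict(df), color='red')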
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Read in the hanford.csv file
Step2: 3. Calculate the basic descriptive statistics on the data
Step3: 4. Calculate the coefficient of correlation (r) and generate the scatter plot. Does there seem to be a correlation worthy of investigation?
Step4: 5. Create a linear regression model based on the available data to predict the mortality rate given a level of exposure
Step5: 6. Plot the linear regression line on the scatter plot of values. Calculate the r^2 (coefficient of determination)
|
3,831
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Install TensorFlow Decision Forests and the dependencies used in this colab.
!pip install tensorflow_decision_forests plotly wurlitzer -U -qq
import tensorflow_decision_forests as tfdf
import matplotlib.colors as mcolors
import math
import os
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from plotly.offline import iplot
import plotly.graph_objs as go
# Download a copy of the adult dataset.
!wget -q https://raw.githubusercontent.com/google/yggdrasil-decision-forests/main/yggdrasil_decision_forests/test_data/dataset/adult_train.csv -O /tmp/adult_train.csv
!wget -q https://raw.githubusercontent.com/google/yggdrasil-decision-forests/main/yggdrasil_decision_forests/test_data/dataset/adult_test.csv -O /tmp/adult_test.csv
# Load the dataset in memory
train_df = pd.read_csv("/tmp/adult_train.csv")
test_df = pd.read_csv("/tmp/adult_test.csv")
# ...and convert it into a TensorFlow dataset.
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_df, label="income")
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_df, label="income")
# Print the first 5 examples.
train_df.head()
# Train a Random Forest
model = tfdf.keras.RandomForestModel(num_trees=1000)
model.fit(train_ds)
model_inspector = model.make_inspector()
out_of_bag_accuracy = model_inspector.evaluation().accuracy
print(f"Out-of-bag accuracy: {out_of_bag_accuracy:.4f}")
# The test accuracy is measured on the test datasets.
model.compile(["accuracy"])
test_accuracy = model.evaluate(test_ds, return_dict=True, verbose=0)["accuracy"]
print(f"Test accuracy: {test_accuracy:.4f}")
print("The model contains", model_inspector.num_trees(), "trees.")
print("The test dataset contains", test_df.shape[0], "examples.")
leaves = model.predict_get_leaves(test_ds)
print("The leaf indices:\n", leaves)
print("The predicted leaves have shape", leaves.shape,
"(we expect [num_examples, num_trees]")
def compute_proximity(leaves, step_size=100):
  """Computes the proximity between each pair of examples.

  Args:
    leaves: A matrix of shape [num_example, num_tree] where the value [i,j] is
      the index of the leaf reached by example "i" in the tree "j".
    step_size: Size of the block of examples for the computation of the
      proximity. Does not impact the results.

  Returns:
    The example pair-wise proximity matrix of shape [n,n] with "n" the number
    of examples.
  """
example_idx = 0
num_examples = leaves.shape[0]
t_leaves = np.transpose(leaves)
proximities = []
  # Instead of computing the proximity between all the examples at the same
  # time, we compute the similarity in blocks of "step_size" examples. This
  # makes the code more efficient with the numpy broadcast.
while example_idx < num_examples:
end_idx = min(example_idx + step_size, num_examples)
proximities.append(
np.mean(
leaves[..., np.newaxis] == t_leaves[:,
example_idx:end_idx][np.newaxis,
...],
axis=1))
example_idx = end_idx
return np.concatenate(proximities, axis=1)
proximity = compute_proximity(leaves)
print("The shape of proximity is", proximity.shape)
proximity
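# Editor's hedged sanity check (not in the original colab): the proximity
# matrix should be symmetric, non-negative, and have a unit diagonal.
assert np.allclose(proximity, proximity.T)
assert np.allclose(np.diag(proximity), 1.0)
assert (proximity >= 0).all()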
distance = 1 - proximity
t_sne = TSNE(
# Number of dimensions to display. 3d is also possible.
n_components=2,
# Control the shape of the projection. Higher values create more
# distinct but also more collapsed clusters. Can be in 5-50.
perplexity=20,
metric="precomputed",
init="random",
verbose=1,
square_distances=True,
learning_rate="auto").fit_transform(distance)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
ax.grid(False)
# Color the points according to the label value.
colors = (test_df["income"] == ">50K").map(lambda x: ["orange", "green"][x])
ax.scatter(
t_sne[:, 0], t_sne[:, 1], c=colors, linewidths=0.5, marker="x", s=20)
# docs_infra: no_execute
# Note: Run the colab (click the "Run in Google Colab" link at the top) to see
# the interactive plot.
def interactive_plot(dataset, projections):
def label_fn(row):
    """HTML printer over each example."""
return "<br>".join([f"<b>{k}:</b> {v}" for k, v in row.items()])
labels = list(dataset.apply(label_fn, axis=1).values)
iplot({
"data": [
go.Scatter(
x=projections[:, 0],
y=projections[:, 1],
text=labels,
mode="markers",
marker={
"color": colors,
"size": 3,
})
],
"layout": go.Layout(width=600, height=600, template="simple_white")
})
interactive_plot(test_df, t_sne)
# Number of columns and rows in the multi-plot.
num_plot_cols = 5
num_plot_rows = math.ceil(test_df.shape[1] / num_plot_cols)
# Color palette for the categorical features.
palette = list(mcolors.TABLEAU_COLORS.values())
# Create the plot
plot_size_in = 3.5
fig, axs = plt.subplots(
num_plot_rows,
num_plot_cols,
figsize=(num_plot_cols * plot_size_in, num_plot_rows * plot_size_in))
# Hide the borders.
for row in axs:
for ax in row:
ax.set_axis_off()
for col_idx, col_name in enumerate(test_df):
ax = axs[col_idx // num_plot_cols, col_idx % num_plot_cols]
colors = test_df[col_name]
if colors.dtypes in [str, object]:
# Use the color palette on categorical features.
unique_values = list(colors.unique())
colors = colors.map(
lambda x: palette[unique_values.index(x) % len(palette)])
ax.set_title(col_name)
ax.scatter(t_sne[:, 0], t_sne[:, 1], c=colors.values, linewidths=0.5,
marker="x", s=5)
def select_example(labels, distance_matrix, k):
  """Selects the example with the highest number of neighbors with the same class.

  Usage example:
    n = 5
    select_example(
        np.random.randint(0, 2, size=n),
        np.random.uniform(size=(n, n)),
        2)

  Returns:
    The list of neighbors for the selected example. Includes the selected
    example.
  """
partition = np.argpartition(distance_matrix, k)[:,:k]
same_label = np.mean(np.equal(labels[partition], np.expand_dims(labels, axis=1)), axis=1)
selected_example = np.argmax(same_label)
return partition[selected_example, :]
def extract_prototype_examples(labels, distance_matrix, k, num_prototypes):
  """Extracts a list of examples in each prototype.

  Usage example:
    n = 50
    print(extract_prototype_examples(
        labels=np.random.randint(0, 2, size=n),
        distance_matrix=np.random.uniform(size=(n, n)),
        k=2,
        num_prototypes=3))

  Returns:
    An array where E[i][j] is the index of the j-th example of the i-th
    prototype.
  """
example_idxs = np.arange(len(labels))
prototypes = []
examples_per_prototype = []
for iter in range(num_prototypes):
print(f"Iter #{iter}")
# Select the example
neighbors = select_example(labels, distance_matrix, k)
# Index of the examples in the prototype
examples_per_prototype.append(list(example_idxs[neighbors]))
# Remove the selected examples
example_idxs = np.delete(example_idxs, neighbors)
labels = np.delete(labels, neighbors)
distance_matrix = np.delete(distance_matrix, neighbors, axis=0)
distance_matrix = np.delete(distance_matrix, neighbors, axis=1)
return examples_per_prototype
examples_per_prototype = extract_prototype_examples(test_df["income"].values, distance, k=20, num_prototypes=10)
print(f"Found examples for {len(examples_per_prototype)} prototypes.")
def build_prototype(dataset):
  """Extracts the feature statistics of a prototype.

  For numerical features, returns the quantiles.
  For categorical features, returns the most frequent value.

  Usage example:
    n = 50
    print(build_prototype(
        pd.DataFrame({
            "f1": np.random.uniform(size=n),
            "f2": np.random.uniform(size=n),
            "f3": [f"v_{x}" for x in np.random.randint(0, 2, size=n)],
            "label": np.random.randint(0, 2, size=n)
        })))

  Returns:
    A prototype as a dictionary of strings.
  """
prototype = {}
for col in dataset.columns:
col_values = dataset[col]
if col_values.dtypes in [str, object]:
# A categorical feature.
# Remove the missing values
col_values = [x for x in col_values if isinstance(x,str) or not math.isnan(x)]
# Frequency of each possible value.
frequency_item, frequency_count = np.unique(col_values, return_counts=True)
top_item_idx = np.argmax(frequency_count)
top_item_probability = frequency_count[top_item_idx] / np.sum(frequency_count)
# Print the most common item.
prototype[col] = f"{frequency_item[top_item_idx]} ({100*top_item_probability:.0f}%)"
else:
# A numerical feature.
quartiles = np.nanquantile(col_values.values, [0.25, 0.5, 0.75])
# Print the 3 quantiles.
prototype[col] = f"{quartiles[0]} {quartiles[1]} {quartiles[2]}"
return prototype
# Extract the statistics of each prototype.
prototypes = []
for examples in examples_per_prototype:
  # Prototype statistics.
prototypes.append(build_prototype(test_df.iloc[examples, :]))
prototypes = pd.DataFrame(prototypes)
prototypes
# Extract the projection of each prototype.
prototypes_projection = []
for examples in examples_per_prototype:
# t-SNE for each prototype.
prototypes_projection.append(np.mean(t_sne[examples,:],axis=0))
prototypes_projection = np.stack(prototypes_projection)
# Plot the mean 2d t-SNE projection of the elements in the prototypes.
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
ax.grid(False)
# Color the points according to the label value.
colors = (test_df["income"] == ">50K").map(lambda x: ["orange", "green"][x])
ax.scatter(
t_sne[:, 0], t_sne[:, 1], c=colors, linewidths=0.5, marker="x", s=20)
# Add the prototype indices.
for i in range(prototypes_projection.shape[0]):
ax.text(prototypes_projection[i, 0],
prototypes_projection[i, 1],
f"{i}",
fontdict={"size":18},
c="red")
example_idx = 0
k = 20
neighbors = np.argpartition(distance[example_idx, :], k)[:k]
print(f"The example #{example_idx} is:")
print("===============================")
print(test_df.iloc[example_idx, :])
print("")
print(f"The prototype around the example #{example_idx} is:")
print("============================================")
print(pd.Series(build_prototype(test_df.iloc[neighbors, :])))
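# Editor's hedged follow-up (not in the original colab): label purity per
# prototype helps interpret the prototype table built above.
for i, examples in enumerate(examples_per_prototype):
    purity = (test_df["income"].iloc[examples] == ">50K").mean()
    print(f"Prototype {i}: {100 * purity:.0f}% of its examples earn >50K")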
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Proximities and Prototypes with Random Forests
Step2: Train a Random Forest model
Step3: Following are the first five examples of the training dataset. Notice that the dataset mixes numerical and categorical features.
Step4: A Random Forest is trained as follows
Step5: The performance of the Random Forest model is
Step6: This is an expected accuracy value for Random Forest models on this dataset. It indicates that the model is correctly trained.
Step7: Proximities
Step8: The method predict_get_leaves() returns the index of the active leaf for each example and each tree.
Step10: Here, leaves[i,j] is the index of the active leaf of the i-th example in the j-th tree.
Step11: Here, proximity[i,j] is the proximity between examples i and j.
Step12: The proximity matrix has several interesting properties, notably, it is symmetrical, positive, and the diagonal elements are all 1.
Step13: The next plot shows a two-dimensional projection of the test example features. The color of the points encodes the label value.
Step15: Observations
Step16: Instructions
Step19: Prototypes
Step20: Using the methods above, let's extract the examples for 10 prototypes.
Step22: For each of those prototypes, we want to display the statistics of the feature values. In this example, we will look at the quartiles of the numerical features, and the most frequent values for the categorical features.
Step23: Now, let's look at our prototypes.
Step24: Try to make sense of the prototypes.
Step25: We see that the 10 prototypes cover around half of the domain. Clusters of examples without a prototype would be best explained with more prototypes.
|
3,832
|
<ASSISTANT_TASK:>
Python Code:
x = 0
while x < 10:
print 'x is currently: ',x
print ' x is still less than 10, adding 1 to x'
x+=1
x = 0
while x < 10:
print 'x is currently: ',x
print ' x is still less than 10, adding 1 to x'
x+=1
else:
print 'All Done!'
x = 0
while x < 10:
print 'x is currently: ',x
print ' x is still less than 10, adding 1 to x'
x+=1
    if x == 3:
print 'x==3'
else:
print 'continuing...'
continue
x = 0
while x < 10:
print 'x is currently: ',x
print ' x is still less than 10, adding 1 to x'
x+=1
    if x == 3:
print 'Breaking because x==3'
break
else:
print 'continuing...'
continue
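# pass example (editor's hedged addition): the notes mention pass alongside
# break and continue, but none of the cells above demonstrate it.
x = 0
while x < 3:
    print 'x is currently: ',x
    if x == 1:
        pass # a no-op placeholder; execution simply falls through
    x+=1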
# DO NOT RUN THIS CODE!!!!
while True:
print 'Uh Oh infinite Loop!'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notice how many times the print statements occurred and how the while loop kept going until the condition was met, which occurred once x == 10. It's important to note that once this occurred the code stopped. Let's see how we could add an else statement
Step2: break, continue, pass
Step3: Note how we have a printed statement when x == 3, and a continue being printed out as we continue through the outer while loop. Let's put in a break once x == 3 and see if the result makes sense
Step4: Note how the other else statement wasn't reached and continuing was never printed!
|
3,833
|
<ASSISTANT_TASK:>
Python Code:
for i in range(4):
print(i)
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
X = np.linspace(-np.pi, np.pi, 656)
F = np.sin(1/(X**2+0.07))
plt.plot(X,F)
from ipywidgets import interact
def f(x):
print(x)
interact(f, x=10);
from ipywidgets import widgets
w = widgets.IntSlider(min=0,max=10,value=3,step=1,width="430px")
w
w.value
w.value = 4
from math import sin,cos
%timeit sin(cos(sin(1.23)))
import numpy as np
%timeit np.sum(np.sin(np.random.randn(int(1e6))))
%timeit np.sum(np.sin(np.random.randn(int(1e6))))
%time np.sum(np.sin(np.random.randn(int(1e6))))
%%sh
pwd
for i in `ls`
do
echo Plik: $i
done
from ipywidgets import widgets
w = widgets.Text()
w
def mojcallback(w):
print("OK ----",w.value)
w.on_submit(mojcallback)
w.value = "dads"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: widgets
Step2: Notebook magics
Step3: Widget callback example
|
3,834
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from apiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import pprint
# This is a public OAuth config that you can use to run this guide.
# However, use different credentials when building your own solution.
CLIENT_CONFIG = {
'installed': {
'client_id':'882252295571-uvkkfelq073vq73bbq9cmr0rn8bt80ee.apps.googleusercontent.com',
'client_secret': 'S2QcoBe0jxNLUoqnpeksCLxI',
'auth_uri':'https://accounts.google.com/o/oauth2/auth',
'token_uri':'https://accounts.google.com/o/oauth2/token'
}
}
SCOPES = ['https://www.googleapis.com/auth/androidmanagement']
# Run the OAuth flow.
flow = InstalledAppFlow.from_client_config(CLIENT_CONFIG, SCOPES)
credentials = flow.run_console()
# Create the API client.
androidmanagement = build('androidmanagement', 'v1', credentials=credentials)
print('\nAuthentication succeeded.')
# To improve the readability of deeply nested data structures, we create a
# helper function to pretty print a result:
prettyprint = pprint.PrettyPrinter(indent=4).pprint
# Paste your enterprise name here.
# It should be of the format 'enterprises/LC...'
enterprise_name = ''
IFRAME_URL = "https://storage.googleapis.com/android-management-api-samples/managed_play_iframe.html"
web_token = androidmanagement.enterprises().webTokens().create(
parent=enterprise_name,
body={
"parentFrameUrl": IFRAME_URL
}
).execute()
full_iframe_url = IFRAME_URL + "?mode=SELECT&token=" + web_token["value"]
print('Open the managed Google Play iframe:', full_iframe_url)
policy_name = enterprise_name + '/policies/policy1'
# Paste the package name of the web app here
# It should be of the format 'com.google.enterprise.webapp.*'.
web_app_package_name = ''
policy = {
'applications': [
{
'installType': 'FORCE_INSTALLED',
'packageName': 'com.android.chrome'
},
{
'installType': 'FORCE_INSTALLED',
'packageName': web_app_package_name
}
],
'debuggingFeaturesAllowed': True
}
# Write the new policy:
result = androidmanagement.enterprises().policies().patch(
name=policy_name,
body=policy
).execute()
prettyprint(result)
import base64
import urllib.request
# A useful picture found on the Wikipedia page:
icon_url = 'https://developer.android.com/images/brand/Android_Robot.png'
icon_raw_data = urllib.request.urlopen(icon_url).read()
icon_encoded_data = base64.urlsafe_b64encode(icon_raw_data).decode("utf-8")
web_app = {
"title": "Android",
"startUrl": "https://en.m.wikipedia.org/wiki/Android_(operating_system)",
"displayMode": "MINIMAL_UI",
"icons": [
{
"imageData": icon_encoded_data
}
]
}
result = androidmanagement.enterprises().webApps().create(
parent=enterprise_name,
body=web_app
).execute()
# Take note of the app's name (which is it's id) for future operations:
web_app_name = result['name']
prettyprint(result)
web_app = {
"title": "Wikipedia: Android",
"startUrl": "https://en.m.wikipedia.org/wiki/Android_(operating_system)",
"displayMode": "STANDALONE",
"icons": [
{
"imageData": icon_encoded_data
}
]
}
result = androidmanagement.enterprises().webApps().patch(
name=web_app_name,
body=web_app
).execute()
prettyprint(result)
webapps = androidmanagement.enterprises().webApps().list(
parent=enterprise_name
).execute()
prettyprint(webapps)
webapp = androidmanagement.enterprises().webApps().get(
name=web_app_name
).execute()
prettyprint(webapp)
androidmanagement.enterprises().webApps().delete(
name=web_app_name
).execute()
print ("Admins visit https://play.google.com/work/apps/details?id={}, users https://play.google.com/store/apps/details?id={} to see the Play store listing.".format(web_app_package_name, web_app_package_name))
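# Editor's hedged addition (not in the original guide): listing the
# enterprise's policies is a quick way to confirm the patch above; this
# assumes the standard enterprises.policies.list method of the API.
policies = androidmanagement.enterprises().policies().list(
    parent=enterprise_name
).execute()
prettyprint(policies)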
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Android Management API - Quickstart for Web Apps
Step2: Declare your enterprise
Step3: Create a web app with the managed Google Play iframe
Step4: Distributing the web app
Step5: Create a web app with the Android Management API
Step6: The result of the create call basically mirrors its inputs but with the icon data and the name filled in.
Step7: enterprises.webApps.list
Step8: enterprises.webApps.get
Step9: enterprises.webApps.delete
Step10: Web apps in the Play store
|
3,835
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import os
s3file = 'https://dsclouddata.s3.amazonaws.com/churn.csv'
churnDF = pd.read_csv(s3file, delimiter=',')
churnDF.head(5)
# Install MySQLdb, the interface to the popular MySQL database server for Python for use with SQLalchemy
!sudo apt-get install python-mysqldb
!pip install sqlalchemy
from sqlalchemy import create_engine
#Create these environment variables in your DataScience.com project or user profile
mySQL_Username = os.environ['BRETT_MYSQL_USERNAME']
mySQL_Password = os.environ['BRETT_MYSQL_PASSWORD']
mySQL_IP = os.environ['BRETT_MYSQL_IP']
#Create a connection to the MySQL database
engine = create_engine("mysql+mysqldb://"+mySQL_Username+":"+mySQL_Password+"@"+mySQL_IP+"/customers")
# Use Pandas.to_sql to write the dataframe as a database table called 'account_info'. This is a useful way to make
# Python data available to external BI tools such as Tableau
churnDF.to_sql(con=engine, name='account_info', if_exists='replace')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Put the Data in a Database
Step2: Write the dataframe as a table called 'account_info'
|
3,836
|
<ASSISTANT_TASK:>
Python Code:
class Student(object):
skills = []
def __init__(self, name):
self.name = name
stu = Student('ly')
print Student.skills # access the class data attribute
Student.skills.append('Python')
print Student.skills
print stu.skills # class data attributes are also reachable through instances
print dir(Student)
Student.age = 25 # dynamically add a class data attribute via the class name
print dir(Student)
print stu.age
class Student(object):
    """Student class"""
skills = []
def __init__(self, name):
self.name = name
stu = Student('ly')
print Student.__name__ # the class name: Student
print Student.__doc__ # the class docstring
print Student.__bases__
print Student.__class__ # the class's __class__
print stu.__class__ # the instance's __class__
print isinstance(stu, Student)
print dir(Student)
print dir(stu)
print Student.__name__ # __name__ is reachable via the class Student
# print stu.__name__ # raises AttributeError: __name__ is not reachable via an instance
class Animal(object):
    # initializer
    def __init__(self, name):
        self.name = name
    # ordinary instance method
    def get_name(self):
        return self.name
animal = Animal(name='ly')
print animal.name
print animal.get_name()
class Animal(object):
@staticmethod
def hello():
print 'hello'
animal = Animal()
Animal.hello() # called via the class
animal.hello() # called via an instance
class Countable(object):
counter = 0
def __init__(self):
        Countable.counter += 1 # class data attributes must be referenced through the class inside methods
@classmethod
def get_count(cls):
return Countable.counter
a = Countable()
b = Countable()
print Countable.get_count() # called via the class
print a.get_count() # called via an instance
class Animal(object):
def __init__(self, name, age):
self.__name = name
self.age = age
animal = Animal('ly', 25)
print animal.age
print animal._Animal__name
# print animal.name # raises AttributeError: __name is name-mangled to _Animal__name
class Animal(object):
def __init__(self, name, age):
self.__name = name
self.__age = age
def get_age(self):
return self.__age
def modified_age(self, age):
        if age > 0 and age < 120: # argument check
self.__age = age
animal = Animal('ly', 25)
print animal.get_age()
animal.modified_age(26)
print animal.get_age()
class Mystr(str): # 继承自str
pass
s = Mystr(123)
print issubclass(Mystr, str)
print isinstance(s, str), isinstance(s, Mystr)
class Parent(object):
'''
parent class
'''
numList = []
def numAdd(self, a, b):
return a+b
class Child(Parent):
pass
parent = Parent()
child = Child()
print Parent.__doc__
print Child.__doc__ # subclasses do not inherit __doc__
print Child.__bases__
print Parent.__bases__
print Parent.__class__ # the type
print Child.__class__
print parent.__class__
print child.__class__
class Parent(object):
def f(self):
self.g()
def g(self):
print 'Parent.f.g'
class Child(Parent):
def g(self):
print 'Child.f.g'
child = Child()
child.f()
class Root(object):
def __init__(self):
print("this is Root")
class B(Root):
def __init__(self):
print("enter B")
super(B, self).__init__()
print("leave B")
class C(Root):
def __init__(self):
print("enter C")
super(C, self).__init__()
print("leave C")
class D(B, C):
pass
d = D()
print(d.__class__.__mro__)
print D.__mro__
print D.mro()
class Parent(object):
    __slots__ = ('name',) # __slots__ restricts instance attributes to the listed names
def __init__(self, name):
self.name = name
a = Parent('ly')
print a.name
Parent.age = 25 # __slots__ does not restrict attributes added dynamically on the class itself
print a.age
try:
a = 5 / 0
except ZeroDivisionError:
print 'error'
finally:
print 'end'
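# Editor's hedged addition: the notes below mention __new__, but no cell
# demonstrates it. __new__ creates the instance before __init__ runs, which
# makes a simple singleton possible.
class Singleton(object):
    _instance = None
    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super(Singleton, cls).__new__(cls)
        return cls._instance
a = Singleton()
b = Singleton()
print a is b # True: __new__ returned the same cached instance twice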
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Special class attributes
Step3: Methods
Step4: Define an Animal class whose initializer assigns the parameter name to the instance data attribute name
Step5: @classmethod class methods
Step6: Private variables
Step7: Methods are usually defined inside the class to access and modify these private variables.
Step8: By convention, names starting with a single underscore are treated as internal to the instance and are never accessed from outside the object
Step9: Note:
Step10: Method lookup
Step11: The built-in function super()
Step12: Creating an instance automatically calls the initializer. Why is B's __init__ called? Because D does not define __init__, the next class in the MRO that defines one is looked up, which is B's __init__.
Step13: __new__
|
3,837
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
from snorkel import SnorkelSession
session = SnorkelSession()
import os
from snorkel.parser import XMLMultiDocPreprocessor
# The following line is for testing only. Feel free to ignore it.
file_path = 'data/CDR.BioC.small.xml' if 'CI' in os.environ else 'data/CDR.BioC.xml'
doc_preprocessor = XMLMultiDocPreprocessor(
path=file_path,
doc='.//document',
text='.//passage/text/text()',
id='.//id/text()'
)
from snorkel.parser import CorpusParser
from utils import TaggerOneTagger
tagger_one = TaggerOneTagger()
corpus_parser = CorpusParser(fn=tagger_one.tag)
corpus_parser.apply(list(doc_preprocessor))
from snorkel.models import Document, Sentence
print("Documents:", session.query(Document).count())
print("Sentences:", session.query(Sentence).count())
from six.moves.cPickle import load
with open('data/doc_ids.pkl', 'rb') as f:
train_ids, dev_ids, test_ids = load(f)
train_ids, dev_ids, test_ids = set(train_ids), set(dev_ids), set(test_ids)
train_sents, dev_sents, test_sents = set(), set(), set()
docs = session.query(Document).order_by(Document.name).all()
for i, doc in enumerate(docs):
for s in doc.sentences:
if doc.name in train_ids:
train_sents.add(s)
elif doc.name in dev_ids:
dev_sents.add(s)
elif doc.name in test_ids:
test_sents.add(s)
else:
raise Exception('ID <{0}> not found in any id set'.format(doc.name))
from snorkel.models import Candidate, candidate_subclass
ChemicalDisease = candidate_subclass('ChemicalDisease', ['chemical', 'disease'])
from snorkel.candidates import PretaggedCandidateExtractor
candidate_extractor = PretaggedCandidateExtractor(ChemicalDisease, ['Chemical', 'Disease'])
for k, sents in enumerate([train_sents, dev_sents, test_sents]):
candidate_extractor.apply(sents, split=k)
print("Number of candidates:", session.query(ChemicalDisease).filter(ChemicalDisease.split == k).count())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configuring a DocPreprocessor
Step2: Creating a CorpusParser
Step3: Part II
Step4: We should get 8268 candidates in the training set, 888 candidates in the development set, and 4620 candidates in the test set.
|
3,838
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
people = graphlab.SFrame('people_wiki.gl/')
people.head()
len(people)
obama = people[people['name'] == 'Barack Obama']
john = people[people['name'] == 'Elton John']
john
obama['text']
clooney = people[people['name'] == 'George Clooney']
clooney['text']
obama['word_count'] = graphlab.text_analytics.count_words(obama['text'])
john['word_count'] = graphlab.text_analytics.count_words(john['text'])
print john['word_count']
obama_word_count_table = obama[['word_count']].stack('word_count', new_column_name = ['word','count'])
john_word_count_table = john[['word_count']].stack('word_count', new_column_name = ['word','count'])
obama_word_count_table.head()
john_word_count_table.head()
obama_word_count_table.sort('count',ascending=False)
john_word_count_table.sort('count',ascending=False)
people['word_count'] = graphlab.text_analytics.count_words(people['text'])
people.head()
tfidf = graphlab.text_analytics.tf_idf(people['word_count'])
tfidf
people['tfidf'] = tfidf['docs']
obama = people[people['name'] == 'Barack Obama']
john = people[people['name'] == 'Elton John']
obama[['tfidf']].stack('tfidf',new_column_name=['word','tfidf']).sort('tfidf',ascending=False)
john[['tfidf']].stack('tfidf',new_column_name=['word','tfidf']).sort('tfidf',ascending=False)
clinton = people[people['name'] == 'Bill Clinton']
paul = people[people['name'] == 'Paul McCartney']
beckham = people[people['name'] == 'Victoria Beckham']
print graphlab.distances.cosine(obama['tfidf'][0],clinton['tfidf'][0])
print graphlab.distances.cosine(obama['tfidf'][0],beckham['tfidf'][0])
print graphlab.distances.cosine(john['tfidf'][0],beckham['tfidf'][0])
print graphlab.distances.cosine(john['tfidf'][0],paul['tfidf'][0])
knn_model = graphlab.nearest_neighbors.create(people,features=['tfidf'],label='name', distance='cosine')
wc_model = graphlab.nearest_neighbors.create(people,features=['word_count'],label='name', distance='cosine')
knn_model.query(beckham)
print wc_model.query(john)
print knn_model.query(john)
swift = people[people['name'] == 'Taylor Swift']
knn_model.query(swift)
jolie = people[people['name'] == 'Angelina Jolie']
knn_model.query(jolie)
arnold = people[people['name'] == 'Arnold Schwarzenegger']
knn_model.query(arnold)
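# Editor's hedged addition: query() also accepts k to cap the number of
# neighbours returned (assumed from GraphLab's nearest_neighbors API).
knn_model.query(arnold, k=3)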
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load some text data - from wikipedia, pages on people
Step2: Data contains
Step3: Explore the dataset and checkout the text it contains
Step4: Exploring the entry for actor George Clooney
Step5: Get the word counts for Obama article
Step6: Sort the word counts for the Obama article
Step7: Sorting the word counts to show most common words at the top
Step8: Most common words include uninformative words like "the", "in", "and",...
Step9: Examine the TF-IDF for the Obama article
Step10: Words with highest TF-IDF are much more informative.
Step11: Is Obama closer to Clinton than to Beckham?
Step12: Build a nearest neighbor model for document retrieval
Step13: Applying the nearest-neighbors model for retrieval
Step14: As we can see, President Obama's article is closest to the one about his vice-president Biden, and those of other politicians.
|
3,839
|
<ASSISTANT_TASK:>
Python Code:
from bokeh.plotting import figure, show, output_notebook
output_notebook()
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# output to static HTML file
# output_file("lines.html", title="line plot example")
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=15)
# show the results
show(p)
from bokeh.charts import Histogram
from bokeh.sampledata.autompg import autompg as df
df.sort('cyl', inplace=True)
hist = Histogram(df, values='hp', title="HP Distribution", legend='top_right')
show(hist)
import numpy as np
from bokeh.models import HoverTool, BoxSelectTool
TOOLS = [BoxSelectTool(), HoverTool()]
# create our canvas
p1 = figure(title="HP Distribution", background_fill_color="#E8DDCB", tools=TOOLS)
# stat
hist, edges = np.histogram(df.hp, density=True, bins=50)
# geom
p1.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649")
show(p1)
df.sort('cyl', inplace=True)
hist = Histogram(df, values='hp', color='cyl',
title="HP Distribution by Cylinder Count", legend='top_right')
show(hist)
from bokeh.models import GeoJSONDataSource
from bokeh.plotting import figure
from bokeh.sampledata.sample_geojson import geojson
geo_source = GeoJSONDataSource(geojson=geojson)
p = figure()
p.circle(x='x', y='y', alpha=0.9, source=geo_source)
show(p)
import pandas as pd
# Parsing dates is more compute-intensive, but we know we need them here
df = pd.read_csv('data/sf_listings.csv', parse_dates=['last_review'], infer_datetime_format=True)
df_reviews = pd.read_csv('data/reviews.csv', parse_dates=['date'], infer_datetime_format=True)
# index DataFrame on listing_id in order to join datasets
reindexed_df = df_reviews.set_index('listing_id')
reindexed_df.head()
# remember the original id in a column to group on
df['listing_id'] = df['id']
df_listing = df.set_index('id')
df_listing.head()
# join the listing information with the review information
review_timeseries = df_listing.join(reindexed_df)
print review_timeseries.columns
review_timeseries.head()
# lets try a pivot table...
reviews_over_time = pd.crosstab(review_timeseries.date, review_timeseries.neighbourhood)
reviews_over_time.head()
# smooth by resampling by month
reviews_over_time.resample('M').mean()[['Mission', 'South of Market', 'Noe Valley']].plot(figsize=(12,6))
TOOLS = "pan,wheel_zoom,box_zoom,reset,save,hover"
d = reviews_over_time.resample('M').mean()
p = figure(x_axis_type="datetime", tools=TOOLS)
p.line(d.index, d['Mission'])
show(p)
import bokeh.charts as charts
line = charts.Line(d, y=['Mission', 'South of Market', 'Noe Valley'],
                   color=['Mission', 'South of Market', 'Noe Valley'],
                   title="Monthly Reviews by Neighbourhood", ylabel='Reviews', legend=True, tools=TOOLS)
show(line)
from bokeh.models.widgets import Select
from bokeh.io import output_file, show, vform
select = Select(title="Option:", value="foo", options=list(reviews_over_time))
show(vform(select))
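# Editor's hedged addition (not in the original): the Select widget above is
# display-only here; wiring it to the chart needs a server-side callback. A
# simpler follow-up is plotting one neighbourhood chosen by hand, reusing d
# and TOOLS from the cells above.
chosen = 'Noe Valley'
p2 = figure(x_axis_type="datetime", tools=TOOLS, title=chosen)
p2.line(d.index, d[chosen])
show(p2)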
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sanity Check
Step2: Grammar of Graphics
Step3: Customizing Tooltips
Step4: Let's Use some real (interesting) data!
|
3,840
|
<ASSISTANT_TASK:>
Python Code:
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
from multiprocessing import Pool
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import recommender.simulator as sim
from utils.analysis import value_eval
from recommender.agent import Agent
from functools import partial
NUM_THREADS = 1
LOOKBACK = 252*2 + 28
STARTING_DAYS_AHEAD = 20
POSSIBLE_FRACTIONS = np.arange(0.0, 1.1, 0.1).round(decimals=3).tolist()
# Get the data
SYMBOL = 'SPY'
total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature')
data_train_df = total_data_train_df[SYMBOL].unstack()
total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature')
data_test_df = total_data_test_df[SYMBOL].unstack()
if LOOKBACK == -1:
total_data_in_df = total_data_train_df
data_in_df = data_train_df
else:
data_in_df = data_train_df.iloc[-LOOKBACK:]
total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:]
# Create many agents
index = np.arange(NUM_THREADS).tolist()
env, num_states, num_actions = sim.initialize_env(total_data_in_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
agents = [Agent(num_states=num_states,
num_actions=num_actions,
random_actions_rate=0.98,
random_actions_decrease=0.999,
dyna_iterations=0,
name='Agent_{}'.format(i)) for i in index]
def show_results(results_list, data_in_df, graph=False):
for values in results_list:
total_value = values.sum(axis=1)
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value))))
print('-'*100)
initial_date = total_value.index[0]
compare_results = data_in_df.loc[initial_date:, 'Close'].copy()
compare_results.name = SYMBOL
compare_results_df = pd.DataFrame(compare_results)
compare_results_df['portfolio'] = total_value
std_comp_df = compare_results_df / compare_results_df.iloc[0]
if graph:
plt.figure()
std_comp_df.plot()
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
# Simulate (with new envs, each time)
n_epochs = 4
for i in range(n_epochs):
tic = time()
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_in_df)
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL, agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
other_env=env)
show_results([results_list], data_in_df, graph=True)
TEST_DAYS_AHEAD = 20
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=True,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_test_df['Close'].iloc[TEST_DAYS_AHEAD:]))))
import pickle
with open('../../data/simple_q_learner_fast_learner_10_actions.pkl', 'wb') as best_agent:
pickle.dump(agents[0], best_agent)
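# Sketch (not in the original notebook): the pickled agent can be restored
# later and reused without retraining.
with open('../../data/simple_q_learner_fast_learner_10_actions.pkl', 'rb') as f:
    loaded_agent = pickle.load(f)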
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's show the symbols data, to see how good the recommender has to be.
Step2: Let's run the trained agent, with the test set
Step3: And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).
|
3,841
|
<ASSISTANT_TASK:>
Python Code:
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2022"
import os
import pandas as pd
import torch
from transformers import BertModel, BertTokenizer
from transformers import RobertaModel, RobertaTokenizer
import utils
import vsm
DATA_HOME = os.path.join('data', 'vsmdata')
# Set all the random seeds for reproducibility:
utils.fix_random_seeds()
import logging
logger = logging.getLogger()
logger.level = logging.ERROR
bert_weights_name = 'bert-base-cased'
bert_tokenizer = BertTokenizer.from_pretrained(bert_weights_name)
bert_model = BertModel.from_pretrained(bert_weights_name)
example_text = "Bert knows Snuffleupagus"
bert_tokenizer.tokenize(example_text)
ex_ids = bert_tokenizer.encode(example_text, add_special_tokens=True)
ex_ids
bert_tokenizer.convert_ids_to_tokens(ex_ids)
with torch.no_grad():
reps = bert_model(torch.tensor([ex_ids]), output_hidden_states=True)
reps.last_hidden_state.shape
len(reps.hidden_states)
reps.hidden_states[-1].shape
torch.equal(reps.hidden_states[-1], reps.last_hidden_state)
bert_tokenizer.tokenize('puppy')
vsm.hf_encode("puppy", bert_tokenizer)
bert_tokenizer.tokenize('snuffleupagus')
subtok_ids = vsm.hf_encode("snuffleupagus", bert_tokenizer)
subtok_ids
subtok_reps = vsm.hf_represent(subtok_ids, bert_model, layer=-1)
subtok_reps.shape
subtok_pooled = vsm.mean_pooling(subtok_reps)
subtok_pooled.shape
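# Illustrative sketch (not in the original notebook): compare the pooled
# "snuffleupagus" vector against a pooled single-token word with cosine
# similarity, reusing the vsm helpers shown above.
puppy_ids = vsm.hf_encode("puppy", bert_tokenizer)
puppy_pooled = vsm.mean_pooling(vsm.hf_represent(puppy_ids, bert_model, layer=-1))
torch.cosine_similarity(puppy_pooled, subtok_pooled)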
vsm_index = pd.read_csv(
os.path.join(DATA_HOME, 'yelp_window5-scaled.csv.gz'),
usecols=[0], index_col=0)
vocab = list(vsm_index.index)
vocab[: 5]
%%time
pooled_df = vsm.create_subword_pooling_vsm(
vocab, bert_tokenizer, bert_model, layer=1)
pooled_df.shape
pooled_df.iloc[: 5, :5]
vocab_ids = {w: vsm.hf_encode(w, bert_tokenizer)[0] for w in vocab}
corpus = [
"This is a sailing example",
"It's fun to go sailing!",
"We should go sailing.",
"I'd like to go sailing and sailing",
"This is merely an example"]
corpus_ids = [vsm.hf_encode(text, bert_tokenizer)
for text in corpus]
corpus_reps = [vsm.hf_represent(ids, bert_model, layer=1)
for ids in corpus_ids]
def find_sublist_indices(sublist, mainlist):
indices = []
length = len(sublist)
for i in range(0, len(mainlist)-length+1):
if mainlist[i: i+length] == sublist:
indices.append((i, i+length))
return indices
find_sublist_indices([1,2], [1, 2, 3, 0, 1, 2, 3])
sailing = vocab_ids['sailing']
sailing_reps = []
for ids, reps in zip(corpus_ids, corpus_reps):
offsets = find_sublist_indices(sailing, ids.squeeze(0))
for (start, end) in offsets:
pooled = vsm.mean_pooling(reps[:, start: end])
sailing_reps.append(pooled)
sailing_rep = torch.mean(torch.cat(sailing_reps), axis=0).squeeze(0)
sailing_rep.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contents
Step2: The transformers library does a lot of logging. To avoid ending up with a cluttered notebook, I am changing the logging level. You might want to skip this as you scale up to building production systems, since the logging is very good – it gives you a lot of insights into what the models and code are doing.
Step3: Loading Transformer models
Step4: There are lots of other options for pretrained weights. See this Hugging Face directory.
Step5: The basics of tokenizing
Step6: Simple tokenization
Step7: The encode method maps individual strings to indices into the underlying embedding used by the model
Step8: We can get a better feel for what these representations are like by mapping the indices back to "words"
Step9: Those are all the essential ingredients for working with these parameters in Hugging Face. Of course, the library has a lot of other functionality, but the above suffices for our current application.
Step10: The return value reps is a special transformers class that holds a lot of representations. If we want just the final output representations for each token, we use last_hidden_state
Step11: The shape indicates that our batch has 1 example, with 10 tokens, and each token is represented by a vector of dimensionality 768.
Step12: The length 13 corresponds to the initial embedding layer (layer 0) and the 12 layers of this BERT model.
Step13: The decontextualized approach
Step14: As we saw above, some words map to multiple tokens
Step15: Next, the function vsm.hf_represent will map a batch of ids to their representations in a user-supplied model, at a specified layer in that model
Step16: The shape here
Step17: The function vsm.mean_pooling is simple torch.mean with axis=1. There are also predefined functions vsm.max_pooling, vsm.min_pooling, and vsm.last_pooling (representation for the final token).
Step18: And then we use vsm.create_subword_pooling_vsm
Step19: The result, pooled_df, is a pd.DataFrame with its index given by vocab. This can be used directly in the word relatedness evaluations that are central the homework and associated bakeoff.
Step20: This approach, and the associated code, should work generally for all Hugging Face Transformer-based models. Bommasani et al. (2020) provide a lot of guidance when it comes to how the model, the layer choice, and the pooling function interact.
Step21: Next, let's assume we have a corpus of texts that contain the words of interest
Step22: The following embeds every corpus example, keeping layer=1 representations
Step23: Finally, we define a convenience function for finding all the occurrences of a sublist in a larger list
Step24: For example
Step25: And here's an example using our vocab_ids and corpus
|
3,842
|
<ASSISTANT_TASK:>
Python Code:
from tessfpe.dhu.fpe import FPE
from tessfpe.dhu.unit_tests import check_house_keeping_voltages
fpe1 = FPE(1, debug=False, preload=True, FPE_Wrapper_version='6.1.1')
print(fpe1.version)
fpe1.cmd_start_frames()
fpe1.cmd_stop_frames()
if check_house_keeping_voltages(fpe1):
    print("Wrapper load complete. Interface voltages OK.")
from tessfpe.data.operating_parameters import operating_parameters
operating_parameters["heater_1_current"]
fpe1.house_keeping["analogue"]["heater_1_current"]
fpe1.house_keeping["analogue"]["ccd1_input_diode_high"]
fpe1.ops.heater_1_current = fpe1.ops.heater_1_current.low
fpe1.ops.heater_2_current = fpe1.ops.heater_2_current.low
fpe1.ops.heater_3_current = fpe1.ops.heater_3_current.low
fpe1.ops.send()
def set_fpe_defaults(fpe):
    "Set the FPE to the default operating parameters and return a dict of the default values"
defaults = {}
for k in range(len(fpe.ops.address)):
if fpe.ops.address[k] is None:
continue
fpe.ops.address[k].value = fpe.ops.address[k].default
defaults[fpe.ops.address[k].name] = fpe.ops.address[k].default
return defaults
set_fpe_defaults(fpe1)
operating_parameters["ccd1_output_drain_a_offset"]
#operating_parameters["ccd1_reset_drain"]
fpe1.ops.ccd1_reset_drain = 15
fpe1.ops.ccd1_output_drain_a_offset = 10
fpe1.ops.send()
fpe1.house_keeping["analogue"]["ccd1_output_drain_a"]
#operating_parameters["ccd1_reset_high"]
operating_parameters['ccd1_reset_low_offset']
fpe1.ops.ccd1_reset_high = -10.3
fpe1.ops.ccd1_reset_low_offset = -9.9
fpe1.ops.send()
fpe1.house_keeping["analogue"]["ccd1_reset_low"]
fpe1.cmd_start_frames() # Starts frame generation.
fpe1.cmd_stop_frames() # Stops frame generation.
from tessfpe.data.housekeeping_channels import housekeeping_channels
from tessfpe.data.housekeeping_channels import housekeeping_channel_memory_map
print(fpe1.house_keeping)
print(fpe1.house_keeping["analogue"])
from numpy import var
samples=100
from tessfpe.data.housekeeping_channels import housekeeping_channels
# We make sample_data a dictionary and each value will be a set of HK data, with key = sample_name.
sample_data = {}
# For later:
signal_names = []
signal_values = []
signal_data = {}
variance_values = {}
#my_dict["new key"] = "New value"
for i in range(samples):
# Get a new set of HK values
house_keeping_values = fpe1.house_keeping["analogue"]
data_values = house_keeping_values.values()
# Add the new HK values to the sample_data dictionary:
sample_number = "sample_" + str(i)
sample_data[sample_number] = data_values
# Get the signal names for use later
signal_names = house_keeping_values.keys()
# Assign the set of all HK values of the same signal (e.g. substrate_1)
# to the dictionary 'signal_data'.
for k in range(len(signal_names)):
# Build the list 'signal_values' for this signal:
for i in range(samples):
sample_number = "sample_" + str(i)
signal_values.append(sample_data[sample_number][k])
# Add signal_values to the signal_data dictionary:
signal_data[signal_names[k]] = signal_values
signal_values = []
# Now get the variance of each of the 'signal_values' in the
# signal_data dictionary and put the result in the 'variance_values'
# dictionary.
for name in signal_data:
variance_values[name] = var(signal_data[name])
    # print(name, str(variance_values[name]))
    print('{0} {1:<5}'.format(name, variance_values[name]))
data = []
for i in range(10):
set_values = {}
for k in range(len(fpe1.ops.address)):
if fpe1.ops.address[k] is None:
continue
low = fpe1.ops.address[k].low
high = fpe1.ops.address[k].high
name = fpe1.ops.address[k].name
set_values[name] = fpe1.ops.address[k].value = low + i / 100. * (high - low)
fpe1.ops.send()
data.append({"set values": set_values,"measured values": fpe1.house_keeping["analogue"]})
print(data)
print(sample_data)
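# Sketch (not in the original notebook): tabulate the swept set values for
# easier inspection, assuming pandas is available in this environment.
import pandas as pd
sweep_df = pd.DataFrame([d["set values"] for d in data])
sweep_df.head()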
v = {}
for name in operating_parameters.keys():
v[name] = operating_parameters[name]
    print(v[name]["unit"])
    print(name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Useful Commands
Step2: Reading a housekeeping value has this form
Step3: Setting an operating parameter has this form
Step4: Setting all the operating parameters to the default values
Step7: Workspace
|
3,843
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
import numpy as np
import thinkstats2
def ReadFemPreg(dct_file='2002FemPreg.dct',
dat_file='2002FemPreg.dat.gz'):
    """Reads the NSFG pregnancy data.
    dct_file: string file name
    dat_file: string file name
    returns: DataFrame
    """
dct = thinkstats2.ReadStataDct(dct_file)
preg = dct.ReadFixedWidth(dat_file, compression='gzip')
return preg
preg = ReadFemPreg()
print(preg.shape)
preg.head()
preg.agepreg
preg.agepreg /= 100
preg.agepreg.mean()
preg.birthwgt_lb.describe()
preg.birthwgt_lb.value_counts().sort_index()
preg.loc[preg.birthwgt_lb > 20, 'birthwgt_lb'] = np.nan
na_vals = [97, 98, 99]
preg.birthwgt_lb.replace(na_vals, np.nan, inplace=True)
preg.birthwgt_oz.replace(na_vals, np.nan, inplace=True)
preg['totalwgt_lb'] = preg.birthwgt_lb + preg.birthwgt_oz / 16.0
def CleanFemPreg(preg):
    """Recodes variables from the pregnancy frame.
    preg: DataFrame
    """
# mother's age is encoded in centiyears; convert to years
preg.agepreg /= 100.0
# birthwgt_lb contains at least one bogus value (51 lbs)
# replace with NaN
preg.loc[preg.birthwgt_lb > 20, 'birthwgt_lb'] = np.nan
# replace 'not ascertained', 'refused', 'don't know' with NaN
na_vals = [97, 98, 99]
preg.birthwgt_lb.replace(na_vals, np.nan, inplace=True)
preg.birthwgt_oz.replace(na_vals, np.nan, inplace=True)
preg.hpagelb.replace(na_vals, np.nan, inplace=True)
preg.babysex.replace([7, 9], np.nan, inplace=True)
preg.nbrnaliv.replace([9], np.nan, inplace=True)
# birthweight is stored in two columns, lbs and oz.
# convert to a single column in lb
# NOTE: creating a new column requires dictionary syntax,
# not attribute assignment (like preg.totalwgt_lb)
preg['totalwgt_lb'] = preg.birthwgt_lb + preg.birthwgt_oz / 16.0
# due to a bug in ReadStataDct, the last variable gets clipped;
# so for now set it to NaN
preg.cmintvw = np.nan
preg.pregordr.value_counts().sort_index()
assert len(preg) == 13593
assert preg.caseid[13592] == 12571
assert preg.pregordr.value_counts()[1] == 5033
assert preg.nbrnaliv.value_counts()[1] == 8981
assert preg.babysex.value_counts()[1] == 4641
assert preg.birthwgt_lb.value_counts()[7] == 3049
assert preg.birthwgt_oz.value_counts()[0] == 1037
assert preg.prglngth.value_counts()[39] == 4744
assert preg.outcome.value_counts()[1] == 9148
assert preg.birthord.value_counts()[1] == 4413
assert preg.agepreg.value_counts()[22.75] == 100
assert preg.totalwgt_lb.value_counts()[7.5] == 302
weights = preg.finalwgt.value_counts()
key = max(weights.keys())
assert preg.finalwgt.value_counts()[key] == 6
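# Quick exploration sketch (not in the original notebook): mean total birth
# weight for live births, assuming outcome == 1 codes a live birth per the
# NSFG codebook.
live = preg[preg.outcome == 1]
live.totalwgt_lb.mean()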
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The NSFG data is in a fixed-width format, documented in a Stata dictionary file. ReadFemPreg reads the dictionary and then reads the data into a Pandas DataFrame.
Step3: After loading a DataFrame, I check the shape.
Step4: And take a look at the first few rows.
Step5: Then I validate the variables I am likely to need. The encoding of agepreg is non-obvious.
Step6: Which is why you have to read the codebook
Step7: For live births, birthweight is coded as two integers, birthwgt_lb and birthwgt_oz. We can use describe to summarize variables.
Step8: Most of that looks reasonable, but the maximum is 99 lbs! Let's look at the distribution of values
Step9: Consulting the code book, we see that 97, 98, and 99 are sentinel values indicating "not ascertained", "refused", and "don't know" (that is, the respondent did not know).
Step10: And if we don't care about the different sentinel values, we can replace them all with NaN.
Step11: Next, it will be convenient to recode birthwgt_lb and birthwgt_oz with a single floating-point value.
Step13: After testing these cleanings and recodings, we can encapsulate them in a function. As we work with additional variables, we might add more lines to this function.
Step14: The NSFG codebook includes summaries for many variables, which we can use to make sure the data is uncorrupted, and we are interpreting it correctly.
Step15: The distribution of pregordr is consistent with the summary in the codebook.
|
3,844
|
<ASSISTANT_TASK:>
Python Code:
!conda install ipython-sql -y
%load_ext sql
%config SqlMagic.autopandas=True
import pandas as pd
import sqlite3
%%sql sqlite://
CREATE TABLE presidents (first_name, last_name, year_of_birth);
INSERT INTO presidents VALUES ('George', 'Washington', 1732);
INSERT INTO presidents VALUES ('John', 'Adams', 1735);
INSERT INTO presidents VALUES ('Thomas', 'Jefferson', 1743);
INSERT INTO presidents VALUES ('James', 'Madison', 1751);
INSERT INTO presidents VALUES ('James', 'Monroe', 1758);
INSERT INTO presidents VALUES ('Zachary', 'Taylor', 1784);
INSERT INTO presidents VALUES ('Abraham', 'Lincoln', 1809);
INSERT INTO presidents VALUES ('Theodore', 'Roosevelt', 1858);
INSERT INTO presidents VALUES ('Richard', 'Nixon', 1913);
INSERT INTO presidents VALUES ('Barack', 'Obama', 1961);
later_presidents = %sql SELECT * FROM presidents WHERE year_of_birth > 1825
later_presidents
type(later_presidents)
later_presidents
con = sqlite3.connect("presidents.sqlite")
later_presidents.to_sql("presidents", con, if_exists='replace')
con = sqlite3.connect("presidents.sqlite")
cur = con.cursor()
new_dataframe = pd.read_sql("""SELECT *
                               FROM presidents""",
                            con=con)
con.close()
new_dataframe
type(new_dataframe)
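# Sketch (not in the original notebook): a parameterized query through
# pandas; the sqlite3 driver uses '?' placeholders.
con = sqlite3.connect("presidents.sqlite")
recent = pd.read_sql("SELECT * FROM presidents WHERE year_of_birth > ?",
                     con, params=(1900,))
con.close()
recent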
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SQL
Step3: Through pandas directly
|
3,845
|
<ASSISTANT_TASK:>
Python Code:
# Missing imports added for the sklearn calls in this fragment; X and Y are
# assumed to be the wine feature matrix and class labels loaded earlier, and
# bare plotting names (subplots, plot, xlabel, ...) to come from %pylab inline.
import sklearn.preprocessing
import sklearn.decomposition
from sklearn.ensemble import RandomForestClassifier
fig,ax=subplots(3,3,figsize=(10, 10))
n=1
for i in range(3):
for j in range(3):
ax[i,j].scatter(X[:,0],X[:,n],c=Y)
n+=1
Xnorm=sklearn.preprocessing.normalize(X)
pca=sklearn.decomposition.PCA()
pca.fit(Xnorm)
fig,ax=subplots(1,3,figsize=(16, 4))
ax[0].scatter(pca.transform(X)[:,0],Y,c=Y)
ax[0].set_xlabel('Principal component')
ax[0].set_ylabel('Wine type')
ax[1].scatter(pca.transform(X)[:,0],pca.transform(X)[:,1],c=Y)
ax[1].set_xlabel('Principal component')
ax[1].set_ylabel('Second principal component')
ax[2].plot(pca.explained_variance_ratio_)
xlabel('Number of components')
ylabel('Explained variance')
n=50
N=10
treesScore=zeros(n)
for k in range(N):
for i,j in zip(logspace(0,1.5,n),range(n)):
rf = RandomForestClassifier(n_estimators=int(i))
rf.fit(X,Y)
treesScore[j]+=rf.score(X,Y)*1.0/N
plot(logspace(0,1.5,n),treesScore)  # fixed: x-axis must match the sweep range above
xscale('log')
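# Note: rf.score(X, Y) above is training-set accuracy, which is optimistic.
# A hedged sketch using cross-validation instead (assumes scikit-learn >= 0.18):
from sklearn.model_selection import cross_val_score
rf = RandomForestClassifier(n_estimators=50)
print(cross_val_score(rf, X, Y, cv=5).mean())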
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A single component explains almost all of the variance. However, even with this separation into principal components, the different wine types overlap, especially in the range of -0.05 to 0.05 on the first principal component. This is seen a bit better in the middle figure, where the first two principal components are plotted. While a pattern can be distinguished, the principal components do not separate the green points from the red ones. This means that PCA is not especially useful for distinguishing between the different wine types and classifying them.
|
3,846
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import pudl
from pudl import constants as pc
from pathlib import Path
import pandas as pd
pd.options.display.max_columns = 150
pd.options.display.max_rows = 150
# make notebooks full width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
pudl_settings = pudl.workspace.setup.get_defaults()
eia923_tables = pc.PUDL_TABLES['eia923']
eia923_years = list(range(2001, 2020))
ds = pudl.workspace.datastore.Datastore(local_cache_path=Path(pudl_settings["data_dir"]))
eia923_extractor = pudl.extract.eia923.Extractor(ds)
dfs = {}
# Lightly altered extractor code (pudl.extract.excel.GenericExtractor.extract) to avoid concatenating prematurely
for partition in pudl.helpers.iterate_multivalue_dict(year=eia923_years):
dfs[partition['year']] = {}
for page in eia923_extractor._metadata.get_all_pages():
if eia923_extractor.excel_filename(page, **partition) == '-1':
continue
newdata = pd.read_excel(
eia923_extractor.load_excel_file(page, **partition),
sheet_name=eia923_extractor._metadata.get_sheet_name(
page, **partition),
skiprows=eia923_extractor._metadata.get_skiprows(page, **partition),
skipfooter=eia923_extractor._metadata.get_skipfooter(
page, **partition),
dtype=eia923_extractor.get_dtypes(page, **partition),
nrows=20
)
newdata = pudl.helpers.simplify_columns(newdata)
newdata = eia923_extractor.process_raw(newdata, page, **partition)
newdata = eia923_extractor.process_renamed(newdata, page, **partition)
dfs[partition['year']][page] = newdata
for k, v in dfs.items():
print(k, v.keys())
# make dataframes of columns. One df per excel sheet, one row per year
from collections import defaultdict
col_dfs = defaultdict(list)
for page in eia923_extractor._metadata.get_all_pages():
for year in eia923_years:
try:
col_dfs[page].append(dfs[year][page].columns.to_frame().rename(columns={0: year}))
except KeyError:
continue
col_dfs = {k : pd.concat(v, axis=1).T for k, v in col_dfs.items()}
col_dfs['plant_frame']
col_dfs['plant_frame'].loc[:,col_dfs['plant_frame'].isna().any()]
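# Sketch (not in the original notebook): summarize the first year each column
# appears on each page; the NaN entries above mark years where it is absent.
first_seen = {page: cols.apply(lambda s: s.first_valid_index())
              for page, cols in col_dfs.items()}
first_seen['plant_frame']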
a = iter(col_dfs.items())
k,v = next(a)
print(k)
v.loc[:,v.isna().any()]
k,v = next(a)
print(k)
v.loc[:,v.isna().any()]
k,v = next(a)
print(k)
v.loc[:,v.isna().any()]
k,v = next(a)
print(k)
v.loc[:,v.isna().any()]
k,v = next(a)
print(k)
v.loc[:,v.isna().any()]
k,v = next(a)
print(k)
v.loc[:,v.isna().any()]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This shows the state of the columns for each year for each sheet. When a column is introduced (or disappears), that entry will be NaN. This particular page only has data from 2011 on.
Step2: Filter for columns with any NaNs to avoid filling the screen with unchanged columns
Step3: Look at all the dataframes this way
Step4: fuel_receipts_costs has a weird column dropout
Step5: plant_frame has a few ephemeral columns. Again, checking the raw EIA excel sheets shows that they really do only exist for 2011 and 2013
|
3,847
|
<ASSISTANT_TASK:>
Python Code:
import urllib.request
import json
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.io.json import json_normalize
def get_data(dataurl, filename):
    """Get data from a url, then store it as a JSON file in a local folder.
    Args:
        dataurl (str): url of the data source.
        filename (str): file name you want to store.
    """
with urllib.request.urlopen(dataurl) as url:
data = json.loads(url.read().decode())
with open('raw_data/' + filename, 'w') as outfile:
json.dump(data, outfile)
# source data url and corresponding filename
pageviews_desktop_url = "https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/en.wikipedia.org/desktop/user/monthly/2015070100/2017093000"
pageviews_desktop_filename = "pageviews_desktop_201507-201709.json"
pageviews_mobileapp_url = "https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/en.wikipedia.org/mobile-app/user/monthly/2015070100/2017093000"
pageviews_mobileapp_filename = "pageviews_mobile-app_201507-201709.json"
pageviews_mobileweb_url = "https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/en.wikipedia.org/mobile-web/user/monthly/2015070100/2017093000"
pageviews_mobileweb_filename = "pageviews_mobile-web_201507-201709.json"
pagecounts_desktop_site_url = "https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/en.wikipedia.org/desktop-site/monthly/2008010100/2016073000"
pagecounts_desktop_site_filename = "pagecounts_desktop-site_200801-201607.json"
pagecounts_mobile_site_url = "https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/en.wikipedia.org/mobile-site/monthly/2008010100/2016073000"
pagecounts_mobile_site_filename = "pagecounts_mobile-site_200801-201607.json"
# get data from url then store it into jason file
get_data(pageviews_desktop_url, pageviews_desktop_filename)
get_data(pageviews_mobileapp_url, pageviews_mobileapp_filename)
get_data(pageviews_mobileweb_url, pageviews_mobileweb_filename)
get_data(pagecounts_desktop_site_url, pagecounts_desktop_site_filename)
get_data(pagecounts_mobile_site_url, pagecounts_mobile_site_filename)
# load json files
with open('raw_data/' + pageviews_mobileweb_filename) as data_file:
views_web = json.load(data_file)
with open('raw_data/' + pageviews_mobileapp_filename) as data_file:
views_app = json.load(data_file)
# transform json file to pandas.dataframe
views_web_df = json_normalize(views_web['items'])
views_app_df = json_normalize(views_app['items'])
# seperate timestamp into year and month
views_web_year = views_web_df['timestamp'].str[0:4]
views_web_month = views_web_df['timestamp'].str[4:6]
# combine web views and app views into total views
mobile_total = views_web_df['views'] + views_app_df['views']
# create pageview_mobile_views dataframe
pageview_mobile_views_df = pd.DataFrame({'year' : views_web_year, 'month' : views_web_month,
'pageview_mobile_views':mobile_total,})
pageview_mobile_views_df = pageview_mobile_views_df[['year', 'month', 'pageview_mobile_views']]
pageview_mobile_views_df[:5]
# load json files
with open('raw_data/' + pageviews_desktop_filename) as data_file:
views_desktop = json.load(data_file)
# transform json file to pandas.dataframe
views_desktop_df = json_normalize(views_desktop['items'])
# seperate timestamp into year and month
views_desktop_year = views_desktop_df['timestamp'].str[0:4]
views_desktop_month = views_desktop_df['timestamp'].str[4:6]
# create pageview_desktop_views dataframe
pageview_desktop_views_df = pd.DataFrame({'pageview_desktop_views': views_desktop_df['views'],
'year' : views_desktop_year, 'month' : views_desktop_month})
pageview_desktop_views_df = pageview_desktop_views_df[['year', 'month', 'pageview_desktop_views']]
pageview_desktop_views_df[:5]
# create pageview_all_views dataframe
pageview_all_views_df = pd.DataFrame({'pageview_all_views': pageview_desktop_views_df['pageview_desktop_views']
+ pageview_mobile_views_df['pageview_mobile_views'],
'year' : views_desktop_year, 'month' : views_desktop_month})
pageview_all_views_df = pageview_all_views_df[['year', 'month', 'pageview_all_views']]
pageview_all_views_df[:5]
# load json files
with open('raw_data/' + pagecounts_mobile_site_filename) as data_file:
counts_mobile = json.load(data_file)
# transform json file to pandas.dataframe
counts_mobile_df = json_normalize(counts_mobile['items'])
# seperate timestamp into year and month
counts_mobile_year = counts_mobile_df['timestamp'].str[0:4]
counts_mobile_month = counts_mobile_df['timestamp'].str[4:6]
# create pagecount_mobile_views dataframe
pagecount_mobile_views_df = pd.DataFrame({'pagecount_mobile_views': counts_mobile_df['count'],
'year' : counts_mobile_year, 'month' : counts_mobile_month})
pagecount_mobile_views_df = pagecount_mobile_views_df[['year', 'month', 'pagecount_mobile_views']]
pagecount_mobile_views_df[:5]
with open('raw_data/' + pagecounts_desktop_site_filename) as data_file:
counts_desktop = json.load(data_file)
# transform json file to pandas.dataframe
counts_desktop_df = json_normalize(counts_desktop['items'])
# seperate timestamp into year and month
counts_desktop_year = counts_desktop_df['timestamp'].str[0:4]
counts_desktop_month = counts_desktop_df['timestamp'].str[4:6]
# create pagecount_desktop_views dataframe
pagecount_desktop_views_df = pd.DataFrame({'pagecount_desktop_views': counts_desktop_df['count'],
'year' : counts_desktop_year, 'month' : counts_desktop_month})
pagecount_desktop_views_df = pagecount_desktop_views_df[['year', 'month', 'pagecount_desktop_views']]
pagecount_desktop_views_df[:5]
# create pagecount_all_views dataframe
pagecount_all_views_df = pd.merge(pagecount_desktop_views_df, pagecount_mobile_views_df, how='left', on=['year', 'month'])
pagecount_all_views_df.fillna(0, inplace=True)
pagecount_all_views_df['pagecount_desktop_views'] += pagecount_all_views_df['pagecount_mobile_views']
pagecount_all_views_df = pagecount_all_views_df.rename(columns = {'pagecount_desktop_views':'pagecount_all_views'})
del pagecount_all_views_df['pagecount_mobile_views']
pagecount_all_views_df.pagecount_all_views = pagecount_all_views_df.pagecount_all_views.astype(int)
pagecount_all_views_df = pagecount_all_views_df[['year', 'month', 'pagecount_all_views']]
pagecount_all_views_df[:5]
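# An equivalent, more compact construction (sketch, not in the original):
# merge, zero-fill the missing mobile months, and sum the two series directly.
alt = pd.merge(pagecount_desktop_views_df, pagecount_mobile_views_df,
               how='left', on=['year', 'month']).fillna(0)
alt['pagecount_all_views'] = (alt.pagecount_desktop_views
                              + alt.pagecount_mobile_views).astype(int)
alt = alt[['year', 'month', 'pagecount_all_views']]
alt[:5]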
df_list = [pagecount_all_views_df, pagecount_mobile_views_df, pagecount_desktop_views_df,
pageview_all_views_df, pageview_mobile_views_df, pageview_desktop_views_df]
df = df_list[0]
# merge each dataframe
for i in range(1, len(df_list)):
df = pd.merge(df, df_list[i], how='left', on=['year', 'month'])
# replace NAN into 0
df.fillna(0, inplace=True)
# set column type to int
df.pagecount_mobile_views = df.pagecount_mobile_views.astype(int)
df.pageview_all_views = df.pageview_all_views.astype(int)
df.pageview_mobile_views = df.pageview_mobile_views.astype(int)
df.pageview_desktop_views = df.pageview_desktop_views.astype(int)
df[:5]
df.to_csv('cleaned_data/' + 'en-wikipedia_traffic_200801-201709.csv', sep=',')
df = df.replace(0, np.nan)
date = pd.to_datetime(df.year + df.month, format = '%Y%m')
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
fmt = mdates.DateFormatter('%Y')
# make plot
fig, ax = plt.subplots()
ax.plot(date, df['pagecount_mobile_views'], color = 'green', linestyle = '--', alpha=0.7)
ax.plot(date, df['pagecount_desktop_views'], color = 'blue', linestyle = '--', alpha=0.7)
ax.plot(date, df['pagecount_all_views'], color = 'red', linestyle = '--', alpha=0.7)
ax.plot(date, df['pageview_all_views'], color = 'red', alpha=0.7)
ax.plot(date, df['pageview_mobile_views'], color = 'green', alpha=0.7)
ax.plot(date, df['pageview_desktop_views'], color = 'blue', alpha=0.7)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_minor_locator(months)
datemin = datetime.date(date.min().year, 1, 1)
datemax = datetime.date(date.max().year + 1, 1, 1)
ax.set_xlim(datemin, datemax)
fig.autofmt_xdate()
# set plot
fig.set_size_inches(13, 8)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, prop={'size': 15}, borderaxespad=0.)
fig.suptitle('Page Views on English Wikipedia', fontsize=20)
ax.set_xlabel('Year')
ax.set_ylabel('Page Views')
ax.xaxis.label.set_size(15)
ax.yaxis.label.set_size(15)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step2: Then I write a function to get source data from a url and then store the data into JSON files. The inputs of the function are the source data url and the corresponding file name you want to store.
Step3: Next, I generate url variables and corresponding file name variables to put into the above function.
Step4: Last, we just need to call the function. We get the data from the url and then store it into a JSON file under the data folder.
Step5: Step 2
Step6: (2) process pageview_desktop_views
Step7: (3) process pageview_all_views
Step8: (4) process pagecount_mobile_views
Step9: (5) process pagecount_desktop_views
Step10: (6) process pagecount_all_views
Step11: (7) merge all dataframes
Step12: (8) output csv file
Step13: Step 3
|
3,848
|
<ASSISTANT_TASK:>
Python Code:
!wget -O - 'http://www.cs.nyu.edu/~roweis/data/nips12raw_str602.tgz' > /tmp/nips12raw_str602.tgz
import tarfile
filename = '/tmp/nips12raw_str602.tgz'
tar = tarfile.open(filename, 'r:gz')
for item in tar:
tar.extract(item, path='/tmp')
import os, re
# Folder containing all NIPS papers.
data_dir = '/tmp/nipstxt/' # Set this path to the data on your machine.
# Folders containin individual NIPS papers.
yrs = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
dirs = ['nips' + yr for yr in yrs]
# Get all document texts and their corresponding IDs.
docs = []
doc_ids = []
for yr_dir in dirs:
files = os.listdir(data_dir + yr_dir) # List of filenames.
for filen in files:
# Get document ID.
        (idx1, idx2) = re.search('[0-9]+', filen).span()  # Matches the indexes of the start and end of the ID.
doc_ids.append(yr_dir[4:] + '_' + str(int(filen[idx1:idx2])))
# Read document text.
# Note: ignoring characters that cause encoding errors.
with open(data_dir + yr_dir + '/' + filen, errors='ignore', encoding='utf-8') as fid:
txt = fid.read()
# Replace any whitespace (newline, tabs, etc.) by a single space.
txt = re.sub('\s', ' ', txt)
docs.append(txt)
filenames = [data_dir + 'idx/a' + yr + '.txt' for yr in yrs] # Using the years defined in previous cell.
# Get all author names and their corresponding document IDs.
author2doc = dict()
i = 0
for yr in yrs:
# The files "a00.txt" and so on contain the author-document mappings.
filename = data_dir + 'idx/a' + yr + '.txt'
for line in open(filename, errors='ignore', encoding='utf-8'):
# Each line corresponds to one author.
contents = re.split(',', line)
author_name = (contents[1] + contents[0]).strip()
# Remove any whitespace to reduce redundant author names.
author_name = re.sub('\s', '', author_name)
# Get document IDs for author.
ids = [c.strip() for c in contents[2:]]
if not author2doc.get(author_name):
# This is a new author.
author2doc[author_name] = []
i += 1
# Add document IDs to author.
author2doc[author_name].extend([yr + '_' + id for id in ids])
# Use an integer ID in author2doc, instead of the IDs provided in the NIPS dataset.
# Mapping from ID of document in NIPS datast, to an integer ID.
doc_id_dict = dict(zip(doc_ids, range(len(doc_ids))))
# Replace NIPS IDs by integer IDs.
for a, a_doc_ids in author2doc.items():
for i, doc_id in enumerate(a_doc_ids):
author2doc[a][i] = doc_id_dict[doc_id]
import spacy
nlp = spacy.load('en')  # in newer spaCy releases this model is installed/loaded as 'en_core_web_sm'
%%time
processed_docs = []
for doc in nlp.pipe(docs, n_threads=4, batch_size=100):
# Process document using Spacy NLP pipeline.
ents = doc.ents # Named entities.
# Keep only words (no numbers, no punctuation).
# Lemmatize tokens, remove punctuation and remove stopwords.
doc = [token.lemma_ for token in doc if token.is_alpha and not token.is_stop]
# Remove common words from a stopword list.
#doc = [token for token in doc if token not in STOPWORDS]
# Add named entities, but only if they are a compound of more than word.
doc.extend([str(entity) for entity in ents if len(entity) > 1])
processed_docs.append(doc)
docs = processed_docs
del processed_docs
# Compute bigrams.
from gensim.models import Phrases
# Add bigrams and trigrams to docs (only ones that appear 20 times or more).
bigram = Phrases(docs, min_count=20)
for idx in range(len(docs)):
for token in bigram[docs[idx]]:
if '_' in token:
# Token is a bigram, add to document.
docs[idx].append(token)
# Create a dictionary representation of the documents, and filter out frequent and rare words.
from gensim.corpora import Dictionary
dictionary = Dictionary(docs)
# Remove rare and common tokens.
# Filter out words that occur too frequently or too rarely.
max_freq = 0.5
min_wordcount = 20
dictionary.filter_extremes(no_below=min_wordcount, no_above=max_freq)
_ = dictionary[0] # This sort of "initializes" dictionary.id2token.
# Vectorize data.
# Bag-of-words representation of the documents.
corpus = [dictionary.doc2bow(doc) for doc in docs]
print('Number of authors: %d' % len(author2doc))
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
from gensim.models import AuthorTopicModel
%time model = AuthorTopicModel(corpus=corpus, num_topics=10, id2word=dictionary.id2token, \
author2doc=author2doc, chunksize=2000, passes=1, eval_every=0, \
iterations=1, random_state=1)
%%time
model_list = []
for i in range(5):
model = AuthorTopicModel(corpus=corpus, num_topics=10, id2word=dictionary.id2token, \
author2doc=author2doc, chunksize=2000, passes=100, gamma_threshold=1e-10, \
eval_every=0, iterations=1, random_state=i)
top_topics = model.top_topics(corpus)
tc = sum([t[1] for t in top_topics])
model_list.append((model, tc))
model, tc = max(model_list, key=lambda x: x[1])
print('Topic coherence: %.3e' %tc)
# Save model.
model.save('/tmp/model.atmodel')
# Load model.
model = AuthorTopicModel.load('/tmp/model.atmodel')
model.show_topic(0)
topic_labels = ['Circuits', 'Neuroscience', 'Numerical optimization', 'Object recognition', \
'Math/general', 'Robotics', 'Character recognition', \
'Reinforcement learning', 'Speech recognition', 'Bayesian modelling']
for topic in model.show_topics(num_topics=10):
print('Label: ' + topic_labels[topic[0]])
words = ''
for word, prob in model.show_topic(topic[0]):
words += word + ' '
print('Words: ' + words)
print()
model['YannLeCun']
from pprint import pprint
def show_author(name):
print('\n%s' % name)
print('Docs:', model.author2doc[name])
print('Topics:')
pprint([(topic_labels[topic[0]], topic[1]) for topic in model[name]])
show_author('YannLeCun')
show_author('GeoffreyE.Hinton')
show_author('TerrenceJ.Sejnowski')
show_author('ChristofKoch')
from gensim.models import atmodel
doc2author = atmodel.construct_doc2author(model.corpus, model.author2doc)
# Compute the per-word bound.
# Number of words in corpus.
corpus_words = sum(cnt for document in model.corpus for _, cnt in document)
# Compute bound and divide by number of words.
perwordbound = model.bound(model.corpus, author2doc=model.author2doc, \
doc2author=model.doc2author) / corpus_words
print(perwordbound)
%time top_topics = model.top_topics(model.corpus)
%%time
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
smallest_author = 0 # Ignore authors with documents less than this.
authors = [model.author2id[a] for a in model.author2id.keys() if len(model.author2doc[a]) >= smallest_author]
_ = tsne.fit_transform(model.state.gamma[authors, :]) # Result stored in tsne.embedding_
# Tell Bokeh to display plots inside the notebook.
from bokeh.io import output_notebook
output_notebook()
from bokeh.models import HoverTool
from bokeh.plotting import figure, show, ColumnDataSource
x = tsne.embedding_[:, 0]
y = tsne.embedding_[:, 1]
author_names = [model.id2author[a] for a in authors]
# Radius of each point corresponds to the number of documents attributed to that author.
scale = 0.1
author_sizes = [len(model.author2doc[a]) for a in author_names]
radii = [size * scale for size in author_sizes]
source = ColumnDataSource(
data=dict(
x=x,
y=y,
author_names=author_names,
author_sizes=author_sizes,
radii=radii,
)
)
# Add author names and sizes to mouse-over info.
hover = HoverTool(
tooltips=[
("author", "@author_names"),
("size", "@author_sizes"),
]
)
p = figure(tools=[hover, 'crosshair,pan,wheel_zoom,box_zoom,reset,save,lasso_select'])
p.scatter('x', 'y', radius='radii', source=source, fill_alpha=0.6, line_color=None)
show(p)
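# Sketch (not in the original notebook): the interactive plot can also be
# exported to a standalone HTML file.
from bokeh.io import output_file
output_file("authors_tsne.html")
show(p)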
from gensim.similarities import MatrixSimilarity
# Generate a similarity object for the transformed corpus.
index = MatrixSimilarity(model[list(model.id2author.values())])
# Get similarities to some author.
author_name = 'YannLeCun'
sims = index[model[author_name]]
# Make a function that returns similarities based on the Hellinger distance.
from gensim import matutils
import pandas as pd
# Make a list of all the author-topic distributions.
author_vecs = [model.get_author_topics(author) for author in model.id2author.values()]
def similarity(vec1, vec2):
'''Get similarity between two vectors'''
dist = matutils.hellinger(matutils.sparse2full(vec1, model.num_topics), \
matutils.sparse2full(vec2, model.num_topics))
sim = 1.0 / (1.0 + dist)
return sim
def get_sims(vec):
'''Get similarity of vector to all authors.'''
sims = [similarity(vec, vec2) for vec2 in author_vecs]
return sims
def get_table(name, top_n=10, smallest_author=1):
'''
Get table with similarities, author names, and author sizes.
Return `top_n` authors as a dataframe.
'''
# Get similarities.
sims = get_sims(model.get_author_topics(name))
# Arrange author names, similarities, and author sizes in a list of tuples.
table = []
for elem in enumerate(sims):
author_name = model.id2author[elem[0]]
sim = elem[1]
author_size = len(model.author2doc[author_name])
if author_size >= smallest_author:
table.append((author_name, sim, author_size))
# Make dataframe and retrieve top authors.
df = pd.DataFrame(table, columns=['Author', 'Score', 'Size'])
df = df.sort_values('Score', ascending=False)[:top_n]
return df
get_table('YannLeCun')
get_table('JamesM.Bower', smallest_author=3)
%time model_ser = AuthorTopicModel(corpus=corpus, num_topics=10, id2word=dictionary.id2token, \
author2doc=author2doc, random_state=1, serialized=True, \
serialization_path='/tmp/model_serialization.mm')
# Delete the file, once you're done using it.
import os
os.remove('/tmp/model_serialization.mm')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the following sections we will load the data, pre-process it, train the model, and explore the results using some of the implementation's functionality. Feel free to skip the loading and pre-processing for now, if you are familiar with the process.
Step2: Construct a mapping from author names to document IDs.
Step3: Pre-processing text
Step4: In the code below, Spacy takes care of tokenization, removing non-alphabetic characters, removal of stopwords, lemmatization and named entity recognition.
Step5: Below, we use a Gensim model to add bigrams. Note that this achieves the same goal as named entity recognition, that is, finding adjacent words that have some particular significance.
Step6: Now we are ready to construct a dictionary, as our vocabulary is finalized. We then remove common words (occurring $> 50\%$ of the time), and rare words (occur $< 20$ times in total).
Step7: We produce the vectorized representation of the documents, to supply the author-topic model with, by computing the bag-of-words.
Step8: Let's inspect the dimensionality of our data.
Step9: Train and use model
Step10: If you believe your model hasn't converged, you can continue training using model.update(). If you have additional documents and/or authors call model.update(corpus, author2doc).
Step11: Choose the model with the highest topic coherence.
Step12: We save the model, to avoid having to train it again, and also show how to load it again.
Step13: Explore author-topic representation
Step14: Below, we have given each topic a label based on what each topic seems to be about intuitively.
Step15: Rather than just calling model.show_topics(num_topics=10), we format the output a bit so it is easier to get an overview.
Step16: These topics are by no means perfect. They have problems such as chained topics, intruded words, random topics, and unbalanced topics (see Mimno and co-authors 2011). They will do for the purposes of this tutorial, however.
Step17: Let's print the top topics of some authors. First, we make a function to help us do this more easily.
Step18: Below, we print some high profile researchers and inspect them. Three of these, Yann LeCun, Geoffrey E. Hinton and Christof Koch, are spot on.
Step19: Simple model evaluation methods
Step20: Now let's evaluate the per-word bound.
Step21: We can evaluate the quality of the topics by computing the topic coherence, as in the LDA class. Use this to e.g. find out which of the topics are poor quality, or as a metric for model selection.
Step22: Plotting the authors
Step23: We are now ready to make the plot.
Step24: The circles in the plot above are individual authors, and their sizes represent the number of documents attributed to the corresponding author. Hovering your mouse over the circles will tell you the name of the authors and their sizes. Large clusters of authors tend to reflect some overlap in interest.
Step25: However, this framework uses the cosine distance, but we want to use the Hellinger distance. The Hellinger distance is a natural way of measuring the distance (i.e. dis-similarity) between two probability distributions. Its discrete version is defined as
Step26: Now we can find the most similar authors to some particular author. We use the Pandas library to print the results in a nice looking tables.
Step27: As before, we can specify the minimum author size.
Step28: Serialized corpora
|
3,849
|
<ASSISTANT_TASK:>
Python Code:
# Missing imports added for this fragment; the helper functions used below
# (gen_all_words, recal_words, get_values_filtered_normalized,
# get_fortran_array, get_data_from_fits, get_lines_from_fits,
# calculate_probability) are assumed to come from the project's own modules.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab
from pylab import gca
from matplotlib.ticker import FormatStrFormatter
from astropy.io import fits
import spams
file_path = '../data/2011.0.00419.S/sg_ouss_id/group_ouss_id/member_ouss_2013-03-06_id/product/IRAS16547-4247_Jet_CS_v1_7-6.clean.fits'
noise_pixel = (15, 4)
train_pixels = [(133, 135),(134, 135),(133, 136),(134, 136)]
img = fits.open(file_path)
meta = img[0].data
hdr = img[0].header
# V axis
naxisv = hdr['NAXIS3']
onevpix = hdr['CDELT3']*0.000001
v0 = hdr['CRVAL3']*0.000001
v0pix = int(hdr['CRPIX3'])
vaxis = onevpix * (np.arange(naxisv)+1-v0pix) + v0
values = meta[0, :, train_pixels[0][0], train_pixels[0][1]] - np.mean(meta[0, :, train_pixels[0][0], train_pixels[0][1]])
values = values/np.max(values)
plt.plot(vaxis, values)
plt.xlim(np.min(vaxis), np.max(vaxis))
plt.ylim(-1, 1)
gca().xaxis.set_major_formatter(FormatStrFormatter('%d'))
noise = meta[0, :, noise_pixel[0], noise_pixel[1]] - np.mean(meta[0, :, noise_pixel[0], noise_pixel[1]])
noise = noise/np.max(noise)
plt.plot(vaxis, noise)
plt.ylim(-1, 1)
plt.xlim(np.min(vaxis), np.max(vaxis))
gca().xaxis.set_major_formatter(FormatStrFormatter('%d'))
cube_params = {
'freq' : vaxis[naxisv/2],
'alpha' : 0,
'delta' : 0,
'spe_bw' : naxisv*onevpix,
'spe_res' : onevpix*v0pix,
's_f' : 4,
's_a' : 0}
dictionary = gen_all_words(cube_params, True)
prediction = pd.DataFrame([])
for train_pixel in train_pixels:
dictionary_recal, detected_peaks = recal_words(file_path, dictionary, cube_params,
train_pixel, noise_pixel)
X = get_values_filtered_normalized(file_path, train_pixel, cube_params)
y_train = get_fortran_array(np.asmatrix(X))
dictionary_recal_fa = np.asfortranarray(dictionary_recal,
dtype= np.double)
lambda_param = 0
for idx in range(0, len(detected_peaks)):
if detected_peaks[idx] != 0:
lambda_param += 1
param = {
'lambda1' : lambda_param,
# 'L': 1,
'pos' : True,
'mode' : 0,
'ols' : True,
'numThreads' : -1}
alpha = spams.lasso(y_train, dictionary_recal_fa, **param).toarray()
total = np.inner(dictionary_recal_fa, alpha.T)
for i in range(0, len(alpha)):
iso_col = dictionary_recal.columns[i]
if(not prediction.columns.isin([iso_col]).any()):
prediction[iso_col] = alpha[i]
else:
prediction[iso_col] = prediction[iso_col]*alpha[i]
for p in prediction.columns:
if(prediction[p][0] != 0):
print(prediction[p])
pylab.rcParams['figure.figsize'] = (15, 15)
# Step 1: Read Cube
ax = plt.subplot(6, 1, 1)
ax.set_title('i) Raw Spectra Data')
data = get_data_from_fits(file_path)
y = data[0, :, train_pixel[0], train_pixel[1]]
plt.xticks([])
plt.plot(vaxis, y)
lines = get_lines_from_fits(file_path)
for line in lines:
# Shows lines really present
isotope_frequency = int(line[1])
isotope_name = line[0] + "-f" + str(line[1])
plt.axvline(x=isotope_frequency, ymin=0, ymax= 3, color='g')
# 2. Normalize, filter dada
ax = plt.subplot(6, 1, 2)
ax.set_title('ii) Normalized/Filtered Data')
plt.ylim(ymin =0,ymax = 1.15)
y = get_values_filtered_normalized(file_path, train_pixel, cube_params)
plt.xticks([])
plt.plot(vaxis, y)
# 3. Possible Words
ax = plt.subplot(6, 1, 3)
ax.set_title('iii) Theoretical Dictionary')
plt.ylim(ymin =0,ymax = 1.15)
plt.xticks([])
plt.plot(vaxis, dictionary)
# 4. Detect Lines
ax = plt.subplot(6, 1, 4)
ax.set_title('iv) Detection of Candidate Lines')
plt.ylim(ymin =0,ymax = 1.15)
plt.plot(vaxis, y)
plt.xticks([])
plt.ylabel("Temperature")
for idx in range(0, len(detected_peaks)):
if detected_peaks[idx] != 0:
plt.axvline(x=vaxis[idx], ymin=0, ymax= 1, color='r')
# 6. Recalibrate Dictionary
ax = plt.subplot(6, 1, 5)
ax.set_title('v) Recalibration of Dictonary')
plt.ylim(ymin =0,ymax = 1.15)
plt.plot(vaxis, dictionary_recal_fa)
plt.xticks([])
# 6. Recover Signal
ax = plt.subplot(6, 1, 6)
ax.set_title('2) Reconstructed Signal')
plt.ylim(ymin =0,ymax = 1.15)
plt.plot(vaxis, total)
gca().xaxis.set_major_formatter(FormatStrFormatter('%d'))
# NOTE: `results` was undefined in the original cell; the nonzero lasso
# coefficients (alpha) appear to be what was meant here.
active = alpha[:, 0] > 0
for i in range(0, len(active)):
    if active[i]:
        print(dictionary_recal.columns[i])
        print(i)
for i in range(0, len(dictionary.index)):
print(calculate_probability(alpha, dictionary.index[i], dictionary_recal))
print(dictionary.index[i])
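# Sketch (not in the original notebook): quantify reconstruction quality via
# the RMS residual between the recovered signal and the filtered spectrum of
# the last trained pixel.
residual = np.asarray(X).flatten() - np.asarray(total).flatten()
print("RMS residual: {0:.4f}".format(np.sqrt(np.mean(residual ** 2))))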
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creation of Dictionary
Step2: Recalibration of Dictionary
|
3,850
|
<ASSISTANT_TASK:>
Python Code:
# Python language version
from platform import python_version
print('Python version used in this Jupyter Notebook:', python_version())
# Importing the packages
import numpy as np
import pandas as pd
import matplotlib as mat
import matplotlib.pyplot as plt
import colorsys
plt.style.use('seaborn-talk')
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
np.__version__
pd.__version__
mat.__version__
# Loading the dataset
df = pd.read_csv("Dados-Pesquisa.csv", sep = ',', low_memory=False)
print(df.head())
df
print(df.describe())
# List all the columns
list(df)
# What is the age distribution of the survey participants?
# Most professionals working as software developers are in the
# 20 to 30 age range, with 25 being the most frequent age.
# Generating a histogram
df.Age.hist(bins = 60)
plt.xlabel("Age")
plt.ylabel("Number of Professionals")
plt.title("Age Distribution")
plt.show()
# What is the gender distribution of the survey participants?
# The vast majority of developers are male.
# Defining the counts
labels = df.Gender.value_counts().index
num = len(df.Gender.value_counts().index)  # fixed: original counted EmploymentField here
# Creating the color list
listaHSV = [(x*1.0/num, 0.5, 0.5) for x in range(num)]
listaRGB = list(map(lambda x: colorsys.hsv_to_rgb(*x), listaHSV))
# Pie chart
fatias, texto = plt.pie(df.Gender.value_counts(), colors = listaRGB, startangle = 90)
#plt.axes().set_aspect('equal', 'datalim')
plt.legend(fatias, labels, bbox_to_anchor = (1.05,1))
plt.title("Gender")
plt.show()
# What are the main interests of the survey participants?
# Developers' main professional interest is web development (Full-Stack, Front-End and Back-End),
# followed by Data Science.
# Defining the counts
num = len(df.JobRoleInterest.value_counts().index)
# Creating the color list
listaHSV = [(x*1.0/num, 0.5, 0.5) for x in range(num)]
listaRGB = list(map(lambda x: colorsys.hsv_to_rgb(*x), listaHSV))
labels = df.JobRoleInterest.value_counts().index
colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray']
# Pie chart
fatias, texto = plt.pie(df.JobRoleInterest.value_counts(), colors = listaRGB, startangle = 90)
#plt.axes().set_aspect('equal', 'datalim')
plt.legend(fatias, labels, bbox_to_anchor = (1.25, 1))
plt.title("Professional Interest")
plt.show()
# Which business areas do the survey participants work in?
# Most developers work in software development and IT,
# but other areas such as finance and healthcare are
# also significant.
# Defining the counts
num = len(df.EmploymentField.value_counts().index)
# Creating the color list
listaHSV = [(x*1.0/num, 0.5, 0.5) for x in range(num)]
listaRGB = list(map(lambda x: colorsys.hsv_to_rgb(*x), listaHSV))
labels = df.EmploymentField.value_counts().index
# Pie chart
fatias, texto = plt.pie(df.EmploymentField.value_counts(), colors = listaRGB, startangle = 90)
#plt.axes().set_aspect('equal', 'datalim')
plt.legend(fatias, labels, bbox_to_anchor = (1.3, 1))
plt.title("Current Field of Work")
plt.show()
# What are the work preferences by age?
# Note that as age increases, interest in freelance work also
# increases, being the preferred model for professionals over 60.
# Younger professionals prefer to work at startups or in their
# own business. Professionals between 20 and 50 prefer to work
# at medium-sized companies.
# Grouping the data
df_ageranges = df.copy()
bins=[0, 20, 30, 40, 50, 60, 100]
df_ageranges['AgeRanges'] = pd.cut(df_ageranges['Age'],
                                   bins,
                                   labels=["< 20", "20-30", "30-40", "40-50", "50-60", "> 60"])
df2 = pd.crosstab(df_ageranges.AgeRanges,
                  df_ageranges.JobPref).apply(lambda r: r/r.sum(), axis=1)
# Defining the counts
num = len(df_ageranges.AgeRanges.value_counts().index)
# Creating the color list
listaHSV = [(x*1.0/num, 0.5, 0.5) for x in range(num)]
listaRGB = list(map(lambda x: colorsys.hsv_to_rgb(*x), listaHSV))
# Stacked bar chart
ax1 = df2.plot(kind = "bar", stacked = True, color = listaRGB, title = "Work Preference by Age")
lines, labels = ax1.get_legend_handles_labels()
ax1.legend(lines, labels, bbox_to_anchor = (1.51, 1))
# Viewing the help
help(pd.crosstab)
# What is the relocation outlook?
# The willingness to look for a new job decreases with age.
# Almost 80% of people under 30 are ready for it.
# Grouping the data
df3 = pd.crosstab(df_ageranges.AgeRanges,
                  df_ageranges.JobRelocateYesNo).apply(lambda r: r/r.sum(), axis = 1)
# Defining the counts
num = len(df_ageranges.AgeRanges.value_counts().index)
# Creating the color list
listaHSV = [(x*1.0/num, 0.5, 0.5) for x in range(num)]
listaRGB = list(map(lambda x: colorsys.hsv_to_rgb(*x), listaHSV))
# Stacked bar chart
ax1 = df3.plot(kind = "bar", stacked = True, color = listaRGB, title = "Relocation by Age")
lines, labels = ax1.get_legend_handles_labels()
ax1.legend(lines,["No", "Yes"], loc = 'best')
# Qual a relação entre idade e horas de aprendizagem?
# A idade dos profissionais não afeta a quantidade de tempo gasto com capacitação e treinamento.
import warnings
warnings.filterwarnings('ignore')
# Creating a data subset
df9 = df.copy()
df9 = df9.dropna(subset=["HoursLearning"])
df9 = df9[df9['Age'].isin(range(0,70))]
# Setting the x and y values
x = df9.Age
y = df9.HoursLearning
# Computing the fit and generating the plot
m, b = np.polyfit(x, y, 1)
plt.plot(x, y, '.')
plt.plot(x, m*x + b, '-', color = "red")
plt.xlabel("Idade")
plt.ylabel("Horas de Treinamento")
plt.title("Idade por Horas de Treinamento")
plt.show()
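# A small follow-up sketch (assuming scipy is available in this
# environment) to quantify the claim above: a Pearson correlation
# near zero supports "age does not affect training hours".
from scipy.stats import pearsonr
r, p_value = pearsonr(x, y)
print("Pearson r = {:.3f} (p = {:.3f})".format(r, p_value))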
# What is the relationship between investment in training and salary expectations?
# Professionals who invest time and money in training generally earn
# higher salaries, although some professionals expect high salaries
# while investing 0 in training.
import warnings
warnings.filterwarnings('ignore')
# Creating a data subset
df5 = df.copy()
df5 = df5.dropna(subset=["ExpectedEarning"])
df5 = df5[df5['MoneyForLearning'].isin(range(0,60000))]
# Setting the x and y values
x = df5.MoneyForLearning
y = df5.ExpectedEarning
# Computing the fit and generating the plot
m, b = np.polyfit(x, y, 1)
plt.plot(x, y, '.')
plt.plot(x, m*x + b, '-', color = "red")
plt.xlabel("Investimento em Treinamento")
plt.ylabel("Expectativa Salarial")
plt.title("Investimento em Treinamento vs Expectativa Salarial")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploratory Data Analysis
Step2: Age Distribution
Step3: Gender Distribution
Step4: Interest Distribution
Step5: Employment Distribution
Step6: Job Preferences by Age
Step7: Relocation by Age
Step8: Age vs Learning Hours
Step9: Training Investment vs Salary Expectations
|
3,851
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
x = np.array([1, 4, 7, 11, 34, -3, 5, 7, 5, 2, 3, 13])
###-------###
x[::2]
x[:-1] + x[1:]
x[1:] - x[:-1]
x[::2] / x[1::2]
N = len(x)
x[:N // 2] * x[N // 2:]
import numpy.linalg as linalg
A = np.array([ [3, 2, -1], [6, 4, -2], [5, 0, 3]])
B = np.array([ [2, 3, 2], [3, -4, -2], [4, -1, 1]])
linalg.matrix_rank(A)
A @ B
eig_vals, eig_vecs = linalg.eig(B)
eig_vecs[:,1]
b = np.array([14, -1, 11])
linalg.solve(B, b)
from scipy.integrate import quad
ans, err = quad(lambda x: np.tan(x), 0, np.pi / 4)
print(f'{ans:.4}')
ans, err = quad(lambda x: 3*x**3, -2, 0)
print(f'{ans:.4}')
ans, err = quad(lambda x: 4 * x**2 - x, 0, 3)
print(f'{ans:.4}')
ans, err = quad(lambda x: np.sin(x**2), -2, 2)
print(f'{ans:.4}')
def fxn(x):
if abs(x) < 2:
return x ** 2
elif x > 2:
return 4
else:
        return x
npfxn = np.vectorize(fxn)
#remove 1 point if they put lambda x: npfxn(x) instead of just the name.
ans, err = quad(npfxn, 0, 3)
print(f'{ans:.4}')
def integrand(x):
ans, err = quad(lambda y: np.sin(x * y), -2* x, x)
return ans
ans, err = quad(integrand, 0, 1)
print(f'{ans:.4}')
x = [-0.42,1.34,1.6,2.65,3.53,4.48,5.48,6.21,7.49,8.14,8.91,10.1]
y = [1.58,1.61,2.04,5.47,9.8,16.46,25.34,33.32,49.7,58.79,71.26,93.34]
x = np.array(x)
y = np.array(y)
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(x,y, 'o-')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
forward = (y[1:] - y[:-1]) / (x[1:] - x[:-1])
diff = 0.5 * (forward[1:] + forward[:-1])
print(np.round(diff, decimals=2))
integral = np.sum( y[:-1] * (x[1:] - x[:-1]))
print(f'{integral:.2f}')
integral = np.sum( (y[1:] + y[:-1]) * (x[1:] - x[:-1])) / 2
print(f'{integral:.2f}')
import scipy.stats as ss
def conf_interval(samples, confidence_level=0.95, sigma=None):
'''
computes confidence interval given samples
Parameters
---------
samples : list
The samples
confidence_level : float, optional
The confidence level. Default is 95%
    sigma : float, optional
        Population standard deviation. Default is None (unspecified)
Returns
---------
float
A value, y, such that the population mean lies at the sample mean +/- y
'''
sample_mean = np.mean(samples)
sample_var = np.var(samples, ddof=1)
if sigma is not None:
sample_var = sigma * sigma
if sigma is None and len(samples) < 25:
score = ss.t.ppf((1 - confidence_level) / 2, df=len(samples)-1)
else:
score = ss.norm.ppf((1 - confidence_level) / 2)
y = -score * np.sqrt(sample_var / len(samples))
return y
# case 1
samples = np.random.normal(size=10)
y = conf_interval(samples, 0.90, 1)
print(f'{np.mean(samples):.2} +/- {y:.2}')
# case 2
samples = np.random.normal(size=30)
y = conf_interval(samples, 0.95)
print(f'{np.mean(samples):.2} +/- {y:.2}')
# case 3
samples = np.random.normal(size=5)
y = conf_interval(samples, 0.99)
print(f'{np.mean(samples):.2} +/- {y:.2}')
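# Optional sanity check (a sketch, not part of the graded answer):
# estimate conf_interval's empirical coverage by simulation. For samples
# drawn from a standard normal (true mean 0), roughly 95% of the
# intervals mean +/- y should contain 0 at confidence_level=0.95.
hits = 0
trials = 1000
for _ in range(trials):
    s = np.random.normal(size=30)
    half_width = conf_interval(s, 0.95)
    hits += abs(np.mean(s)) <= half_width
print(f'empirical coverage: {hits / trials:.3f}')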
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.2 Answer
Step2: 1.3 Answer
Step3: 1.4 Answer
Step4: 1.5 Answer
Step5: 2. Matrix Calculations (12 Points)
Step6: Answer 2.2
Step7: Answer 2.3
Step8: Answer 2.4
Step9: 3. Definite Integrals (16 Points)
Step10: Answer 3.2
Step11: Answer 3.3
Step12: Answer 3.4
Step13: 4. Complex Functions (14 Points)
Step14: 4.2 Answer
Step15: 5. Working with Data (26 Points)
Step16: Answer 5.2
Step17: Answer 5.3
Step18: Answer 5.4
Step19: Answer 5.5
|
3,852
|
<ASSISTANT_TASK:>
Python Code:
import NotebookImport
from metaPCNA import *
import GTEX as GTEX
f_win.order().tail()
gabr = [g for g in rna_df.index if g.startswith('GABR')]
f = dx_rna.ix[gabr].dropna()
f.join(f_win).sort(f_win.name)
GTEX.plot_tissues_across_gene('GABRD', log=True)
gtex = np.log2(GTEX.gtex)
meta = GTEX.meta
tissue_type = GTEX.tissue_type
mm = gtex.ix[gabr].dropna().T.groupby(meta['SMTSD']).median()
mm = mm.clip_lower(-10)
mm.rank(1, ascending=False)['GABRD'].order().head(10)
fig, ax = subplots(figsize=(10,4))
o = ['GABRA1','GABRA2', 'GABRA3', 'GABRA4', 'GABRA5',
'GABRB1', 'GABRB2', 'GABRB3',
'GABRR1', 'GABRR2', 'GABRR3',
'GABRD','GABRE','GABRP','GABRQ']
c = sns.color_palette('muted', n_colors=7, desat=.7)
dd = gtex.ix[o, ti(tissue_type == 'Testis')].dropna(1)
sns.violinplot(dd.clip_lower(-4.).T, order=o,
names=[s[4:] for s in o],
inner='points', alpha=.7,
color=[c[0]]*5 + [c[1]]*3 + [c[2]]*3 + c[3:])
ax.set_xlabel('GABA subunit')
ax.set_ylabel('GTEX- Testis Expression (Log2)')
prettify_ax(ax)
fig.savefig(FIGDIR + 'Fig_S6.pdf')
series_scatter(dx_rna.frac.ix[gabr],
gtex.ix[gabr].dropna().T.groupby(meta['SMTSD']).median().ix['Testis'])
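# A sketch (assuming scipy is importable here) quantifying the
# correlation plotted above between the fraction up-regulated in cancer
# and median testis expression across the GABA-receptor genes:
import scipy.stats as stats
a = dx_rna.frac.ix[gabr].dropna()
b = gtex.ix[a.index].dropna().T.groupby(meta['SMTSD']).median().ix['Testis']
rho, p = stats.spearmanr(a.ix[b.index], b)
print('Spearman rho = {:.2f} (p = {:.3g})'.format(rho, p))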
sns.clustermap(gtex.ix[gabr, ti(meta['SMTSD'] == 'Testis')].T.corr())
bella = ti(meta['SMTSD'].dropna().str.contains('Cerebellum'))
sns.clustermap(gtex.ix[gabr, bella].T.corr())
sns.clustermap(dx.ix[gabr].T.corr().clip(-.5,.5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GABRD is highly expressed in many areas of the brain as well as in the testis. Interestingly it is the highest expressed subunit in the testis.
Step2: Here I'm plotting the different GABA receptors expression distribution in Testes.
Step3: There is a correlation of subunit expression in the testis with fraction upregulated in cancer across the GABA subunits.
Step4: Looking at co-expression
Step5: In the cerebellum
Step6: For TCGA differential expression
|
3,853
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (100, 110)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
from collections import Counter
counts = Counter(text)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: i for i, word in enumerate(set(vocab)) if len(word) > 0}
int_to_vocab = {i: word for i, word in enumerate(set(vocab)) if len(word) > 0}
return (vocab_to_int, int_to_vocab)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
tokens = {
".": "||Period||",
",": "||Comma||",
'"': "||Quotation_Mark||",
";": "||Semicolon||",
"!": "||Exclamation_Mark||",
"?": "||Question_Mark||",
"(": "||Left_Parentheses||",
")": "||Right_Parentheses||",
"--": "||Dash||",
"\n": "||Return||"
}
return tokens
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
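# Illustrative sketch (hypothetical usage, mirroring what
# helper.preprocess_and_save_data does with token_lookup internally):
# padding each token with spaces turns punctuation into its own "word".
sample = 'Hello, world!'
for key, token in token_lookup().items():
    sample = sample.replace(key, ' {} '.format(token))
print(sample)  # -> 'Hello ||Comma||  world ||Exclamation_Mark|| '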
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
input = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None], name='targets')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
return (input, targets, learning_rate)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
lstm_layers = 1
keep_prob = 0.7
# Your basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
# Getting an initial state of all zeros
initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name="initial_state")
return (cell, initial_state)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
return outputs, tf.identity(final_state, name="final_state")
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
embedding = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embedding)
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
return (logits, final_state)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
batch_length = batch_size * seq_length
n_batches = int(len(int_text) / batch_length)
inputs = np.array(int_text[: n_batches * batch_length])
targets = np.array(int_text[1: n_batches * batch_length + 1])
targets[-1] = inputs[0]
input_batches = np.split(inputs.reshape(batch_size, -1), n_batches, 1)
target_batches = np.split(targets.reshape(batch_size, -1), n_batches, 1)
batches = np.array(list(zip(input_batches, target_batches)))
return batches
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
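# A quick sketch of what get_batches returns: with 13 word ids,
# batch_size=2 and seq_length=3 we get 2 batches, each holding an
# (input, target) pair of shape (2, 3).
demo = get_batches(list(range(13)), 2, 3)
print(demo.shape)  # (2, 2, 2, 3): n_batches, input/target, batch_size, seq_length
print(demo[0][0])  # inputs of the first batch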
# Number of Epochs
num_epochs = 40
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 256
# Sequence Length
seq_length = 48
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 1
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
InputTensor = loaded_graph.get_tensor_by_name("input:0")
InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0")
FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0")
ProbsTensor = loaded_graph.get_tensor_by_name("probs:0")
return (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
max_prob = np.argmax(probabilities)
return int_to_vocab[max_prob]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
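# Note: argmax makes generation deterministic and often repetitive. A
# common variant (a sketch, not required by the project) samples the
# next word from the predicted distribution instead:
def pick_word_sampled(probabilities, int_to_vocab):
    idx = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[idx]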
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
3,854
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'sandbox-3', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
3,855
|
<ASSISTANT_TASK:>
Python Code:
import girder_client
import numpy as np
from matplotlib import pylab as plt
from matplotlib.colors import ListedColormap
from histomicstk.saliency.tissue_detection import (
get_slide_thumbnail, get_tissue_mask)
%matplotlib inline
APIURL = 'http://candygram.neurology.emory.edu:8080/api/v1/'
# SAMPLE_SLIDE_ID = '5d586d57bd4404c6b1f28640'
SAMPLE_SLIDE_ID = "5d817f5abd4404c6b1f744bb"
gc = girder_client.GirderClient(apiUrl=APIURL)
# gc.authenticate(interactive=True)
_ = gc.authenticate(apiKey='kri19nTIGOkWH01TbzRqfohaaDWb6kPecRqGmemb')
thumbnail_rgb = get_slide_thumbnail(gc, SAMPLE_SLIDE_ID)
plt.imshow(thumbnail_rgb)
print(get_tissue_mask.__doc__)
labeled, mask = get_tissue_mask(
thumbnail_rgb, deconvolve_first=True,
n_thresholding_steps=2, sigma=0., min_size=30)
vals = np.random.rand(256,3)
vals[0, ...] = [0.9, 0.9, 0.9]
cMap = ListedColormap(1 - vals)
f, ax = plt.subplots(1, 3, figsize=(20, 20))
ax[0].imshow(thumbnail_rgb)
ax[1].imshow(labeled, cmap=cMap) # all tissue regions
ax[2].imshow(mask, cmap=cMap) # largest tissue region
plt.show()
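# A small follow-up sketch (assumes `mask` is a binary array): fraction
# of the thumbnail covered by the largest tissue region.
tissue_fraction = mask.sum() / float(mask.size)
print('largest tissue region covers %.1f%% of the thumbnail' % (100 * tissue_fraction))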
for deconvolve_first in [False, True]:
for n_thresholding_steps in [1, 2]:
labeled, mask = get_tissue_mask(
thumbnail_rgb, deconvolve_first=deconvolve_first,
n_thresholding_steps=n_thresholding_steps, sigma=0., min_size=30)
f, ax = plt.subplots(1, 3, figsize=(20, 5))
ax[0].imshow(thumbnail_rgb)
ax[1].imshow(labeled, cmap=cMap)
ax[2].imshow(mask, cmap=cMap)
plt.suptitle("deconvolve = %s, n_thresholding_steps = %d" % (deconvolve_first, n_thresholding_steps), fontsize=20)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constants and Prepwork
Step2: First, let's fetch the slide thumbnail
Step3: (Optional) Color normalization of thumbnail
Step4: Get the tissue masks
Step5: Visualize the result
Step6: Note effect of hyperparameters
|
3,856
|
<ASSISTANT_TASK:>
Python Code:
# Base library imports
from __future__ import print_function
from __future__ import division
import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
import sys
# Use insert(0, ...) so only the GitHub version of abupy is used, avoiding version mismatches with a pip-installed copy
sys.path.insert(0, os.path.abspath('../'))
import abupy
# Use sandbox data to reproduce the same data environment as the book
abupy.env.enable_example_env_ipython()
from abupy import AbuFactorAtrNStop, AbuFactorPreAtrNStop, AbuFactorCloseAtrNStop, AbuFactorBuyBreak, ABuProgress
from abupy import abu, EMarketTargetType, AbuMetricsBase, ABuMarketDrawing, AbuFuturesCn, ABuSymbolPd, AbuOrderPdProxy
from abupy import AbuUmpMainDeg, AbuUmpMainJump, AbuUmpMainPrice, AbuUmpMainWave, AbuFuturesCn, EStoreAbu, AbuML
from abupy import AbuUmpEdgeDeg, AbuUmpEdgePrice, AbuUmpEdgeWave, AbuUmpEdgeMul
us_choice_symbols = ['usTSLA', 'usNOAH', 'usSFUN', 'usBIDU', 'usAAPL', 'usGOOG', 'usWUBA', 'usVIPS']
cn_choice_symbols = ['002230', '300104', '300059', '601766', '600085', '600036', '600809', '000002', '002594']
hk_choice_symbols = ['hk03333', 'hk00700', 'hk02333', 'hk01359', 'hk00656', 'hk03888', 'hk02318']
tc_choice_symbols = ['btc', 'ltc']
# Futures symbols are read directly from AbuFuturesCn().symbol
ft_choice_symbols = AbuFuturesCn().symbol.tolist()
# Training set: all sandbox US stocks + all sandbox A-shares + all sandbox HK stocks + Bitcoin
train_choice_symbols = us_choice_symbols + cn_choice_symbols + hk_choice_symbols + tc_choice_symbols[:1]
# Test set: all sandbox futures + Litecoin
test_choice_symbols = ft_choice_symbols + tc_choice_symbols[1:]
# Set the initial capital
read_cash = 1000000
# Buy factors still use the upward-breakout factor
buy_factors = [{'xd': 60, 'class': AbuFactorBuyBreak},
{'xd': 42, 'class': AbuFactorBuyBreak}]
# Sell factors continue to use the factors from the previous section
sell_factors = [
{'stop_loss_n': 1.0, 'stop_win_n': 3.0,
'class': AbuFactorAtrNStop},
{'class': AbuFactorPreAtrNStop, 'pre_atr_n': 1.5},
{'class': AbuFactorCloseAtrNStop, 'close_atr_n': 1.5}
]
# Have the backtest generate buy-moment features
abupy.env.g_enable_ml_feature = True
abu_result_tuple_train = abu.load_abu_result_tuple(n_folds=2, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
custom_name='lecture_train')
orders_pd_train = abu_result_tuple_train.orders_pd
AbuMetricsBase.show_general(*abu_result_tuple_train, returns_cmp=True, only_info=True)
_ = AbuUmpMainDeg.ump_main_clf_dump(orders_pd_train, p_ncs=slice(20, 40, 1))
ump_jump = AbuUmpMainJump.ump_main_clf_dump(orders_pd_train, p_ncs=slice(20, 40, 1))
ump_jump.fiter.df.head()
ump_price = AbuUmpMainPrice.ump_main_clf_dump(orders_pd_train, p_ncs=slice(20, 40, 1))
ump_price.fiter.df.head()
ump_wave = AbuUmpMainWave.ump_main_clf_dump(orders_pd_train, p_ncs=slice(20, 40, 1))
ump_wave.fiter.df.head()
# Disable main-umpire interception
abupy.env.g_enable_ump_main_deg_block = False
abupy.env.g_enable_ump_main_jump_block = False
abupy.env.g_enable_ump_main_price_block = False
abupy.env.g_enable_ump_main_wave_block = False
abu_result_tuple_test, _ = abu.run_loop_back(read_cash,
buy_factors,
sell_factors,
start='2014-07-26',
end='2016-07-26',
choice_symbols=test_choice_symbols)
ABuProgress.clear_output()
AbuMetricsBase.show_general(*abu_result_tuple_test, returns_cmp=True, only_info=True)
abupy.env.g_enable_ump_main_deg_block = True
abupy.env.g_enable_ump_main_jump_block = True
abupy.env.g_enable_ump_main_price_block = True
abupy.env.g_enable_ump_main_wave_block = True
abu_result_tuple_test_ump, _ = abu.run_loop_back(read_cash,
buy_factors,
sell_factors,
start='2014-07-26',
end='2016-07-26',
choice_symbols=test_choice_symbols)
ABuProgress.clear_output()
AbuMetricsBase.show_general(*abu_result_tuple_test_ump, returns_cmp=True, only_info=True)
proxy = AbuOrderPdProxy(abu_result_tuple_test.orders_pd)
with proxy.proxy_work(abu_result_tuple_test_ump.orders_pd) as (order1, order2):
block_order = order1 - order2
print('Number of losing trades correctly blocked: {}, number of trades wrongly blocked: {}'.format(block_order.result.value_counts()[-1], block_order.result.value_counts()[1]))
block_order.head()
class AbuFactorBuyBreakUmpDemo(AbuFactorBuyBreak):
    Extend AbuFactorBuyBreak to organize the umpires for a more complex joint ruling
def make_ump_block_decision(self, ml_feature_dict):
ump = self.ump_manger
        # Count the clusters the angle (deg) main umpire hits for this trade
        deg_hit_cnt = ump.ump_main_deg.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the gap (jump) main umpire hits for this trade
        jump_hit_cnt = ump.ump_main_jump.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the wave (volatility) main umpire hits for this trade
        wave_hit_cnt = ump.ump_main_wave.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the price main umpire hits for this trade
        price_hit_cnt = ump.ump_main_price.predict_hit_kwargs(**ml_feature_dict)
        # Coordinate interception between umpires; as a simple example,
        # block the trade whenever the total hit count exceeds 2
        if deg_hit_cnt + jump_hit_cnt + wave_hit_cnt + price_hit_cnt > 2:
return True
return False
# Import AbuFactorBuyBreakUmpDemo via a module import,
# because on Windows, classes defined in an IPython notebook cannot be found in child processes once parallel execution starts
from abupy import AbuFactorBuyBreakUmpDemo
buy_factors = [{'xd': 60, 'class': AbuFactorBuyBreakUmpDemo},
{'xd': 42, 'class': AbuFactorBuyBreakUmpDemo}]
abu_result_tuple_test_ump_mul, _ = abu.run_loop_back(read_cash,
buy_factors,
sell_factors,
start='2014-07-26',
end='2016-07-26',
choice_symbols=test_choice_symbols)
ABuProgress.clear_output()
AbuMetricsBase.show_general(*abu_result_tuple_test_ump_mul, returns_cmp=True, only_info=True)
class AbuFactorBuyBreakReocrdHitDemo(AbuFactorBuyBreak):
def make_ump_block_decision(self, ml_feature_dict):
ump = self.ump_manger
        # Count the clusters the angle (deg) main umpire hits for this trade
        deg_hit_cnt = ump.ump_main_deg.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the gap (jump) main umpire hits for this trade
        jump_hit_cnt = ump.ump_main_jump.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the wave (volatility) main umpire hits for this trade
        wave_hit_cnt = ump.ump_main_wave.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the price main umpire hits for this trade
        price_hit_cnt = ump.ump_main_price.predict_hit_kwargs(**ml_feature_dict)
ml_feature_dict.update({'deg_hit_cnt': deg_hit_cnt, 'jump_hit_cnt': jump_hit_cnt,
'wave_hit_cnt': wave_hit_cnt, 'price_hit_cnt': price_hit_cnt})
return False
# Import AbuFactorBuyBreakReocrdHitDemo via a module import,
# because on Windows, classes defined in an IPython notebook cannot be found in child processes once parallel execution starts
from abupy import AbuFactorBuyBreakReocrdHitDemo
buy_factors = [{'xd': 60, 'class': AbuFactorBuyBreakReocrdHitDemo},
{'xd': 42, 'class': AbuFactorBuyBreakReocrdHitDemo}]
abu_result_tuple_test_ump_record, _ = abu.run_loop_back(read_cash,
buy_factors,
sell_factors,
start='2014-07-26',
end='2016-07-26',
choice_symbols=test_choice_symbols)
ABuProgress.clear_output()
hit_df = pd.DataFrame()
def make_hit_df(order):
global hit_df
if order.result != 0:
hit = pd.DataFrame([order.result, order.ml_features['deg_hit_cnt'],
order.ml_features['jump_hit_cnt'],
order.ml_features['wave_hit_cnt'],
order.ml_features['price_hit_cnt']],
index=['result', 'deg_hit_cnt', 'jump_hit_cnt', 'wave_hit_cnt', 'price_hit_cnt']).T
hit_df = hit_df.append(hit)
_ = abu_result_tuple_test_ump_record.orders_pd.apply(make_hit_df, axis=1)
hit_df.head()
hd_np = hit_df.as_matrix()
y = hd_np[:, 0]
x = hd_np[:, 1:]
hit_ml = AbuML(x, y, hit_df)
hit_ml.fit()
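# A quick sanity check (sketch): inspect the class balance of the
# recorded trade results before trusting the fitted decision model.
print(hit_df['result'].value_counts())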
class AbuFactorBuyBreakHitPredictDemo(AbuFactorBuyBreak):
    Extend AbuFactorBuyBreak to organize the umpires for a more complex joint ruling
    def _init_self(self, **kwargs):
        Basically the same as AbuFactorBuyBreak; the only difference is that kwargs carries the decision model trained on data recorded by AbuFactorBuyBreakReocrdHitDemo
        super(AbuFactorBuyBreakHitPredictDemo, self)._init_self(**kwargs)
        # Attach the decision model trained on the recorded umpire hit counts
        self.hit_ml = kwargs['hit_ml']
def make_ump_block_decision(self, ml_feature_dict):
ump = self.ump_manger
        # Count the clusters the angle (deg) main umpire hits for this trade
        deg_hit_cnt = ump.ump_main_deg.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the gap (jump) main umpire hits for this trade
        jump_hit_cnt = ump.ump_main_jump.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the wave (volatility) main umpire hits for this trade
        wave_hit_cnt = ump.ump_main_wave.predict_hit_kwargs(**ml_feature_dict)
        # Count the clusters the price main umpire hits for this trade
        price_hit_cnt = ump.ump_main_price.predict_hit_kwargs(**ml_feature_dict)
result = self.hit_ml.predict([deg_hit_cnt, jump_hit_cnt, wave_hit_cnt, price_hit_cnt])[0]
if result == -1:
return True
return False
# Import AbuFactorBuyBreakHitPredictDemo via a module import,
# because on Windows, classes defined in an IPython notebook cannot be found in child processes once parallel execution starts
from abupy import AbuFactorBuyBreakHitPredictDemo
buy_factors = [{'hit_ml':hit_ml, 'xd': 60, 'class': AbuFactorBuyBreakHitPredictDemo},
{'hit_ml':hit_ml, 'xd': 42, 'class': AbuFactorBuyBreakHitPredictDemo}]
abu_result_tuple_test_ump_predict, _ = abu.run_loop_back(read_cash,
buy_factors,
sell_factors,
start='2014-07-26',
end='2016-07-26',
choice_symbols=test_choice_symbols)
ABuProgress.clear_output()
AbuMetricsBase.show_general(*abu_result_tuple_test_ump_predict, returns_cmp=True, only_info=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Because of the sandbox data limits, the analysis in this section's examples is restricted to abupy's built-in sandbox data; for the complete examples and code, see the relevant chapters of 《量化交易之路》 (The Road of Quantitative Trading).
Step2: After finishing the relevant content of Section 15, use load_abu_result_tuple to load the training-set data saved locally in the previous section:
Step3: 1. The gap (jump) main umpire
Step4: Next we use the ump_main_clf_dump() function to train and save the gap umpire AbuUmpMainJump:
Step5: Above, ump_main_clf_dump() has already handled training, filtering, and saving the classifier locally; below we again look for macro-level explanations of the classification clusters that AbuUmpMainJump uses to block trades.
Step6: The features shown above are the rank position of the buy-day price among all prices in the feature window; for example, with a 60-day window, if the buy-day price is the highest of the 60 days then price_rank60=1.0, and if it is the 30th highest then price_rank60=0.5.
Step7: The features shown are price-volatility features over three windows.
Step8: Run the test-set backtest with the same parameters as above (the same capital and strategy parameters); the only difference is that main-umpire interception is enabled, as the code below shows:
Step9: AbuOrderPdProxy can be used to inspect the blocked trades
Step11: Because this section's example trains on only the few trades available in the sandbox data, and the test set also has few trades, the results are indeed mediocre (the documents after Section 20 will walk through ump training after full-market backtests of US stocks, A-shares, HK stocks, etc., and backtesting the test set). The comparison shows that a hundred-odd trades were blocked, with more correct blocks than wrong ones; the win rate and profit/loss are not ideal, but blocking many trades saves commission, and reducing the trade count is the best optimization.
Step12: The code above organizes the umpires inside the strategy's make_ump_block_decision for a more complex joint ruling, coordinating interception between umpires. In fact this make_ump_block_decision is written far too simply, purely as a minimal example; for the concrete implementation see the ump_block function in AbuUmpManager. Below, AbuFactorBuyBreakUmpDemo is used as the buy factor, again with parameters 60 and 42, for the backtest shown below:
Step13: Next we demonstrate the third point mentioned above: statistically assigning each umpire a ruling weight for a joint decision.
Step14: Backtest using AbuFactorBuyBreakReocrdHitDemo, as shown below:
Step15: Next, combine the data AbuFactorBuyBreakReocrdHitDemo just recorded during the backtest with each trade's final result to form hit_df, as shown below:
Step16: In the hit_df built above, the first column result can serve as the supervised-learning y and the other columns as the x feature columns; below the data is wrapped directly with AbuML:
Step19: We again subclass AbuFactorBuyBreak and override make_ump_block_decision; the difference is that hit_ml predicts on the feature vector formed by the umpires' cluster-hit counts for this trade, blocking trades whose prediction is -1, as the code below shows:
Step20: Backtest with AbuFactorBuyBreakHitPredictDemo; note that the just-trained hit_ml is passed when constructing the strategy dict list, as shown below:
Step21: The metrics below reach a very high win rate and profit/loss ratio, but training on the backtest data and then using it to guide the same backtest makes the results meaningless. The example here is only meant to make it easy to understand what it means to let the umpires learn how to cooperate and make the most correct judgments themselves; more detailed and complete examples will be covered in later chapters, so watch for update notices on the official WeChat account.
|
3,857
|
<ASSISTANT_TASK:>
Python Code:
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.time_series.ex5 import *
# Setup notebook
from pathlib import Path
from learntools.time_series.style import * # plot style settings
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from statsmodels.tsa.deterministic import DeterministicProcess
from xgboost import XGBRegressor
comp_dir = Path('../input/store-sales-time-series-forecasting')
data_dir = Path("../input/ts-course-data")
store_sales = pd.read_csv(
comp_dir / 'train.csv',
usecols=['store_nbr', 'family', 'date', 'sales', 'onpromotion'],
dtype={
'store_nbr': 'category',
'family': 'category',
'sales': 'float32',
},
parse_dates=['date'],
infer_datetime_format=True,
)
store_sales['date'] = store_sales.date.dt.to_period('D')
store_sales = store_sales.set_index(['store_nbr', 'family', 'date']).sort_index()
family_sales = (
store_sales
.groupby(['family', 'date'])
.mean()
.unstack('family')
.loc['2017']
)
# You'll add fit and predict methods to this minimal class
class BoostedHybrid:
def __init__(self, model_1, model_2):
self.model_1 = model_1
self.model_2 = model_2
self.y_columns = None # store column names from fit method
def fit(self, X_1, X_2, y):
# YOUR CODE HERE: fit self.model_1
____
y_fit = pd.DataFrame(
# YOUR CODE HERE: make predictions with self.model_1
____,
index=X_1.index, columns=y.columns,
)
# YOUR CODE HERE: compute residuals
y_resid = ____
y_resid = y_resid.stack().squeeze() # wide to long
# YOUR CODE HERE: fit self.model_2 on residuals
self.model_2.fit(____, ____)
# Save column names for predict method
self.y_columns = y.columns
# Save data for question checking
self.y_fit = y_fit
self.y_resid = y_resid
# Add method to class
BoostedHybrid.fit = fit
# Check your answer
q_1.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_1.hint()
#_COMMENT_IF(PROD)_
q_1.solution()
#%%RM_IF(PROD)%%
def fit(self, X_1, X_2, y):
# Train model_1
self.model_1.fit(X_1, y)
# Make predictions
y_fit = pd.DataFrame(
self.model_1.predict(X_1), index=X_1.index, columns=y.columns,
)
# Compute residuals
y_resid = y - y_fit
y_resid = y_resid.stack().squeeze() # wide to long
# Train model_2 on residuals
self.model_2.fit(X_2, y.stack().squeeze())
# Save column names for predict method
self.y_columns = y.columns
# Save data for question checking
self.y_fit = y_fit
self.y_resid = y_resid
# Add method to class
BoostedHybrid.fit = fit
q_1.assert_check_failed()
#%%RM_IF(PROD)%%
def fit(self, X_1, X_2, y):
# Train model_1
self.model_1.fit(X_1, y)
# Make predictions
y_fit = pd.DataFrame(
self.model_1.predict(X_1), index=X_1.index, columns=y.columns,
)
# Compute residuals
y_resid = y
y_resid = y_resid.stack().squeeze() # wide to long
# Train model_2 on residuals
self.model_2.fit(X_2, y_resid)
# Save column names for predict method
self.y_columns = y.columns
# Save data for question checking
self.y_fit = y_fit
self.y_resid = y_resid
# Add method to class
BoostedHybrid.fit = fit
q_1.assert_check_failed()
#%%RM_IF(PROD)%%
def fit(self, X_1, X_2, y):
# Train model_1
self.model_1.fit(X_1, y)
# Make predictions
y_fit = pd.DataFrame(
self.model_1.predict(X_1), index=X_1.index, columns=y.columns,
)
# Compute residuals
y_resid = y - y_fit
y_resid = y_resid.stack().squeeze() # wide to long
# Train model_2 on residuals
self.model_2.fit(X_2, y_resid)
# Save column names for predict method
self.y_columns = y.columns
# Save data for question checking
self.y_fit = y_fit
self.y_resid = y_resid
# Add method to class
BoostedHybrid.fit = fit
q_1.assert_check_passed()
def predict(self, X_1, X_2):
y_pred = pd.DataFrame(
# YOUR CODE HERE: predict with self.model_1
____,
index=X_1.index, columns=self.y_columns,
)
y_pred = y_pred.stack().squeeze() # wide to long
# YOUR CODE HERE: add self.model_2 predictions to y_pred
y_pred += ____
return y_pred.unstack() # long to wide
# Add method to class
BoostedHybrid.predict = predict
# Check your answer
q_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_2.hint()
#_COMMENT_IF(PROD)_
q_2.solution()
#%%RM_IF(PROD)%%
def predict(self, X_1, X_2):
# Predict with model_1
y_pred = pd.DataFrame(
self.model_1.predict(X_1), index=X_1.index, columns=self.y_columns,
)
y_pred = y_pred.stack().squeeze() # wide to long
# Add model_2 predictions to model_1 predictions
y_pred += y_pred
return y_pred.unstack()
# Add method to class
BoostedHybrid.predict = predict
q_2.assert_check_failed()
#%%RM_IF(PROD)%%
def predict(self, X_1, X_2):
# Predict with model_1
y_pred = pd.DataFrame(
self.model_1.predict(X_1), index=X_1.index, columns=self.y_columns,
)
y_pred = y_pred.stack().squeeze() # wide to long
# Add model_2 predictions to model_1 predictions
y_pred += self.model_2.predict(X_2)
return y_pred.unstack()
# Add method to class
BoostedHybrid.predict = predict
q_2.assert_check_passed()
# Target series
y = family_sales.loc[:, 'sales']
# X_1: Features for Linear Regression
dp = DeterministicProcess(index=y.index, order=1)
X_1 = dp.in_sample()
# X_2: Features for XGBoost
X_2 = family_sales.drop('sales', axis=1).stack() # onpromotion feature
# Label encoding for 'family'
le = LabelEncoder() # from sklearn.preprocessing
X_2 = X_2.reset_index('family')
X_2['family'] = le.fit_transform(X_2['family'])
# Label encoding for seasonality
X_2["day"] = X_2.index.day # values are day of the month
# YOUR CODE HERE: Create LinearRegression + XGBRegressor hybrid with BoostedHybrid
model = ____
# YOUR CODE HERE: Fit and predict
#_UNCOMMENT_IF(PROD)_
#model.fit(____, ____, ____)
y_pred = ____
#_UNCOMMENT_IF(PROD)_
#y_pred = y_pred.clip(0.0)
# Check your answer
q_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_3.hint()
#_COMMENT_IF(PROD)_
q_3.solution()
#%%RM_IF(PROD)%%
# Create model
model = BoostedHybrid(
model_1=LinearRegression(),
model_2=LinearRegression(),
)
model.fit(X_1, X_2, y)
y_pred = model.predict(X_1, X_2)
y_pred = y_pred.clip(0.0)
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
# Create model
model = BoostedHybrid(
model_1=LinearRegression,
model_2=XGBRegressor,
)
#model.fit(X_1, X_2, y)
#y_pred = model.predict(X_1, X_2)
#y_pred = y_pred.clip(0.0)
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
# Create model
model = BoostedHybrid(
model_1=LinearRegression(),
model_2=XGBRegressor(),
)
model.fit(X_1, X_2, y)
y_pred = model.predict(X_1, X_2)
y_pred = y_pred.clip(0.0)
q_3.assert_check_passed()
# Model 1 (trend)
from pyearth import Earth
from sklearn.linear_model import ElasticNet, Lasso, Ridge
# Model 2
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
# Boosted Hybrid
# YOUR CODE HERE: Try different combinations of the algorithms above
model = BoostedHybrid(
model_1=Ridge(),
model_2=KNeighborsRegressor(),
)
y_train, y_valid = y[:"2017-07-01"], y["2017-07-02":]
X1_train, X1_valid = X_1[:"2017-07-01"], X_1["2017-07-02":]
X2_train, X2_valid = X_2.loc[:"2017-07-01"], X_2.loc["2017-07-02":]
# Some of the algorithms above do best with certain kinds of
# preprocessing on the features (like standardization), but this is
# just a demo.
model.fit(X1_train, X2_train, y_train)
y_fit = model.predict(X1_train, X2_train).clip(0.0)
y_pred = model.predict(X1_valid, X2_valid).clip(0.0)
families = y.columns[0:6]
axs = y.loc(axis=1)[families].plot(
subplots=True, sharex=True, figsize=(11, 9), **plot_params, alpha=0.5,
)
_ = y_fit.loc(axis=1)[families].plot(subplots=True, sharex=True, color='C0', ax=axs)
_ = y_pred.loc(axis=1)[families].plot(subplots=True, sharex=True, color='C3', ax=axs)
for ax, family in zip(axs, families):
ax.legend([])
ax.set_ylabel(family)
# View the solution (Run this cell to receive credit!)
q_4.check()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the next two questions, you'll create a boosted hybrid for the Store Sales dataset by implementing a new Python class. Run this cell to create the initial class definition. You'll add fit and predict methods to give it a scikit-learn like interface.
Step2: 1) Define fit method for boosted hybrid
Step3: 2) Define predict method for boosted hybrid
Step4: Now you're ready to use your new BoostedHybrid class to create a model for the Store Sales data. Run the next cell to set up the data for training.
Step5: 3) Train boosted hybrid
Step6: Depending on your problem, you might want to use other hybrid combinations than the linear regression + XGBoost hybrid you've created in the previous questions. Run the next cell to try other algorithms from scikit-learn.
Step7: These are just some suggestions. You might discover other algorithms you like in the scikit-learn User Guide.
Step8: 4) Fit with different learning algorithms
|
3,858
|
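The two-stage idea in the row above — model_1 learns the trend, model_2 learns the residuals — can be sanity-checked on synthetic data without the Kaggle checker or the store-sales files. A minimal sketch, assuming only numpy and scikit-learn (the data and feature names here are made up):
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingRegressor
rng = np.random.default_rng(0)
t = np.arange(200, dtype=float)
y = 0.05 * t + np.sin(t / 7.0) + rng.normal(0, 0.1, t.size)  # trend + cycle + noise
X1 = t.reshape(-1, 1)                  # trend feature for the linear stage
X2 = np.sin(t / 7.0).reshape(-1, 1)    # cyclic feature for the boosting stage
model_1 = LinearRegression().fit(X1, y)
resid = y - model_1.predict(X1)        # what the linear stage missed
model_2 = GradientBoostingRegressor().fit(X2, resid)
y_pred = model_1.predict(X1) + model_2.predict(X2)  # hybrid prediction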
<ASSISTANT_TASK:>
Python Code:
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.factors import AverageDollarVolume
from quantopian.research import run_pipeline
# Since the underlying data of Fundamentals.exchange_id
# is of type string, .latest returns a Classifier
exchange = Fundamentals.exchange_id.latest
from quantopian.pipeline.classifiers.fundamentals import Sector
morningstar_sector = Sector()
nyse_filter = exchange.eq('NYS')
dollar_volume_decile = AverageDollarVolume(window_length=10).deciles()
top_decile = (dollar_volume_decile.eq(9))
def make_pipeline():
exchange = Fundamentals.exchange_id.latest
nyse_filter = exchange.eq('NYS')
morningstar_sector = Sector()
dollar_volume_decile = AverageDollarVolume(window_length=10).deciles()
top_decile = (dollar_volume_decile.eq(9))
return Pipeline(
columns={
'exchange': exchange,
'sector_code': morningstar_sector,
'dollar_volume_decile': dollar_volume_decile
},
screen=(nyse_filter & top_decile)
)
result = run_pipeline(make_pipeline(), '2015-05-05', '2015-05-05')
print 'Number of securities that passed the filter: %d' % len(result)
result.head(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Previously, we saw that the latest attribute produced an instance of a Factor. In this case, since the underlying data is of type string, latest produces a Classifier.
Step2: Using Sector is equivalent to Fundamentals.morningstar_sector_code.latest.
Step3: This filter will return True for securities having 'NYS' as their most recent exchange_id.
Step4: Let's put each of our classifiers into a pipeline and run it to see what they look like.
|
3,859
|
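The row above only runs inside Quantopian's research environment. The screen it builds — NYSE listings in the top dollar-volume decile — can be sketched with plain pandas on a made-up frame (a rough stand-in, not the Quantopian machinery):
import pandas as pd
df = pd.DataFrame({
    'exchange': ['NYS', 'NAS', 'NYS', 'NYS'],
    'dollar_volume': [9e8, 5e8, 1e9, 2e6],
}, index=['AAA', 'BBB', 'CCC', 'DDD'])
# rough stand-in for AverageDollarVolume(window_length=10).deciles()
df['decile'] = pd.qcut(df['dollar_volume'], 10, labels=False, duplicates='drop')
screen = (df['exchange'] == 'NYS') & (df['decile'] == df['decile'].max())
print(df[screen])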
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
# scientific
%matplotlib inline
from matplotlib import pyplot as plt;
import matplotlib as mpl;
import numpy as np;
import sklearn as skl;
import sklearn.datasets;
import sklearn.cluster;
import sklearn.mixture;
# ipython
import IPython;
# python
import os;
import random;
#####################################################
# image processing
import PIL.Image;
import PIL.ImageChops;
# trim and scale images
def trim(im, percent=100):
print("trim:", percent);
bg = PIL.Image.new(im.mode, im.size, im.getpixel((0,0)))
diff = PIL.ImageChops.difference(im, bg)
diff = PIL.ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
x = im.crop(bbox)
return x.resize(((x.size[0]*percent)//100, (x.size[1]*percent)//100), PIL.Image.ANTIALIAS);
#####################################################
# daft (rendering PGMs)
import daft;
# set to FALSE to load PGMs from static images
RENDER_PGMS = True;
# decorator for pgm rendering
def pgm_render(pgm_func):
def render_func(path, percent=100, render=None, *args, **kwargs):
print("render_func:", percent);
# render
render = render if (render is not None) else RENDER_PGMS;
if render:
print("rendering");
# render
pgm = pgm_func(*args, **kwargs);
pgm.render();
pgm.figure.savefig(path, dpi=300);
# trim
img = trim(PIL.Image.open(path), percent);
img.save(path, 'PNG');
else:
print("not rendering");
# error
if not os.path.isfile(path):
raise Exception("Error: Graphical model image %s not found. You may need to set RENDER_PGMS=True." % path);
# display
return IPython.display.Image(filename=path);#trim(PIL.Image.open(path), percent);
return render_func;
######################################################
@pgm_render
def pgm_latent():
pgm = daft.PGM([4,4], origin=[-2,-1], node_unit=0.8, grid_unit=2.0);
# nodes
pgm.add_node(daft.Node("z", r"$Z_n$", 0.7, 1));
pgm.add_node(daft.Node("x", r"$X_n$", 1.3, 1, observed=True));
pgm.add_node(daft.Node("theta", r"$\theta$", 1.3, 0.3));
# edges
pgm.add_edge("z", "x", head_length=0.08);
pgm.add_edge("theta", "x", head_length=0.08);
pgm.add_edge("theta", "z", head_length=0.08);
pgm.add_plate(daft.Plate([0.4,0.8,1.3,0.5], label=r"$\qquad\qquad\qquad\;\; N$",
shift=-0.1))
return pgm;
%%capture
pgm_latent("images/pgm/latent.png")
# example stolen from scikit-learn docs
def plot_gmm():
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = skl.mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
plt.figure(figsize=(10,8))
CS = plt.contour(X, Y, Z, norm=mpl.colors.LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plot_gmm()
%%capture
pgm_latent("images/pgm/latent.png")
@pgm_render
def pgm_coinflip():
pgm = daft.PGM([4,4], origin=[-2,-1], node_unit=0.8, grid_unit=2.0);
# nodes
pgm.add_node(daft.Node("z", r"$Z_n$", 0.7, 1));
pgm.add_node(daft.Node("x", r"$X_n$", 1.3, 1, observed=True));
pgm.add_node(daft.Node("theta", r"$\theta$", 1.3, 0.3));
# edges
pgm.add_edge("z", "x", head_length=0.08);
pgm.add_edge("theta", "x", head_length=0.08);
pgm.add_plate(daft.Plate([0.4,0.8,1.3,0.5],
label=r"$\qquad\qquad\qquad\;\; N$",
shift=-0.1))
return pgm;
%%capture
pgm_coinflip("images/pgm/coinflip.png")
def coin_likelihood(roll, bias):
# P(X | Z, theta)
numHeads = roll.count("H");
flips = len(roll);
return pow(bias, numHeads) * pow(1-bias, flips-numHeads);
def coin_marginal_likelihood(rolls, biasA, biasB):
# P(X | theta)
trials = [];
for roll in rolls:
h = roll.count("H");
t = roll.count("T");
likelihoodA = coin_likelihood(roll, biasA);
likelihoodB = coin_likelihood(roll, biasB);
trials.append(np.log(0.5 * (likelihoodA + likelihoodB)));
return sum(trials);
def plot_coin_likelihood(rolls, thetas=None):
# grid
xvals = np.linspace(0.01,0.99,100);
yvals = np.linspace(0.01,0.99,100);
X,Y = np.meshgrid(xvals, yvals);
# compute likelihood
Z = [];
for i,r in enumerate(X):
z = []
for j,c in enumerate(r):
z.append(coin_marginal_likelihood(rolls,c,Y[i][j]));
Z.append(z);
# plot
plt.figure(figsize=(10,8));
C = plt.contour(X,Y,Z,150);
cbar = plt.colorbar(C);
plt.title(r"Likelihood $\log p(\mathcal{X}|\theta_A,\theta_B)$", fontsize=20);
plt.xlabel(r"$\theta_A$", fontsize=20);
plt.ylabel(r"$\theta_B$", fontsize=20);
# plot thetas
if thetas is not None:
thetas = np.array(thetas);
plt.plot(thetas[:,0], thetas[:,1], '-k', lw=2.0);
plt.plot(thetas[:,0], thetas[:,1], 'ok', ms=5.0);
plot_coin_likelihood([ "HTTTHHTHTH", "HHHHTHHHHH",
"HTHHHHHTHH", "HTHTTTHHTT", "THHHTHHHTH"]);
def coin_em(rolls, theta_A=None, theta_B=None, maxiter=10):
# Initial Guess
theta_A = theta_A or random.random();
theta_B = theta_B or random.random();
thetas = [(theta_A, theta_B)];
# Iterate
for c in range(maxiter):
print("#%d:\t%0.2f %0.2f" % (c, theta_A, theta_B));
# assign a coin to each trial
heads_A, tails_A = 0,0;
heads_B, tails_B = 0,0;
for trial in rolls:
likelihood_A = coin_likelihood(trial,theta_A);
likelihood_B = coin_likelihood(trial,theta_B);
p_A = likelihood_A / (likelihood_A + likelihood_B);
p_B = likelihood_B / (likelihood_A + likelihood_B);
heads_A += p_A * trial.count("H");
tails_A += p_A * trial.count("T");
heads_B += p_B * trial.count("H");
tails_B += p_B * trial.count("T");
# recompute thetas
theta_A = heads_A / (heads_A + tails_A);
theta_B = heads_B / (heads_B + tails_B);
thetas.append((theta_A,theta_B));
return thetas, (theta_A,theta_B);
rolls = [ "HTTTHHTHTH", "HHHHTHHHHH", "HTHHHHHTHH",
"HTHTTTHHTT", "THHHTHHHTH" ];
thetas, _ = coin_em(rolls, 0.1, 0.3, maxiter=6);
plot_coin_likelihood(rolls, thetas)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: EECS 445
Step2: Review
Step3: Gaussian Mixture Models
Step4: Complete Data Log-Likelihood
Step5: Coin Flip
Step6: Coin Flip
Step7: Coin Flip
Step8: Coin Flip
|
3,860
|
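One E-step of the coin example above is easy to verify by hand: for the roll "HHHHTHHHHH" (9 heads, 1 tail) and the starting guesses theta_A=0.1, theta_B=0.3, nearly all of the responsibility should go to coin B. A quick check:
def lik(h, t, p):
    # binomial likelihood up to the constant choose(h+t, h)
    return p ** h * (1 - p) ** t
la, lb = lik(9, 1, 0.1), lik(9, 1, 0.3)
print(la / (la + lb))  # ~6.5e-5, so this trial is attributed almost entirely to coin B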
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sys
from rdflib import Graph,URIRef
from gastrodon import LocalEndpoint,one,QName
import gzip
import pandas as pd
pd.set_option("display.width",100)
pd.set_option("display.max_colwidth",80)
g=Graph()
g.parse(gzip.open("data/dbpedia_2015-10.nt.gz"),format="nt")
len(g)
e=LocalEndpoint(g)
properties=e.select(
SELECT ?p (COUNT(*) AS ?cnt) {
?s ?p ?o .
} GROUP BY ?p ORDER BY DESC(?cnt)
)
properties
g.bind("prov","http://www.w3.org/ns/prov#")
g.bind("owl","http://www.w3.org/2002/07/owl#")
g.bind("cc","http://creativecommons.org/ns#")
g.bind("foaf","http://xmlns.com/foaf/0.1/")
g.bind("dc","http://purl.org/dc/terms/")
g.bind("vann","http://purl.org/vocab/vann/")
e=LocalEndpoint(g)
properties=e.select(
SELECT ?p (COUNT(*) AS ?cnt) {
?s ?p ?o .
} GROUP BY ?p ORDER BY DESC(?cnt)
)
properties
single=e.select(
SELECT ?s {
?s dc:source ?o .
}
)
single
ontology=one(single)
meta=e.select(
SELECT ?p ?o {
?_ontology ?p ?o .
} ORDER BY ?p
)
meta
ontology
license=meta.at[0,'p']
license
type(license)
meta.at[0,'p'].to_uri_ref()
e.select(
SELECT ?s ?o {
?s ?_license ?o .
}
)
properties=e.select(
SELECT ?p (COUNT(*) AS ?cnt) {
?s ?p ?o .
FILTER(?s!=?_ontology)
} GROUP BY ?p ORDER BY DESC(?cnt)
)
properties
properties["cnt"].plot.pie(figsize=(6,6)).set_ylabel('')
100.0*properties["cnt"].cumsum()/properties["cnt"].sum()
pd.DataFrame.from_items([
('count',properties["cnt"]),
("frequency",100.0*properties["cnt"]/properties["cnt"].sum()),
("distribution",100.0*properties["cnt"].cumsum()/properties["cnt"].sum())
])
e.select(
SELECT (LANG(?label) AS ?lang) (COUNT(*) AS ?cnt) {
?s rdfs:label ?label .
} GROUP BY LANG(?label) ORDER BY DESC(?cnt)
)
lang=e.select(
SELECT ?lang (COUNT(*) AS ?cnt) {
?s rdfs:label ?label .
BIND (LANG(?label) AS ?lang)
} GROUP BY ?lang ORDER BY DESC(?cnt)
)
lang
distinct_s=one(e.select(
SELECT (COUNT(DISTINCT ?s) AS ?cnt) {
?s rdfs:label ?o .
}
))
distinct_s
black_sheep=one(e.select(
SELECT ?s {
?s rdfs:label ?o .
FILTER NOT EXISTS {
?s rdfs:label ?o2 .
FILTER(LANG(?o2)='en')
}
}
))
black_sheep
meta=e.select(
SELECT ?p ?o {
?_black_sheep ?p ?o .
} ORDER BY ?p
)
meta
lang_coverage=100*lang["cnt"]/distinct_s
lang_coverage
lang_coverage.plot(kind="barh",figsize=(10,6))
types=e.select(
SELECT ?type (COUNT(*) AS ?cnt) {
?s a ?type .
} GROUP BY ?type ORDER BY DESC(?cnt)
)
types
types=e.select(
SELECT ?s (COUNT(*) AS ?cnt) {
?s a owl:Class .
?p rdfs:domain ?s .
} GROUP BY ?s ORDER BY DESC(?cnt) LIMIT 20
)
types
g.bind("dbo","http://dbpedia.org/ontology/")
e=LocalEndpoint(g)
types=e.select(
SELECT ?s (COUNT(*) AS ?cnt) {
?s a owl:Class .
?p rdfs:domain ?s .
} GROUP BY ?s ORDER BY DESC(?cnt) LIMIT 5
)
types.head()
person_types=e.select(
SELECT ?p (COUNT(*) AS ?cnt) {
?p rdfs:domain dbo:Person .
?p rdfs:label ?l .
} GROUP BY ?p ORDER BY DESC(?cnt) LIMIT 30
)
person_types
e.select(
SELECT ?p ?range ?label ?cnt {
?p rdfs:range ?range .
OPTIONAL {
?p rdfs:label ?label .
FILTER(LANG(?label)='de')
}
{
SELECT ?p (COUNT(*) AS ?cnt) {
?p rdfs:domain dbo:Person .
?p rdfs:label ?l .
} GROUP BY ?p ORDER BY DESC(?cnt)
}
FILTER(?cnt>4)
} ORDER BY DESC(?cnt)
)
def top_properties(klasse='dbo:Person',lang='de',threshold=4):
klasse=QName(klasse)
df=e.select(
SELECT ?p ?range ?label ?cnt {
?p rdfs:range ?range .
OPTIONAL {
?p rdfs:label ?label .
FILTER(LANG(?label)=?_lang)
}
{
SELECT ?p (COUNT(*) AS ?cnt) {
?p rdfs:domain ?_klasse .
?p rdfs:label ?l .
} GROUP BY ?p ORDER BY DESC(?cnt)
}
FILTER(?cnt>?_threshold)
} ORDER BY DESC(?cnt)
)
return df.style.highlight_null(null_color='red')
top_properties()
top_properties(lang='ja')
top_properties('dbo:SpaceMission',lang='fr',threshold=1)
e.select(
SELECT ?s ?o {
?s prov:wasDerivedFrom ?o .
} LIMIT 10
)
_.at[0,'o']
e.select(
SELECT ?type {
?type rdfs:subClassOf dbo:Person .
}
)
e.select(
SELECT ?type {
?type rdfs:subClassOf* dbo:Person .
}
)
e.select(
SELECT ?type {
?type ^rdfs:subClassOf* dbo:Painter .
}
)
e.select(
SELECT ?type {
dbo:Painter rdfs:subClassOf* ?type .
}
)
e.select(
SELECT ?type {
dbo:City rdfs:subClassOf* ?type .
}
)
e.select(
SELECT ?a ?b {
?a owl:equivalentClass ?b .
} LIMIT 10
)
e.select(
SELECT ?a ?b {
?a owl:equivalentClass ?b .
FILTER(STRSTARTS(STR(?b),"http://schema.org/"))
}
)
g.bind("schema","http://schema.org/")
e=LocalEndpoint(g)
e.select(
SELECT ?a ?b {
?a owl:equivalentClass ?b .
FILTER(?b=<http://schema.org/Product>)
}
)
e.select(
SELECT ?a ?b {
?a owl:equivalentClass ?b .
FILTER(!STRSTARTS(STR(?b),"http://schema.org/"))
FILTER(!STRSTARTS(STR(?b),"http://www.wikidata.org/"))
}
)
g.bind("dzero","http://www.ontologydesignpatterns.org/ont/d0.owl#")
g.bind("dul","http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#")
g.bind("bibo","http://purl.org/ontology/bibo/")
g.bind("skos","http://www.w3.org/2004/02/skos/core#")
e=LocalEndpoint(g)
e.select(
SELECT ?a ?b {
?a owl:equivalentClass ?b .
FILTER(!STRSTARTS(STR(?b),"http://schema.org/"))
FILTER(!STRSTARTS(STR(?b),"http://www.wikidata.org/"))
}
)
e.select(
SELECT ?b ?a {
?a owl:disjointWith ?b .
} ORDER BY ?b
)
e.select(
SELECT ?type {
?type a rdfs:Datatype .
} LIMIT 10
)
g.bind("type","http://dbpedia.org/datatype/")
e=LocalEndpoint(g)
e.select(
SELECT ?type {
?type a rdfs:Datatype .
} LIMIT 10
)
e.select(
SELECT ?p ?o {
type:lightYear ?p ?o .
}
)
e.select(
SELECT ?p (COUNT(*) AS ?cnt) {
?s a rdfs:Datatype .
?s ?p ?o .
} GROUP BY ?p
)
e.select(
SELECT ?type (COUNT(*) AS ?cnt) {
?p rdfs:range ?type .
?type a rdfs:Datatype .
} GROUP BY ?type ORDER BY DESC(?cnt)
)
len(_)
e.select(
SELECT ?type {
?type a rdfs:Datatype .
MINUS { ?s ?p ?type }
} LIMIT 20
)
e.select(
SELECT ?p {
?p rdfs:range type:kilogram
}
)
counts=e.select(
SELECT ?type (COUNT(*) AS ?cnt) {
?s a ?type .
FILTER (?type IN (rdf:Property,owl:DatatypeProperty,owl:ObjectProperty))
} GROUP BY ?type ORDER BY DESC(?cnt)
)["cnt"]
counts
counts["rdf:Property"]
counts["owl:DatatypeProperty"]+counts["owl:ObjectProperty"]
e.select(
SELECT ?klasse {
?klasse a owl:DatatypeProperty .
?klasse a owl:ObjectProperty .
}
)
e.select(
SELECT ?klasse {
?klasse a owl:DatatypeProperty .
MINUS {?klasse a rdf:Property}
}
)
e.select(
SELECT ?klasse {
?klasse a owl:ObjectProperty .
MINUS {?klasse a rdf:Property}
}
)
e.select(
SELECT ?p {
?p a rdf:Property .
MINUS {
{ ?p a owl:DatatypeProperty }
UNION
{ ?p a owl:ObjectProperty }
}
}
)
galaxyMass=URIRef("http://dbpedia.org/ontology/Galaxy/mass")
e.select(
SELECT ?p {
?_galaxyMass rdfs:subPropertyOf ?p .
}
)
e.select(
SELECT ?from ?to {
?from rdfs:subPropertyOf ?to .
}
)
e.select(
SELECT ?from ?to {
?from rdfs:subPropertyOf ?to .
FILTER(!STRSTARTS(STR(?from),"http://dbpedia.org/ontology/"))
}
)
e.select(
SELECT ?from ?to {
?from rdfs:subPropertyOf ?to .
FILTER(!STRSTARTS(STR(?to),"http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#"))
}
)
e.select(
SELECT ?to (COUNT(*) AS ?cnt) {
?from rdfs:subPropertyOf ?to .
FILTER(!STRSTARTS(STR(?to),"http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#"))
} GROUP BY ?to ORDER BY DESC(?cnt)
)
e.select(
SELECT ?about ?from {
?from
rdfs:subPropertyOf dbo:code ;
rdfs:domain ?about .
}
)
e.select(
SELECT ?about ?from {
?from
rdfs:subPropertyOf dbo:closeTo ;
rdfs:domain ?about .
}
)
e.select(
SELECT ?to (COUNT(*) AS ?cnt) {
?from rdfs:subPropertyOf ?to .
FILTER(STRSTARTS(STR(?to),"http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#"))
} GROUP BY ?to ORDER BY DESC(?cnt)
)
e.select(
SELECT ?domain ?p ?range {
?p
rdfs:subPropertyOf dul:isPartOf ;
rdfs:domain ?domain ;
rdfs:range ?range .
}
)
e.select(
SELECT ?a ?b {
?a owl:equivalentProperty ?b
}
)
g.bind("wikidata","http://www.wikidata.org/entity/")
e=LocalEndpoint(g)
e.select(
SELECT ?a ?b {
?a owl:equivalentProperty ?b
FILTER(!STRSTARTS(STR(?b),"http://www.wikidata.org/entity/"))
}
)
e.select(
SELECT ?a ?b {
?a owl:equivalentProperty ?b
FILTER(STRSTARTS(STR(?b),"http://dbpedia.org/ontology/"))
}
)
e.namespaces()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the graph
Step2: Now it is loaded in memory in an RDF graph which I can do SPARQL queries on; think of it as a hashtable on steroids. I can get the size of the graph (number of triples) the same way I would get the size of any Python object
Step3: The Graph is supplied by RDFLib, but I wrap it in an Endpoint object supplied by Gastrodon; this provides a bridge between RDFLib and pandas as well as smoothing away the difference between a local endpoint and remote endpoints (a SPARQL database running in another process or on another computer)
Step5: Counting properties and discovering namespaces
Step7: Note that the leftmost column is bold; this is because gastrodon recognized that this query groups on the ?p variable and it made this an index of the pandas dataframe. Gastrodon uses the SPARQL parser from RDFLib to understand your queries to support you in writing and displaying them. One advantage of this is that if you want to make a plot from the above data frame (which I'll do in a moment after cleaning the data) the dependent and independent variables will be automatically determined and things will 'just work'.
Step9: Metadata about the DBpedia Ontology
Step10: The one function will extract the single member of any list, iterable, DataFrame, or Series that has just one member.
Step12: The select function can see variables in the stack frame that calls it. Simply put, if you use the ?_ontology variable in a SPARQL query, select will look for a Python variable called ontology, and substitute the value of ontology into ?_ontology. The underscore sigil prevents substitutions from happening by accident.
Step13: How Gastrodon handles URI References
Step14: that's because it is a string! It's more than a string, however, it is a class which is a subclass of string
Step15: and in fact has the full URI reference hidden away inside of it
Step17: When you access this value in a SPARQL query, the select function recognizes the type of the variable and automatically inserts the full URI reference
Step19: Counting properties that are not about the Ontology
Step20: At this point it is about as easy to make a pie chart as it is with Excel. A pie chart is a good choice here because each fact has exactly one property in it
Step21: My favorite method for understanding this kind of distribution is to sort the most common properties first and then compute the Cumulative Distribution Function, which is the percentage of facts that have used the predicates we've seen so far.
Step22: Note that this result looks different than the DataFrames you've seen so far because it is not a DataFrame, it is a series, which has just one index column and one data column. It's possible to stick several series together to make a DataFrame, however.
Step24: Unlike many graphical depictions, the above chart is fair to both highly common and unusually rare predicates.
Step26: A detail you might notice is that the lang column is not bolded, instead, a sequential numeric index was created when I made the data frame. This is because Gastrodon, at this moment, isn't smart enough to understand a function that appears in the GROUP BY clause.
Step28: One key to getting correct results in a data analysis is to test your assumptions. English is the most prevalent language by far, but can we assume that every object has an English name? There are 3593 objects with English labels, but
Step30: objects with labels overall, so there must be at least one object without an English label. SPARQL has negation operators so we can find objects like that
Step32: Looking up all the facts for that object (which is a property used in DBpedia) shows that it has a name in greek, but not any other language
Step33: I guess that's the exception that proves the rule. Everything else has a name in English, about half of the schema objects have a name in German, and the percentage falls off pretty rapidly from there
Step34: As the percentages add up to more than 100 (an object can have names in many languages), the pie chart would be a wrong choice, but a bar chart is effective.
Step36: Classes used in the DBpedia Ontology
Step38: 739 classes are really a lot of classes! You personally might be interested in some particular domain (say Pop Music) but to survey the whole thing, I need some way to pick out classes which are important.
Step40: Adding another namespace binding makes sense to make the output more managable
Step42: Common properties for People
Step44: To make something that looks like a real report, I reach into my bag of tricks.
Step46: Towards a simple schema browser
Step47: Note that the select here can see variables in the immediately enclosing scope, that is, the function definition. As it is inside a function definition, it does not see variables defined in the Jupyter notebook. The handling of missing values is a big topic in Pandas, so I take the liberty of highlighting the label that is missing in German.
Step48: In Japanese, a different set of labels is missing. It's nice to see that Unicode characters outside the latin-1 codepage work just fine.
Step49: And of course it can be fun to look at other classes and languages
Step51: About "prov
Step53: Subclasses
Step55: SPARQL 1.1 has property path operators that will make the query engine recurse through multiple rdfs
Step58: The previous queries work "down" from a higher-level class, but by putting a '^' before the property name, I can reverse the direction of traversal, to find all topics which dbo
Step60: The same outcome can be had by switching the subject and object positions in the triple
Step62: Equivalent Classes
Step64: Here are all of the equivalencies between the DBpedia Ontology and schema.org.
Step66: Many of these are as you would expect, but there are some that are not correct, given the definition of owl
Step68: I think you'd agree that an Automobile is a Product, but that a Product is not necessarily an automobile. In these cases,
Step70: To keep track of them all, I add a few more namespace declarations.
Step72: The mapping from dbo
Step75: If two classes are disjoint, that means that nothing can be an instance of both things. For instance, a Fish cannot be a Mammal, a Person is not a building, etc. These sort of facts are helpful for validation, but one should resist the impulse to make statements of disjointness which aren't strictly true. (For instance, it would be unlikely, but not impossible, to be the winner of both a Heisman Trophy and a Fields Metal, so these are not disjoint categories.)
Step77: The information about these data types are currently sparse
Step79: These turn out to be the only properties that any datatypes have; pretty clearly, datatypes are not labeled in the rich set of languages that properties and classes are labeled in. (Note that vocabulary exists in RDFS and OWL for doing just that, such as specifying that type
Step81: Another approach is to look at how datatypes get used, that is, how frequently various datatypes are used as the range of a property.
Step83: Out of 382 properties, only 41 actually appear as the range of the properties in the schema. Here are a few properties that are unused in the schema.
Step85: According to the DBpedia Ontology documentation, there are two kinds of datatype declarations in mappings. In some cases the unit is explictly specified in the mapping field (ex. a field that contains a length is specified in meters) and in other cases, a particular datatype is specific to the field.
Step87: One unfortunate thing is that the DBpedia ontology sometimes composes property URIs by putting together the class (ex. "Galaxy") and the property (ex. "mass") with a slash between them. Slash is not allowed in a localname, which means that you can't write ontology
Step89: The sums don't match.
Step92: However, there are cases where a property is registered as an OWL property but not as an RDFS property
Step94: However, there are no properties defined as an RDFS property that are not defined in OWL.
Step96: Conclusion
Step98: ... and don't find it. That's not really a problem, because this I can always add one by adding a few more facts to my copy of the DBpedia Ontology. Let's see what is really there...
Step100: It looks like terms on the left are always part of the DBpedia Ontology
Step102: Terms on the right are frequently part of the http
Step104: Out of those 75 relationships, I bet many of them point to the same superproperties
Step106: The most common superproperty is dbo
Step108: Looking at the superproperty dbo
Step110: Looking a the superproperties in DUL, these look much like the kind of properties one would expect to
Step112: A really common kind of property is a "part-of" relationship, known as meronymy if you like greek.
Step114: Equivalent Property
Step115: Many of these properties are from Wikidata, so it probably makes sense to bind a namespace for Wikidata.
Step117: This kind of equivalency with Wikidata is meaningful precisely because DBpedia and Wikidata are competitive (and cooperative) databases that cover the same domain. Let's take a look at equivalencies to databases other than Wikidata
Step119: The vast number of those link to schema.org, except for a handful which link to other DBpedia Ontology properties.
Step120: The quality of these equivalencies are questionable to me; for instance, in geography, people often publish separate "land area" and "water areas" for a region. Still, out of 30,000 facts, I've seen fewer than 30 that looked obviously wrong
|
3,861
|
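The ?_variable substitution the row above leans on can be approximated with plain rdflib, which is useful when gastrodon is not installed. A minimal sketch on a one-triple toy graph (the URIs and names here are made up):
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDFS
g = Graph()
s = URIRef("http://example.org/thing")
g.add((s, RDFS.label, Literal("a label", lang="en")))
q = "SELECT ?o WHERE { ?s rdfs:label ?o }"
# initBindings plays the role of gastrodon's ?_variable substitution
for row in g.query(q, initNs={'rdfs': RDFS}, initBindings={'s': s}):
    print(row.o)  # -> "a label"@en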
<ASSISTANT_TASK:>
Python Code:
icon_create_function = """\
function(cluster) {
    return L.divIcon({
        html: '<b>' + cluster.getChildCount() + '</b>',
        className: 'marker-cluster marker-cluster-large',
        iconSize: new L.Point(20, 20)
    });
}
"""
from folium.plugins import MarkerCluster
m = folium.Map(
location=[np.mean(lats), np.mean(lons)], tiles="Cartodb Positron", zoom_start=1
)
marker_cluster = MarkerCluster(
locations=locations,
popups=popups,
name="1000 clustered icons",
overlay=True,
control=True,
icon_create_function=icon_create_function,
)
marker_cluster.add_to(m)
folium.LayerControl().add_to(m)
m
%%time
m = folium.Map(
location=[np.mean(lats), np.mean(lons)],
tiles='Cartodb Positron',
zoom_start=1
)
marker_cluster = MarkerCluster(
name='1000 clustered icons',
overlay=True,
control=False,
icon_create_function=None
)
for k in range(size):
location = lats[k], lons[k]
marker = folium.Marker(location=location)
popup = 'lon:{}<br>lat:{}'.format(location[1], location[0])
folium.Popup(popup).add_to(marker)
marker_cluster.add_child(marker)
marker_cluster.add_to(m)
folium.LayerControl().add_to(m);
m
from folium.plugins import FastMarkerCluster
%%time
m = folium.Map(
location=[np.mean(lats), np.mean(lons)],
tiles='Cartodb Positron',
zoom_start=1
)
FastMarkerCluster(data=list(zip(lats, lons))).add_to(m)
folium.LayerControl().add_to(m);
m
callback = """\
function (row) {
    var icon, marker;
    icon = L.AwesomeMarkers.icon({
        icon: "map-marker", markerColor: "red"});
    marker = L.marker(new L.LatLng(row[0], row[1]));
    marker.setIcon(icon);
    return marker;
};
"""
m = folium.Map(
location=[np.mean(lats), np.mean(lons)], tiles="Cartodb Positron", zoom_start=1
)
FastMarkerCluster(data=list(zip(lats, lons)), callback=callback).add_to(m)
m
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Adding all icons in a single call
Step2: Explicit loop allow for customization in the loop.
Step4: FastMarkerCluster is not as flexible as MarkerCluster but, like the name suggests, it is faster.
|
3,862
|
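The excerpt above uses lats, lons, locations, popups and size without defining them; the setup cell was evidently dropped. A plausible reconstruction, matching the random test points used in the folium plugin examples (treat it as an assumption, not the original cell):
import numpy as np
size = 100
lons = np.random.randint(-180, 180, size=size)
lats = np.random.randint(-90, 90, size=size)
locations = list(zip(lats, lons))
popups = ['lon:{}<br>lat:{}'.format(lon, lat) for (lat, lon) in locations]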
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from math import atan2, sqrt, sin, cos, pi
import re
X, Y, Z, MX, MY, MZ = 0, 1, 2, 3, 4, 5 # indices in coord dictionary items
RO = 180 * 3600 / pi
# initial coordinates
coo1 = {'K1': [ 0.0, 5.9427, 0.9950],
'K2': [ 6.0242, 0.0, 1.3998],
'K3': [ 9.7954, 5.3061, 1.8230],
'K4': [17.9716, 5.2726, 1.8389],
'K5': [31.6363, 5.5274, 1.0126],
'K6': [33.2002, 7.0923, 1.1090],
'K7': [35.9246, 14.5219, 1.3326],
'K8': [40.6884, 21.0337, 1.4709],
'K9': [32.501, 22.8658, 1.6797]
}
# coordinatess from the second adjustment
coo2 = {'K1': [ 0.0002, 5.9422, 0.9948],
'K2': [ 6.0252, -0.0006, 1.3997],
'K3': [ 9.7959, 5.3061, 1.8230],
'K4': [17.9716, 5.2729, 1.8389],
'K5': [31.6366, 5.5280, 1.0129],
'K6': [33.1994, 7.0916, 1.1091],
'K7': [35.9235, 14.5207, 1.3327],
'K8': [40.6888, 21.0319, 1.4711],
'K9': [32.2494, 22.8644, 1.6799]
}
def tr3(src, dst, x0):
    """Three parameter orthogonal transformation.

    :param src: dictionary of source points and coordinates
    :param dst: dictionary of target points and coordinates
    :param x0: preliminary transformation parameter values
    :returns: x_offset y_offset rotation
    """
# find common points
s = set(src.keys())
d = set(dst.keys())
common = s.intersection(d)
n = len(common)
A = np.zeros((2*n, 3))
l = np.zeros(2*n)
i = 0
# set up equations
for key in common:
A[i] = np.array([1.0, 0.0, -src[key][X] * sin(x0[2]) -
src[key][Y] * cos(x0[2])])
l[i] = dst[key][X] - (x0[0] + src[key][X] * cos(x0[2]) -
src[key][Y] * sin(x0[2]))
i += 1
A[i] = np.array([0.0, 1.0, src[key][X] * cos(x0[2]) -
src[key][Y] * sin(x0[2])])
l[i] = dst[key][1] - (x0[1] + src[key][X] * sin(x0[2]) +
src[key][Y] * cos(x0[2]))
i += 1
# solve equation
ATA = np.dot(A.transpose(), A)
ATl = np.dot(A.transpose(), l)
param = np.linalg.solve(ATA, ATl) # x0, y0, rotation
v = np.dot(A, param+x0) - l # corrections
return param + x0, v
def sel(coo, keys):
    """Select points from a coordinate list based on point IDs or a regexp.

    :param coo: dictionary with coordinates
    :param keys: dictionary keys/point IDs to select, or a regexp for point IDs
    """
if isinstance(keys, str):
r = re.compile(keys)
w = list(filter(r.search, coo.keys()))
else:
w = keys
return {k : coo[k] for k in w if k in coo}
def coo_tr(coo, param):
    """Transform coordinates in coo using the transformation parameters.

    :param coo: dictionary of coordinates to transform
    :param param: transformation parameters x0, y0, alpha, scale
    """
if len(param) == 4:
x0, y0, alpha, scale = param
else:
x0, y0, alpha = param
scale = 1.0
return {k: [x0 + coo[k][X] * scale * cos(alpha) - coo[k][Y] * scale * sin(alpha),
y0 + coo[k][X] * scale * sin(alpha) + coo[k][Y] * scale * cos(alpha),
coo[k][Z]] for k in coo}
key_list = list(coo1.keys())
n_key = len(key_list)
min_v = 1e38
print('P1 P2 X0 Y0 Alpha" sum(|v|)')
print('----------------------------------------------')
for i in range(n_key):
k1 = key_list[i]
for j in range(i+1, n_key):
k2 = key_list[j]
p, v = tr3(sel(coo1, [k1, k2]), sel(coo2, [k1, k2]), [0.0, 0.0, 0.0])
coo1_tr = coo_tr(coo1, p)
sum_v = 0
# calculate sum of absolute value of corrections
for k in coo1:
sum_v += abs(coo1_tr[k][X] - coo2[k][X]) + \
abs(coo1_tr[k][Y] - coo2[k][Y])
if sum_v < min_v:
opt = [k1, k2, p, sum_v]
min_v = sum_v
print(f'{k1:4s} {k2:4s} {p[0]:8.3f} {p[1]:8.3f} {p[2] * RO:6.1f} {sum_v:8.3f}')
print('optimal:')
print(f'{opt[0]:4s} {opt[1]:4s} {opt[2][0]:8.3f} {opt[2][1]:8.3f} {opt[2][2] * RO:6.1f} {opt[3]:8.3f}')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sample data
Step3: A function to calculate three parameter transformation based on commmon points. The point coordinates are stored in dictionaries, the key is the point ID/name and each dictionary item stores a list of coordinates [x, y, z].
Step5: Function to select two point from all points.
Step7: Apply the transformation parameters to points.
Step8: Iterating the transformation using two points in all combinations.
|
3,863
|
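A quick self-check of tr3 on synthetic data (assuming the tr3, coo_tr and RO definitions above are in scope): shift and rotate three points with known parameters and confirm the adjustment recovers them.
ang = 100.0 / RO  # a 100 arc-second rotation, in radians
src = {'P1': [0.0, 0.0, 0.0], 'P2': [10.0, 0.0, 0.0], 'P3': [0.0, 10.0, 0.0]}
dst = coo_tr(src, [0.05, -0.03, ang])   # forward transformation
p, v = tr3(src, dst, [0.0, 0.0, 0.0])   # recover the parameters
print(p)  # expect approximately [0.05, -0.03, 100/RO]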
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append("..")
import splitwavepy as sw
import matplotlib.pyplot as plt
import numpy as np
data = sw.Pair(noise=0.05,pol=40,delta=0.1)
data.plot()
data.split(40,1.6)
data.plot()
data.unsplit(80,1.6)
data.plot()
# Let's start afresh, and this time put the splitting on straight away.
data = sw.Pair(delta=0.1,noise=0.01,pol=40,fast=80,lag=1.2)
# plot power in signal
fig, ax1 = plt.subplots()
ax1.plot(data.t(),data.power())
# generate a window
window = data.window(25,12,tukey=0.1)
# window = sw.Window(data.centre(),150)
ax2 = ax1.twinx()
ax2.plot(data.t(),window.asarray(data.t().size),'r')
plt.show()
data.plot(window=window)
# Now repreat but this time apply loads of splitting and see the energy broaden
data = sw.Pair(delta=0.1,noise=0.01,pol=40,fast=80,lag=5.2)
# plot power in signal
fig, ax1 = plt.subplots()
ax1.plot(data.t(),data.power())
# generate a window
window = data.window(25,12,tukey=0.1)
# window = sw.Window(data.centre(),150)
ax2 = ax1.twinx()
ax2.plot(data.t(),window.asarray(data.t().size),'r')
plt.show()
data.plot(window=window)
# large window
largewindow = data.window(23,24,tukey=0.1)
data.plot(window=largewindow)
# sparse search
tlags = np.linspace(0,7.0,60)
degs = np.linspace(-90,90,60)
M = sw.EigenM(tlags=tlags,degs=degs,noise=0.03,fast=112,lag=5.3,delta=0.2)
M.plot()
# dense search
# tlags = np.linspace(0.,7.0,200)
# degs = np.linspace(0,180,200)
# M = sw.EigenM(M.data,tlags=tlags,degs=degs)
# M.plot()
M.tlags
M = sw.EigenM(delta=0.1,noise=0.02,fast=60,lag=1.3)
M.plot()
np.linspace(0,0.5,15)
p = sw.Pair(delta=0.1,pol=30,fast=30,lag=1.2,noise=0.01)
p.plot()
p.angle
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's add a bit of splitting. Note, this shortens trace length slightly. And the pulse is still at the centre.
Step2: Measuring shear wave splitting involves searching for the splitting parameters that, when removed from the data, best linearise the particle motion. We know the splitting parameters so no need to search. Let's just confirm that when we undo the splitting we get linearised particle motion. Again, this shortens the trace, and the pulse is still at the centre.
Step3: The window
Step4: The measurement
|
3,864
|
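Independent of the splitwavepy API, the "unsplit" operation measured in the row above is just a rotation into the fast/slow frame, a relative time shift, and a rotation back. A minimal numpy sketch of that idea (the function name and sign conventions are assumptions, not splitwavepy internals):
import numpy as np
def unsplit(x, y, fast_deg, lag_samples):
    phi = np.deg2rad(fast_deg)
    # rotate the two horizontal traces into fast/slow coordinates
    f = x * np.cos(phi) + y * np.sin(phi)
    s = -x * np.sin(phi) + y * np.cos(phi)
    # advance the slow component to undo the split delay
    s = np.roll(s, -lag_samples)
    # rotate back to the original frame
    return f * np.cos(phi) - s * np.sin(phi), f * np.sin(phi) + s * np.cos(phi)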
<ASSISTANT_TASK:>
Python Code:
from ecell4 import *
A = Species("A")
B = Species("B")
A = Species("A")
A.set_attribute("radius", "0.005")
A.set_attribute("D", "1")
A.set_attribute("location", "cytoplasm")
A = Species("A", "0.005", "1", "cytoplasm") # XXX: serial, radius, D, location
print(A.serial()) # will return 'A'
print(A.get_attribute("D")) # will return '1'
rr = ReactionRule()
rr.add_reactant(Species("A"))
rr.add_product(Species("B"))
rr.set_k(1.0)
rr = ReactionRule()
rr.add_reactant(Species("A"))
rr.add_reactant(Species("B"))
rr.add_product(Species("C"))
rr.set_k(1.0)
rr1 = create_unimolecular_reaction_rule(Species("A"), Species("B"), 1.0)
rr2 = create_binding_reaction_rule(Species("A"), Species("B"), Species("C"), 1.0)
rr3 = create_binding_reaction_rule(Species("C"), Species("A"), Species("B"), 1.5)
print(rr3.as_string()) # will return 'C+A>B|1.5'
sp1 = Species("A", "0.005", "1")
sp2 = Species("B", "0.005", "1")
sp3 = Species("C", "0.01", "0.5")
rr1 = create_binding_reaction_rule(Species("A"), Species("B"), Species("C"), 0.01)
rr2 = create_unbinding_reaction_rule(Species("C"), Species("A"), Species("B"), 0.3)
m = NetworkModel()
m.add_species_attribute(sp1)
m.add_species_attribute(sp2)
m.add_species_attribute(sp3)
m.add_reaction_rule(rr1)
m.add_reaction_rule(rr2)
print(m.species_attributes())
print(m.reaction_rules())
m.num_reaction_rules()
# will return like:
# [<ecell4.core.Species object at 0x7f36443bfa98>, <ecell4.core.Species object at 0x7f36443bfab0>, <ecell4.core.Species object at 0x7f36443bfac8>]
# [<ecell4.core.ReactionRule object at 0x7f36443bfa98>, <ecell4.core.ReactionRule object at 0x7f36443bfab0>]
# 2
%matplotlib inline
# XXX: 'm' is a NetworkModel, which is described in Model tutorial (http://nbviewer.ipython.org/github/ecell/ecell4/blob/develop/ipynb/Tutorials/ModelBasics.ipynb) .
import numpy
t = numpy.linspace(0, 10, 100) # [0.0, 0.1, 0.2, ..., 9.9, 10.0]
y = run_simulation(t, {'C': 60}, volume=1.0, model=m)
y = run_simulation(t, {'C': 60}, volume=1.0, model=m, solver='gillespie')
del A
del B
with reaction_rules():
A + B > C | 0.01 # equivalent to create_binding_reaction_rule
C > A + B | 0.3 # equivalent to create_unbinding_reaction_rule
m = get_model()
with reaction_rules():
A + B == C | (0.01, 0.3)
y = run_simulation(numpy.linspace(0, 10, 100), {'C': 60}, volume=1.0)
with reaction_rules():
A > ~A | 1.0 # XXX: create_degradation_reaction_rule
~A > A | 1.0 # XXX: create_synthesis_reaction_rule
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: each expression describes a Species named A or B.
Step2: The 1st argument for set_attribute is the name of attribute.
Step3: When you want to inspect the Species attributes, please write as follows.
Step4: ReactionRule
Step5: Now you created a reaction from A to B. In this reaction definition, you don't need to assign values to Species.
Step6: A Binding A and B creates C. There are utility functions for binding and unbinding ReactionRules.
Step7: When you want to inspect the ReactionRule, please use as_string function.
Step8: NetworkModel
Step9: To add a Species, use add_species_attribute.
Step10: NetworkModel also contains Species attributes. These attributes are indispensable for particle and lattice simulations, but not necessarily needed for gillespie and ode.
Step11: run_simulation records the number of molecules for time t and plots the time-series simulation result.
Step12: You can simalate a model with different methods like this.
Step13: Please use with statement for E-Cell4 special notation.
Step14: $$\frac{\mathrm{d[A]}}{\mathrm{d}t}=\frac{\mathrm{d[B]}}{\mathrm{d}t}=-0.01\mathrm{[A][B]}+0.3\mathrm{[C]},\qquad\frac{\mathrm{d[C]}}{\mathrm{d}t}=0.01\mathrm{[A][B]}-0.3\mathrm{[C]}$$
|
3,865
|
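The rate equations in Step14 can be cross-checked without E-Cell4 by integrating the same mass-action ODEs with scipy (a sketch; the initial condition of 60 molecules of C matches the runs above):
import numpy as np
from scipy.integrate import odeint
def rhs(state, t, k1=0.01, k2=0.3):
    A, B, C = state
    flux = -k1 * A * B + k2 * C  # net rate of A (and B) production
    return [flux, flux, -flux]
t = np.linspace(0, 10, 100)
out = odeint(rhs, [0.0, 0.0, 60.0], t)  # columns: [A], [B], [C]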
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import matplotlib.pyplot as plt
import numpy as np
#import widgets
from ipywidgets import widgets
mywidget = widgets.FloatSlider()
display(mywidget)
print mywidget.value
def on_value_change(change):
    # .observe passes a single change dict; the new value is under 'new'
    print(change['new'])
int_range = widgets.IntSlider(min=0, max=10, step=2)
int_range.observe(on_value_change, 'value')
display(int_range)
def click_handler(widget):
print "clicked"
b = widgets.Button(description='Click Me!')
b.on_click(click_handler)
display(b)
from ipywidgets import interact, interactive # Widget definitions
def linear_plot(m=0.5, b=27.0):
'''
Create a plot of some data that should
vary linearly along with a straight line
function with the given slope and intercept.
'''
#data to optimize
datax = np.array([1.0,2.0,3.0,5.0,7.0,9.0])
datay = np.array([10.2, 20.5, 24.8, 30.7, 33.6, 37.3])
erry = np.array([1.0,0.5,2.6,1.0,5.6,6.0])
#plot the data
plt.errorbar(datax,datay,xerr=0.0,yerr=erry,fmt='o')
#create a function to approximate the data using the slope
#and intercept parameters passed to the function
steps = 100
x = np.linspace(0,10.,steps)
y = m*x+b
#plot and show the result
plt.plot(x,y)
plt.xlim(0.,10.)
plt.ylim(0.,50.)
plt.show()
#Create an interactive plot with sliders for varying the slope and intercept
v = interact(linear_plot,m=(0.0,5.0), b=(0.0,50.0))
#Human optimizer results
#Best slope (m):
#Best y-intercept (b):
def scatter_plot(r=0.5, n=27):
t = np.random.uniform(0.0,2.0*np.pi,n)
rad = r*np.sqrt(np.random.uniform(0.0,1.0,n))
x = np.empty(n)
y = np.empty(n)
x = rad*np.cos(t)
y = rad*np.sin(t)
fig = plt.figure(figsize=(4,4),dpi=80)
plt.scatter(x,y)
plt.xlim(-1.,1.)
plt.ylim(-1.,1.)
plt.show()
v2 = interact(scatter_plot,r=(0.0,1.0), n=(1,1000))
def sin_plot(A=5.0,f1=5.0,f2=10.):
x = np.linspace(0,2*np.pi,1000)
#pure sine curve
y = A*np.sin(f1*x)
#superposition of sine curves with different frequency
#but same amplitude
y2 = A*(np.sin(f1*x)+np.sin(f2*x))
plt.plot(x,y,x,y2)
plt.xlim(0.,2.*np.pi)
plt.ylim(-10.,10.)
plt.grid()
plt.show()
v3 = interact(sin_plot,A=(0.,10.), f1=(1.0,10.0), f2=(1.0,10.0))
#Your code here
def lissajous_plot(a1=0.5,f1=1.,p1=0.,a2=0.5,f2=1.,p2=0.):
t = np.linspace(0, 20*np.pi, 5000)
x = a1*np.sin(f1*(t+p1))
y = a2*np.cos(f2*(t+p2))
plt.plot(x,y)
plt.xlim(-1.,1.)
plt.ylim(-1.,1.)
plt.show()
v4 = interact(lissajous_plot,a1=(0.,1.), f1=(1.0,4.0), p1=(0.,2*np.pi),
a2=(0.,1.),f2=(1.0,4.0),p2=(0.,2*np.pi))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Widgets
Step2: A simple slider
Step3: You can slide the slider back and forth and then "get" the current value from the widget object with
Step4: Here's a variation on that where the current value is printed when the slider is moved. Play around with it.
Step5: You may want to handle the input provided by a widget with a handler when someone interacts with a widget and then do something. Try this one
Step6: Interact/Interactive
Step7: To use them, we define a function that creates a plot which depends on the values of parameters passed to the function. Then you can create an interact object with the function and specified ranges for those parameters. The interact object displayed will give you sliders (widgets) that let you vary the input parameters and interactively see how they change the plot!
Step8: Spend a few minutes trying to find the "best fit" line to this data and record the corresponding slope and intercept values for that line here.
Step9: Random scatter
Step10: Sinusoids
Step11: Exercise
Step12: Lissajous Curves
|
3,866
|
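Rather than eyeballing the slope and intercept in the interactive linear plot above, the error-weighted least-squares answer can be computed directly from the same data:
import numpy as np
datax = np.array([1.0, 2.0, 3.0, 5.0, 7.0, 9.0])
datay = np.array([10.2, 20.5, 24.8, 30.7, 33.6, 37.3])
erry = np.array([1.0, 0.5, 2.6, 1.0, 5.6, 6.0])
w = 1.0 / erry ** 2
A = np.vstack([datax, np.ones_like(datax)]).T
# weighted normal equations: (A^T W A) p = A^T W y
m, b = np.linalg.solve(A.T @ (A * w[:, None]), A.T @ (w * datay))
print(m, b)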
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
# use just 60 seconds of data and mag channels, to save memory
raw.crop(0, 60).pick_types(meg='mag', stim=True).load_data()
raw.plot(duration=60, proj=False, n_channels=len(raw.ch_names),
remove_dc=False)
for cutoff in (0.1, 0.2):
raw_highpass = raw.copy().filter(l_freq=cutoff, h_freq=None)
fig = raw_highpass.plot(duration=60, proj=False,
n_channels=len(raw.ch_names), remove_dc=False)
fig.subplots_adjust(top=0.9)
fig.suptitle('High-pass filtered at {} Hz'.format(cutoff), size='xx-large',
weight='bold')
filter_params = mne.filter.create_filter(raw.get_data(), raw.info['sfreq'],
l_freq=0.2, h_freq=None)
mne.viz.plot_filter(filter_params, raw.info['sfreq'], flim=(0.01, 5))
def add_arrows(axes):
# add some arrows at 60 Hz and its harmonics
for ax in axes:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
# get ymax of a small region around the freq. of interest
y = psds[(idx - 4):(idx + 5)].max()
ax.arrow(x=freqs[idx], y=y + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
fig = raw.plot_psd(fmax=250, average=True)
add_arrows(fig.axes[:2])
meg_picks = mne.pick_types(raw.info, meg=True)
freqs = (60, 120, 180, 240)
raw_notch = raw.copy().notch_filter(freqs=freqs, picks=meg_picks)
for title, data in zip(['Un', 'Notch '], [raw, raw_notch]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
raw_notch_fit = raw.copy().notch_filter(
freqs=freqs, picks=meg_picks, method='spectrum_fit', filter_length='10s')
for title, data in zip(['Un', 'spectrum_fit '], [raw, raw_notch_fit]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
raw_downsampled = raw.copy().resample(sfreq=200)
for data, title in zip([raw, raw_downsampled], ['Original', 'Downsampled']):
fig = data.plot_psd(average=True)
fig.subplots_adjust(top=0.9)
fig.suptitle(title)
plt.setp(fig.axes, xlim=(0, 300))
current_sfreq = raw.info['sfreq']
desired_sfreq = 90 # Hz
decim = np.round(current_sfreq / desired_sfreq).astype(int)
obtained_sfreq = current_sfreq / decim
lowpass_freq = obtained_sfreq / 3.
raw_filtered = raw.copy().filter(l_freq=None, h_freq=lowpass_freq)
events = mne.find_events(raw_filtered)
epochs = mne.Epochs(raw_filtered, events, decim=decim)
print('desired sampling frequency was {} Hz; decim factor of {} yielded an '
'actual sampling frequency of {} Hz.'
.format(desired_sfreq, decim, epochs.info['sfreq']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Background on filtering
Step2: A half-period of this slow drift appears to last around 10 seconds, so a full
Step3: Looks like 0.1 Hz was not quite high enough to fully remove the slow drifts.
Step4: Notice that the output is the same as when we applied this filter to the data
Step5: Power line noise
Step6: It should be evident that MEG channels are more susceptible to this kind of
Step7:
Step8: Resampling
Step9: Because resampling involves filtering, there are some pitfalls to resampling
|
3,867
|
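Worked by hand, the decimation arithmetic from the last cell above goes as follows (the sample dataset's rate is roughly 600.615 Hz; treat the exact value as an assumption):
import numpy as np
current_sfreq = 600.615                 # approximate rate of the sample data
desired_sfreq = 90.0
decim = int(np.round(current_sfreq / desired_sfreq))  # -> 7
obtained = current_sfreq / decim                      # -> ~85.8 Hz
lowpass = obtained / 3.0                              # -> ~28.6 Hz anti-alias cutoff
print(decim, obtained, lowpass)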
<ASSISTANT_TASK:>
Python Code:
#$HIDE$
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
print("Setup Complete")
# Path of the file to read
spotify_filepath = "../input/spotify.csv"
# Read the file into a variable spotify_data
spotify_data = pd.read_csv(spotify_filepath, index_col="Date", parse_dates=True)
# Line chart
plt.figure(figsize=(12,6))
sns.lineplot(data=spotify_data)
# Change the style of the figure to the "dark" theme
sns.set_style("dark")
# Line chart
plt.figure(figsize=(12,6))
sns.lineplot(data=spotify_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll work with the same code that we used to create a line chart in a previous tutorial. The code below loads the dataset and creates the chart.
Step2: We can quickly change the style of the figure to a different theme with only a single line of code.
|
3,868
|
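Seaborn ships five built-in themes — "darkgrid", "whitegrid", "dark", "white" and "ticks" — and the same pattern as above makes them easy to compare side by side on a toy series:
import matplotlib.pyplot as plt
import seaborn as sns
for style in ("darkgrid", "whitegrid", "dark", "white", "ticks"):
    sns.set_style(style)
    plt.figure(figsize=(4, 2))
    sns.lineplot(x=[0, 1, 2, 3], y=[1, 3, 2, 4])
    plt.title(style)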
<ASSISTANT_TASK:>
Python Code:
# Import py_entitymatching package
import py_entitymatching as em
import os
import pandas as pd
# Get the datasets directory
datasets_dir = em.get_install_path() + os.sep + 'datasets'
# Get the paths of the input tables
path_A = datasets_dir + os.sep + 'person_table_A.csv'
path_B = datasets_dir + os.sep + 'person_table_B.csv'
# Read the CSV files and set 'ID' as the key attribute
A = em.read_csv_metadata(path_A, key='ID')
B = em.read_csv_metadata(path_B, key='ID')
atypes1 = em.get_attr_types(A)
atypes2 = em.get_attr_types(B)
atypes1.keys()
atypes1['birth_year'], atypes1['hourly_wage'], atypes1['address'], atypes1['name'], atypes1['zipcode']
atypes2['birth_year'], atypes2['hourly_wage'], atypes2['address'], atypes2['name'], atypes2['zipcode']
block_c = em.get_attr_corres(A, B)
block_c.keys()
id(A), id(block_c['ltable']), id(B), id(block_c['rtable'])
block_c['corres']
# for blocking
tok = em.get_tokenizers_for_blocking()
# for matching
# tok = em.get_tokenizers_for_matching()
tok
# for blocking
sim = em.get_sim_funs_for_blocking()
# for matching
# sim = em.get_sim_funs_for_matching()
sim
feature_table = em.get_features(A, B, atypes1, atypes2, block_c, tok, sim)
feature_table.head()
type(feature_table)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, read the (sample) input tables for blocking purposes.
Step2: Generating Features for Manually
Step3: Getting Attribute Correspondences
Step4: Getting Tokenizers
Step5: Getting Similarity Functions
Step6: Getting Features
|
3,869
|
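Each row of feature_table above couples an attribute correspondence with a tokenizer/similarity pair. The essence of one such generated feature, written out by hand (a sketch that ignores py_stringmatching's padding and edge-case handling — not the generated code itself):
def name_name_jac_qgm_3_qgm_3(ltuple, rtuple):
    # 3-gram Jaccard similarity between the two 'name' attributes
    def qgrams(s, q=3):
        s = s.lower()
        return {s[i:i + q] for i in range(len(s) - q + 1)} or {s}
    a, b = qgrams(ltuple['name']), qgrams(rtuple['name'])
    return len(a & b) / len(a | b)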
<ASSISTANT_TASK:>
Python Code:
!git clone https://github.com/benelot/pybullet-gym lib/pybullet-gym
!pip install -e lib/pybullet-gym
import gym
import numpy as np
import pybulletgym
env = gym.make("AntPyBulletEnv-v0")
# we want to look inside
env.render()
# examples of states and actions
print("observation space: ", env.observation_space,
"\nobservations:", env.reset())
print("action space: ", env.action_space,
"\naction_sample: ", env.action_space.sample())
class RandomActor():
def get_action(self, states):
assert len(states.shape) == 1, "can't work with batches"
return env.action_space.sample()
s = env.reset()
rewards_per_step = []
actor = RandomActor()
for i in range(10000):
a = actor.get_action(s)
s, r, done, _ = env.step(a)
rewards_per_step.append(r)
if done:
s = env.reset()
print("done: ", i)
rewards_per_step[100:110]
env.close()
from logger import TensorboardSummaries as Summaries
env = gym.make("AntPyBulletEnv-v0")
env = Summaries(env, "MyFirstWalkingAnt");
state_dim = env.observation_space.shape[0] # dimension of state space (28 numbers)
action_dim = env.action_space.shape[0] # dimension of action space (8 numbers)
import torch
import torch.nn as nn
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
<YOUR CODE>
def get_qvalues(self, states, actions):
'''
input:
states - tensor, (batch_size x features)
actions - tensor, (batch_size x actions_dim)
output:
qvalues - tensor, critic estimation, (batch_size)
'''
qvalues = <YOUR CODE>
assert len(qvalues.shape) == 1 and qvalues.shape[0] == states.shape[0]
return qvalues
# template for TD3; template for SAC is below
class TD3_Actor(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
<YOUR CODE>
def get_action(self, states, std_noise=0.1):
'''
Used to collect data by interacting with environment,
so your have to add some noise to actions.
input:
states - numpy, (batch_size x features)
output:
actions - numpy, (batch_size x actions_dim)
'''
# no gradient computation is required here since we will use this only for interaction
with torch.no_grad():
actions = <YOUR CODE>
assert isinstance(actions, (list,np.ndarray)), "convert actions to numpy to send into env"
assert actions.max() <= 1. and actions.min() >= -1, "actions must be in the range [-1, 1]"
return actions
def get_best_action(self, states):
'''
Will be used to optimize actor. Requires differentiable w.r.t. parameters actions.
input:
states - PyTorch tensor, (batch_size x features)
output:
actions - PyTorch tensor, (batch_size x actions_dim)
'''
actions = <YOUR CODE>
assert actions.requires_grad, "you must be able to compute gradients through actions"
return actions
def get_target_action(self, states, std_noise=0.2, clip_eta=0.5):
'''
Will be used to create target for critic optimization.
Returns actions with added "clipped noise".
input:
states - PyTorch tensor, (batch_size x features)
output:
actions - PyTorch tensor, (batch_size x actions_dim)
'''
# no gradient computation is required here since we will use this only for interaction
with torch.no_grad():
actions = <YOUR CODE>
# actions can fly out of [-1, 1] range after added noise
return actions.clamp(-1, 1)
# template for SAC
from torch.distributions import Normal
class SAC_Actor(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
<YOUR CODE>
def apply(self, states):
'''
For given batch of states samples actions and also returns its log prob.
input:
states - PyTorch tensor, (batch_size x features)
output:
actions - PyTorch tensor, (batch_size x action_dim)
log_prob - PyTorch tensor, (batch_size)
'''
<YOUR CODE>
return actions, log_prob
def get_action(self, states):
'''
Used to interact with environment by sampling actions from policy
input:
states - numpy, (batch_size x features)
output:
actions - numpy, (batch_size x actions_dim)
'''
# no gradient computation is required here since we will use this only for interaction
with torch.no_grad():
# hint: you can use `apply` method here
actions = <YOUR CODE>
assert isinstance(actions, (list,np.ndarray)), "convert actions to numpy to send into env"
assert actions.max() <= 1. and actions.min() >= -1, "actions must be in the range [-1, 1]"
return actions
class ReplayBuffer():
def __init__(self, size):
        '''
        Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows, the old memories are dropped.

        Note: for this assignment you can pick any data structure you want.
        If you want to keep it simple, you can store a list of tuples of
        (s, a, r, s') in self._storage. However, you may find out there are
        faster and/or more memory-efficient ways to do so.
        '''
self._storage = []
self._maxsize = size
# OPTIONAL: YOUR CODE
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
'''
Make sure, _storage will not exceed _maxsize.
Make sure, FIFO rule is being followed: the oldest examples has to be removed earlier
'''
data = (obs_t, action, reward, obs_tp1, done)
storage = self._storage
maxsize = self._maxsize
<YOUR CODE>
# add data to storage
def sample(self, batch_size):
        '''
        Sample a batch of experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        '''
storage = self._storage
<YOUR CODE>
# randomly generate batch_size integers
# to be used as indexes of samples
<YOUR CODE>
# collect <s,a,r,s',done> for each index
return <YOUR CODE>
# <states>, <actions>, <rewards>, <next_states>, <is_done>
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(),
1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(initial_state, agent, env, exp_replay, n_steps=1):
    '''
    Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
    Whenever game ends, add record with done=True and reset the game.
    It is guaranteed that env has done=False when passed to this function.

    :returns: return sum of rewards over time and the state in which the env stays
    '''
s = initial_state
sum_rewards = 0
# Play the game for n_steps as per instructions above
for t in range(n_steps):
# select action using policy with exploration
a = <YOUR CODE>
ns, r, done, _ = env.step(a)
exp_replay.add(s, a, r, ns, done)
s = env.reset() if done else ns
sum_rewards += r
return sum_rewards, s
#testing your code.
exp_replay = ReplayBuffer(2000)
actor = <YOUR ACTOR CLASS>(state_dim, action_dim).to(DEVICE)
state = env.reset()
play_and_record(state, actor, env, exp_replay, n_steps=1000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 1000, "play_and_record should have added exactly 1000 steps, "\
"but instead added %i" % len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]" % (
np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + (state_dim,)
assert act_batch.shape == (
10, action_dim), "actions batch should have shape (10, 8) but is instead %s" % str(act_batch.shape)
assert reward_batch.shape == (
10,), "rewards batch should have shape (10,) but is instead %s" % str(reward_batch.shape)
assert is_done_batch.shape == (
10,), "is_done batch should have shape (10,) but is instead %s" % str(is_done_batch.shape)
assert [int(i) in (0, 1)
for i in is_dones], "is_done should be strictly True or False"
print("Well done!")
gamma=0.99 # discount factor
max_buffer_size = 10**5 # size of experience replay
start_timesteps = 5000 # number of stored transitions before network training starts
timesteps_per_epoch=1 # steps in environment per step of network updates
batch_size=128 # batch size for all optimizations
max_grad_norm=10 # max grad norm for all optimizations
tau=0.005 # speed of updating target networks
policy_update_freq=<> # frequency of actor update; vanilla choice is 2 for TD3 or 1 for SAC
alpha=0.1 # temperature for SAC
# iterations passed
n_iterations = 0
# experience replay
exp_replay = ReplayBuffer(max_buffer_size)
# models to train
actor = <YOUR ACTOR CLASS>(state_dim, action_dim).to(DEVICE)
critic1 = Critic(state_dim, action_dim).to(DEVICE)
critic2 = Critic(state_dim, action_dim).to(DEVICE)
# target networks: slow-updated copies of actor and two critics
target_critic1 = Critic(state_dim, action_dim).to(DEVICE)
target_critic2 = Critic(state_dim, action_dim).to(DEVICE)
target_actor = TD3_Actor(state_dim, action_dim).to(DEVICE) # comment this line if you chose SAC
# initialize them as copies of original models
target_critic1.load_state_dict(critic1.state_dict())
target_critic2.load_state_dict(critic2.state_dict())
target_actor.load_state_dict(actor.state_dict()) # comment this line if you chose SAC
def update_target_networks(model, target_model):
for param, target_param in zip(model.parameters(), target_model.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
# optimizers: for every model we have
opt_actor = torch.optim.Adam(actor.parameters(), lr=3e-4)
opt_critic1 = torch.optim.Adam(critic1.parameters(), lr=3e-4)
opt_critic2 = torch.optim.Adam(critic2.parameters(), lr=3e-4)
# just to avoid writing this code three times
def optimize(name, model, optimizer, loss):
'''
Makes one step of SGD optimization, clips norm with max_grad_norm and
logs everything into tensorboard
'''
loss = loss.mean()
optimizer.zero_grad()
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
# logging
env.writer.add_scalar(name, loss.item(), n_iterations)
env.writer.add_scalar(name + "_grad_norm", grad_norm.item(), n_iterations)
def compute_critic_target(rewards, next_states, is_done):
'''
Important: use target networks for this method! Do not use "fresh" models except fresh policy in SAC!
input:
rewards - PyTorch tensor, (batch_size)
next_states - PyTorch tensor, (batch_size x features)
is_done - PyTorch tensor, (batch_size)
output:
critic target - PyTorch tensor, (batch_size)
'''
with torch.no_grad():
critic_target = <YOUR CODE>
assert not critic_target.requires_grad, "target must not require grad."
assert len(critic_target.shape) == 1, "dangerous extra dimension in target?"
return critic_target
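# A possible (non-graded) TD3 sketch of the target above. Assumptions: the critics are
# callable as critic(states, actions) -> (batch_size,), and the target networks and
# gamma from the initialization section are in scope:
def td3_critic_target_sketch(rewards, next_states, is_done):
    with torch.no_grad():
        next_actions = target_actor.get_target_action(next_states)      # clipped-noise target policy
        q_next = torch.min(target_critic1(next_states, next_actions),
                           target_critic2(next_states, next_actions))   # clipped double-Q trick
        return rewards + gamma * (1 - is_done) * q_next                 # bootstrap only when not done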
def compute_actor_loss(states):
'''
Returns actor loss on batch of states
input:
states - PyTorch tensor, (batch_size x features)
output:
actor loss - PyTorch tensor, (batch_size)
'''
# make sure you have gradients w.r.t. actor parameters
actions = <YOUR CODE>
assert actions.requires_grad, "actions must be differentiable with respect to policy parameters"
# compute actor loss
actor_loss = <YOUR CODE>
return actor_loss
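# A possible (non-graded) sketch of the TD3 actor loss: maximizing Q1 under the current
# policy is the same as minimizing -Q1(s, pi(s)); the critic signature is assumed to be
# critic(states, actions) -> (batch_size,):
def td3_actor_loss_sketch(states):
    pi_actions = actor.get_best_action(states)   # differentiable w.r.t. actor parameters
    return -critic1(states, pi_actions)          # per-sample loss; optimize() takes the mean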
seed = <YOUR FAVOURITE RANDOM SEED>
np.random.seed(seed)
env.unwrapped.seed(seed)
torch.manual_seed(seed);
from tqdm.notebook import trange
interaction_state = env.reset()
random_actor = RandomActor()
for n_iterations in trange(0, 1000000, timesteps_per_epoch):
    # while the experience replay is still small, no training happens;
    # we also use a random policy here to gather more diverse starting data
if len(exp_replay) < start_timesteps:
_, interaction_state = play_and_record(interaction_state, random_actor, env, exp_replay, timesteps_per_epoch)
continue
# perform a step in environment and store it in experience replay
_, interaction_state = play_and_record(interaction_state, actor, env, exp_replay, timesteps_per_epoch)
# sample a batch from experience replay
states, actions, rewards, next_states, is_done = exp_replay.sample(batch_size)
# move everything to PyTorch tensors
states = torch.tensor(states, device=DEVICE, dtype=torch.float)
actions = torch.tensor(actions, device=DEVICE, dtype=torch.float)
rewards = torch.tensor(rewards, device=DEVICE, dtype=torch.float)
next_states = torch.tensor(next_states, device=DEVICE, dtype=torch.float)
is_done = torch.tensor(
is_done.astype('float32'),
device=DEVICE,
dtype=torch.float
)
# losses
critic1_loss = <YOUR CODE>
optimize("critic1", critic1, opt_critic1, critic1_loss)
critic2_loss = <YOUR CODE>
optimize("critic2", critic2, opt_critic2, critic2_loss)
# actor update is less frequent in TD3
if n_iterations % policy_update_freq == 0:
actor_loss = <YOUR CODE>
optimize("actor", actor, opt_actor, actor_loss)
# update target networks
update_target_networks(critic1, target_critic1)
update_target_networks(critic2, target_critic2)
update_target_networks(actor, target_actor) # comment this line if you chose SAC
def evaluate(env, actor, n_games=1, t_max=1000):
'''
Plays n_games and returns rewards and rendered games
'''
rewards = []
for _ in range(n_games):
s = env.reset()
R = 0
for _ in range(t_max):
# select action for final evaluation of your policy
action = <YOUR CODE>
assert (action.max() <= 1).all() and (action.min() >= -1).all()
s, r, done, _ = env.step(action)
R += r
if done:
break
rewards.append(R)
return np.array(rewards)
# evaluation will take some time!
sessions = evaluate(env, actor, n_games=20)
score = sessions.mean()
print(f"Your score: {score}")
assert score >= 1000, "Needs more training?"
print("Well done!")
env.close()
env = gym.make("AntPyBulletEnv-v0")
# we want to look inside
env.render(mode="human")
# let's hope this will work
# don't forget to pray
env = gym.wrappers.Monitor(env, directory="videos", force=True)
# record sessions
# note that t_max is 300, so collected reward will be smaller than 1000
evaluate(env, actor, n_games=1, t_max=300)
env.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we will create an instance of the environment. In pybullet-gym, if render is called before the first reset, then you will (hopefully) see the visualisation of the 3D physics environment.
Step2: Let's run random policy and see how it looks.
Step3: So, basically most episodes are 1000 steps long (terminated by the time limit), though sometimes we are terminated earlier if the simulation discovers some obvious reason to think that we crashed our ant. An important thing about continuous control tasks like this is that we receive a non-trivial signal at each step
Step4: This dense signal will guide our optimizations. It also partially explains why off-policy algorithms are more effective and sample-efficient than on-policy algorithms like PPO
Step5: We will add only one wrapper to our environment, which simply writes summaries, mainly the total reward during an episode.
Step6: Models
Step7: Next, let's define a policy, or an actor $\pi$. Use an architecture similar to the critic (a 3-layer MLP). The output depends on the algorithm
Step8: For SAC, model a gaussian policy. This means the policy distribution is multivariate normal with diagonal covariance. The policy head will predict the mean and covariance, and it must be guaranteed that the covariance is non-negative. Important
Step12: ReplayBuffer
Step13: Initialization
Step14: Here is our experience replay
Step15: Here is our models
Step16: To stabilize training, we will require target networks - slow-updating copies of our models. In TD3, both critics and the actor have their copies; in SAC it is assumed that only the critics require target copies while the actor is always used fresh.
Step17: In continuous control, target networks are usually updated using exponential smoothing
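In symbols this is the Polyak/EMA update implemented by `update_target_networks` above: $\theta_{target} \leftarrow \tau\,\theta + (1-\tau)\,\theta_{target}$, with $\tau = 0.005$ here.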
Step18: Finally, we will have three optimization procedures to train our three models, so let's welcome our three Adams
Step19: Critic target computation
Step20: To train the actor we simply want to maximize
Step21: Pipeline
Step22: Evaluation
Step23: Record
|
3,870
|
<ASSISTANT_TASK:>
Python Code:
print("Example 9.8")
omega = 60
L = 0.1
V = 12
#v = 12[45º]
#I = V/jwL[45 - 90]
I = V/(omega*L)
phi = 45 - 90
print("Phasor current: {}[{}]".format(I,phi))
print("Time-domain current: {}cos({}t + {})".format(I,omega,phi))
print("Practice Problem 9.8")
V = 10
u = 10**(-6)
C = 50*u
omega = 100
#I = jwCV[30 + 90]
I = omega*C*V
phi = 30 + 90
print("Phasor current: {}[{}]".format(I,phi))
print("Time-domain current: {}cos({}t + {})".format(I,omega,phi))
print("Example 9.9")
import numpy as np
V = 10
C = 0.1
R = 5
omega = 4
Zc = 1/(omega*C)
print("Impedance Z = {} - j{}".format(R,Zc))
Z = np.sqrt(R**2 + Zc**2)
theta = np.arctan(Zc/R)*180/np.pi
I = V/Z
phi = 0 - theta
print("I = {} [{}º]".format(I,phi))
V = I*Zc
print("V = {} [{}º]".format(V,phi - 90))
print("Practice Problem 9.9")
V = 20
omega = 10
phi = 30
R = 4
L = 0.2
Zl = omega*L
print("Z = {} + j{}".format(R,Zl))
Z = np.sqrt(R**2 + Zl**2)
theta = np.arctan(Zl/R)*180/np.pi
I = V/Z
alpha = phi - theta
print("I = {}[{}º]".format(I,alpha))
print("i(t) = {}sin({}t + {}º)".format(I,omega,alpha))
Vl = Zl*I
print("V = {}[{}º]".format(Vl,alpha + 90))
print("v(t) = {}sin({}t + {}º)".format(Vl,omega, alpha + 90))
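# Optional cross-check (an addition, not part of the original example): the same
# phasor algebra with Python complex numbers; abs/angle recover magnitude and phase.
Z_c = R + 1j*Zl                                  # impedance as a complex number
I_c = V*np.exp(1j*np.deg2rad(phi)) / Z_c         # I = V*exp(j*phi) / Z
print("check: I = {:.3f}[{:.2f}º]".format(np.abs(I_c), np.degrees(np.angle(I_c))))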
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problema Prático 9.8
Step2: Impedância e Admitância
Step3: Problema Prático 9.9
|
3,871
|
<ASSISTANT_TASK:>
Python Code:
# Useful Functions
def find_cointegrated_pairs(data):
n = data.shape[1]
score_matrix = np.zeros((n, n))
pvalue_matrix = np.ones((n, n))
keys = data.keys()
pairs = []
for i in range(n):
for j in range(i+1, n):
S1 = data[keys[i]]
S2 = data[keys[j]]
result = coint(S1, S2)
score = result[0]
pvalue = result[1]
score_matrix[i, j] = score
pvalue_matrix[i, j] = pvalue
if pvalue < 0.05:
pairs.append((keys[i], keys[j]))
return score_matrix, pvalue_matrix, pairs
# Useful Libraries
import numpy as np
import pandas as pd
import statsmodels
import statsmodels.api as sm
from statsmodels.tsa.stattools import coint, adfuller
# just set the seed for the random number generator
np.random.seed(107)
import matplotlib.pyplot as plt
A_returns = np.random.normal(0, 1, 100)
A = pd.Series(np.cumsum(A_returns), name='X') + 50
some_noise = np.random.exponential(1, 100)
B = A - 7 + some_noise
#Your code goes here
C_returns = np.random.normal(1, 1, 100)
C = pd.Series(np.cumsum(C_returns), name='X') + 100
D_returns = np.random.normal(2, 1, 100)
D = pd.Series(np.cumsum(D_returns), name='X') + 100
#Your code goes here
ual = get_pricing('UAL', fields=['price'],
start_date='2015-01-01', end_date='2016-01-01')['price']
aal = get_pricing('AAL', fields=['price'],
start_date='2015-01-01', end_date='2016-01-01')['price']
#Your code goes here
fcau = get_pricing('FCAU', fields=['price'],
start_date='2015-01-01', end_date='2016-01-01')['price']
hmc = get_pricing('HMC', fields=['price'],
start_date='2015-01-01', end_date='2016-01-01')['price']
#Your code goes here
symbol_list = ['MTRN', 'CMP', 'TRQ', 'SCCO', 'HCLP','SPY']
prices_df = get_pricing(symbol_list, fields=['price']
, start_date='2015-01-01', end_date='2016-01-01')['price']
prices_df.columns = map(lambda x: x.symbol, prices_df.columns)
#Your code goes here
S1 = prices_df['MTRN']
S2 = prices_df['SCCO']
#Your code goes here
S1_out = get_pricing('MTRN', fields=['price'],
start_date='2016-01-01', end_date='2016-07-01')['price']
S2_out = get_pricing('SCCO', fields=['price'],
start_date='2016-01-01', end_date='2016-07-01')['price']
#Your code goes here
# Your code goes here
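# A minimal sketch (not the graded solution): re-run the cointegration test on the
# out-of-sample series from above and build a z-scored spread; `coint` is already imported.
score, pvalue, _ = coint(S1_out, S2_out)
spread = S1_out - S2_out
zscore = (spread - spread.mean()) / spread.std()
print("out-of-sample p-value: {:.4f}".format(pvalue))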
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: b. Cointegration Test II
Step3: Exercise 2
Step4: b. Real Cointegration Test II
Step5: Exercise 3
Step6: Exercise 4
Step7: b. Testing the Coefficient
Step8: Extra Credit Exercise
|
3,872
|
<ASSISTANT_TASK:>
Python Code:
#import libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import requests, bs4
import time
from sklearn import model_selection
from collections import OrderedDict
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import linear_model
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
#load data
df = pd.read_csv('msc_appel_data.csv', sep='\t', index_col='num')
df.tail(12)
columns_to_show = ['res_positive', 'res_explained', 'res_negative',
'total_appeals', 'appeals_to_mayor','El_form_to_mayor', 'Pap_form_to_mayor']
data=df[columns_to_show]
grid = sns.pairplot(df[columns_to_show])
savefig('1.png')
print("Correlation coefficient for an explained review result to the total number of appeals =",
df.res_explained.corr(df.total_appeals) )
print("Corr. coeff. between the total number of appeals to the mayor and those made in electronic form =",
df.appeals_to_mayor.corr(df.El_form_to_mayor) )
district_columns = ['to_10K_total_VAO', 'to_10K_total_ZAO', 'to_10K_total_ZelAO',
'to_10K_total_SAO','to_10K_total_SVAO','to_10K_total_SZAO','to_10K_total_TiNAO','to_10K_total_CAO',
'to_10K_total_YUAO','to_10K_total_YUVAO','to_10K_total_YUZAO']
y_pos = np.arange(len(district_columns))
short_district_columns=district_columns.copy()
for i in range(len(short_district_columns)):
short_district_columns[i] = short_district_columns[i].replace('to_10K_total_','')
distr_sum = df[district_columns].sum()
plt.figure(figsize=(16,9))
plt.bar(y_pos, distr_sum, align='center', alpha=0.5)
plt.xticks(y_pos, short_district_columns)
plt.ylabel('Number of appeals')
plt.title('Number of appeals per 10,000 people for all time')
savefig('2.png')
# To remind:
district_columns = ['to_10K_total_VAO', 'to_10K_total_ZAO', 'to_10K_total_ZelAO',
'to_10K_total_SAO','to_10K_total_SVAO','to_10K_total_SZAO','to_10K_total_TiNAO','to_10K_total_CAO',
'to_10K_total_YUAO','to_10K_total_YUVAO','to_10K_total_YUZAO']
# we will collect the data manually from
# https://ru.wikipedia.org/wiki/%D0%90%D0%B4%D0%BC%D0%B8%D0%BD%D0%B8%D1%81%D1%82%D1%80%D0%B0%D1%82%D0%B8%D0%B2%D0%BD%D0%BE-%D1%82%D0%B5%D1%80%D1%80%D0%B8%D1%82%D0%BE%D1%80%D0%B8%D0%B0%D0%BB%D1%8C%D0%BD%D0%BE%D0%B5_%D0%B4%D0%B5%D0%BB%D0%B5%D0%BD%D0%B8%D0%B5_%D0%9C%D0%BE%D1%81%D0%BA%D0%B2%D1%8B
#the data is filled in the same order as the district_columns
district_population=[1507198,1368731,239861,1160576,1415283,990696,339231,769630,1776789,1385385,1427284]
#transition from 1/10000 to citizens' appeal to the entire population of the district
total_appel_dep=district_population*distr_sum/10000
plt.figure(figsize=(16,9))
plt.bar(y_pos, total_appel_dep, align='center', alpha=0.5)
plt.xticks(y_pos, short_district_columns)
plt.ylabel('Number of appeals')
plt.title('Number of appeals per total population of district for all time')
savefig('3.png')
#we use beautifulsoup
oil_page=requests.get('https://worldtable.info/yekonomika/cena-na-neft-marki-brent-tablica-s-1986-po-20.html')
b=bs4.BeautifulSoup(oil_page.text, "html.parser")
table=b.select('.item-description')
table = b.find('div', {'class': 'item-description'})
table_tr=table.find_all('tr')
d_parse=OrderedDict()
for tr in table_tr[1:len(table_tr)-1]:
td=tr.find_all('td')
d_parse[td[0].get_text()]=float(td[1].get_text())
# dictionary selection boundaries
d_start=358
d_end=379 #because the site has no data for October
#d_end=380 if the authors in the data source fill in the values for October, you must enter 380
# Uncomment all if grabber doesn't work
#d_parse=[("январь 2016", 30.8), ("февраль 2016", 33.2), ("март 2016", 39.25), ("апрель 2016", 42.78), ("май 2016", 47.09),
# ("июнь 2016", 49.78), ("июль 2016", 46.63), ("август 2016", 46.37), ("сентябрь 2016", 47.68), ("октябрь 2016", 51.1),
# ("ноябрь 2016", 47.97), ("декабрь 2016", 54.44), ("январь 2017", 55.98), ("февраль 2017", 55.95), ("март 2017", 53.38),
# ("апрель 2017", 53.54), ("май 2017", 50.66), ("июнь 2017", 47.91), ("июль 2017", 49.51), ("август 2017", 51.82) , ("сентябрь 2017", 55.74)]
#d_parse=dict(d_parse)
#d_start=0
#d_end=20
# values from January 2016 to October 2017
oil_price=list(d_parse.values())[d_start:d_end]
oil_price.append(57.64) #delete this when the source site shows data for October
#The October value in the collected data was calculated manually;
#if it is later fixed in the source, you can delete these lines and the code (oil_price.append(57.64)) above
df['oil_price']=oil_price
df.tail(5)
print("Correlation coefficient for the total number of appeals result to the oil price (in US $) =",
df.total_appeals.corr(df.oil_price) )
print("Correlation coefficient for a positive review result to the oil price (in US $) =",
df.res_positive.corr(df.oil_price) )
df2=df.copy()
#Let's make a separate column for each value of our categorical variable
df2=pd.get_dummies(df2,prefix=['month'])
#Let's code the month with numbers
d={'January':1, 'February':2, 'March':3, 'April':4, 'May':5, 'June':6, 'July':7,
'August':8, 'September':9, 'October':10, 'November':11, 'December':12}
month=df.month.map(d)
#We paste the information about the date from several columns
dt=list()
for year,mont in zip(df2.year.values, month.values):
s=str(year)+' '+str(mont)+' 1'
dt.append(s)
#convert the received data into the DateTime type and replace them with a column year
df2.rename(columns={'year': 'DateTime'}, inplace=True)
df2['DateTime']=pd.to_datetime(dt, format='%Y %m %d')
df2.head(5)
#Prepare the data
cols_for_regression=columns_to_show+district_columns
cols_for_regression.remove('res_positive')
cols_for_regression.remove('total_appeals')
X=df2[cols_for_regression].values
y=df2['res_positive']
#Scale the data
scaler =StandardScaler()
X_scal=scaler.fit_transform(X)
y_scal=scaler.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X_scal, y_scal, test_size=0.2, random_state=42)
#y_train=np.reshape(y_train,[y_train.shape[0],1])
#y_test=np.reshape(y_test,[y_test.shape[0],1])
loo = model_selection.LeaveOneOut()
#alpha coefficient is taken at a rough guess
lr = linear_model.Ridge(alpha=55.0)
scores = model_selection.cross_val_score(lr , X_train, y_train, scoring='mean_squared_error', cv=loo,)
print('CV Score:', scores.mean())
lr .fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Test Score:', lr.score(X_test,y_test))
X_oil=df2[cols_for_regression+['oil_price']].values
y_oil=df2['res_positive']
scaler =StandardScaler()
X_scal_oil=scaler.fit_transform(X_oil)
y_scal_oil=scaler.fit_transform(y_oil)
X_train, X_test, y_train, y_test = train_test_split(X_scal_oil, y_scal_oil, test_size=0.2, random_state=42)
#y_train=np.reshape(y_train,[y_train.shape[0],1])
#y_test=np.reshape(y_test,[y_test.shape[0],1])
lr = linear_model.Ridge()
loo = model_selection.LeaveOneOut()
lr = linear_model.Ridge(alpha=55.0)
scores = model_selection.cross_val_score(lr , X_train, y_train, scoring='mean_squared_error', cv=loo,)
print('CV Score:', scores.mean())
lr .fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Test Score:', lr.score(X_test,y_test))
# plot for test data
plt.figure(figsize=(16,9))
plt.scatter(lr.predict(X_test), y_test, color='black')
plt.plot(y_test, y_test, '-', color='green',
linewidth=1)
plt.xlabel('relative number of positive results (predict)')
plt.ylabel('relative number of positive results (test)')
plt.title="Regression on test data"
print('predict: {0} '.format(lr.predict(X_test)))
print('real: {0} '.format(y_test))
savefig('4.png')
l_bord = 18
r_bord = 22
X_train=X_scal_oil[0:l_bord]
X_test=X_scal_oil[l_bord:r_bord]
y_train=y_scal_oil[0:l_bord]
y_test=y_scal_oil[l_bord:r_bord]
loo = model_selection.LeaveOneOut()
lr = linear_model.Ridge(alpha=7.0)
scores = model_selection.cross_val_score(lr , X_train, y_train, scoring='mean_squared_error', cv=loo,)
print('CV Score:', scores.mean())
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Test Score:', lr.score(X_test,y_test))
# plot for test data
plt.figure(figsize=(19,10))
#trainline
plt.scatter(df2.DateTime.values[0:l_bord], lr.predict(X_train), color='black')
plt.plot(df2.DateTime.values[0:l_bord], y_train, '--', color='green',
linewidth=3)
#test line
plt.scatter(df2.DateTime.values[l_bord:r_bord], lr.predict(X_test), color='black')
plt.plot(df2.DateTime.values[l_bord:r_bord], y_test, '--', color='blue',
linewidth=3)
#connecting line
plt.plot([df2.DateTime.values[l_bord-1],df2.DateTime.values[l_bord]], [y_train[l_bord-1],y_test[0]] ,
color='magenta',linewidth=2, label='train to test')
plt.xlabel('Date')
plt.ylabel('Relative number of positive results')
plt.title="Time series"
print('predict: {0} '.format(lr.predict(X_test)))
print('real: {0} '.format(y_test))
savefig('5.1.png')
l_bord = 18
r_bord = 22
cols_months=['month_December', 'month_February', 'month_January', 'month_July', 'month_June', 'month_March', 'month_May', 'month_November',
'month_October','month_September','month_April','month_August']
X_month=df2[cols_for_regression+cols_months].values
y_month=df2['res_positive']
scaler =StandardScaler()
X_scal_month=scaler.fit_transform(X_month)
y_scal_month=scaler.fit_transform(y_month)
X_train=X_scal_month[0:l_bord]
X_test=X_scal_month[l_bord:r_bord]
y_train=y_scal_month[0:l_bord]
y_test=y_scal_month[l_bord:r_bord]
loo = model_selection.LeaveOneOut()
lr = linear_model.Ridge(alpha=7.0)
scores = model_selection.cross_val_score(lr , X_train, y_train, scoring='mean_squared_error', cv=loo,)
print('CV Score:', scores.mean())
lr.fit(X_train, y_train)
print('Coefficients:', lr.coef_)
print('Test Score:', lr.score(X_test,y_test))
# plot for test data
plt.figure(figsize=(19,10))
#trainline
plt.scatter(df2.DateTime.values[0:l_bord], lr.predict(X_train), color='black')
plt.plot(df2.DateTime.values[0:l_bord], y_train, '--', color='green',
linewidth=3)
#test line
plt.scatter(df2.DateTime.values[l_bord:r_bord], lr.predict(X_test), color='black')
plt.plot(df2.DateTime.values[l_bord:r_bord], y_test, '--', color='blue',
linewidth=3)
#connecting line
plt.plot([df2.DateTime.values[l_bord-1],df2.DateTime.values[l_bord]], [y_train[l_bord-1],y_test[0]] , color='magenta',linewidth=2, label='train to test')
plt.xlabel('Date')
plt.ylabel('Relative number of positive results')
plt.title="Time series"
print('predict: {0} '.format(lr.predict(X_test)))
print('real: {0} '.format(y_test))
savefig('5.2.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First look at the data
Step2: Let's take a closer look at some combinations that hint at a linear dependence. We obtain a quantitative estimate in the form of the Pearson correlation coefficient.
On the one hand, it is obvious that the larger the total number of appeals, or of appeals in electronic form, the larger the overall number of appeals. On the other hand, it should be noted that this dependence is not fully linear, and we surely could not account for everything.
Step5: Add other data from the network
Step6: Let's see whether the number of positive decisions on appeals is related in any way to oil prices. We will collect the data automatically by writing a simple scraper for the site.
Step7: Linear regression
Step8: We build a model using most of the table's columns as features, without the month data. Let's see how this helps us predict the number of positive decisions on citizens' appeals.
Step9: We will use linear regression with Ridge regularization. The data is split 80% / 20% (train / test), and model quality is also checked with cross-validation (in this case leave-one-out: one fold per sample).
Step10: Let's see how the oil price affects the prediction quality.
Step11: With a perfectly accurate prediction, all 4 points would lie on the line.
Step12: We remove the oil prices but add the encoded month data.
|
3,873
|
<ASSISTANT_TASK:>
Python Code:
import os
import logging
import tensorflow as tf
import fairing
import numpy as np
from datetime import datetime
from fairing.cloud import gcp
# Setting up google container repositories (GCR) for storing output containers
# You can use any docker container registry istead of GCR
# For local notebook, GCP_PROJECT should be set explicitly
GCP_PROJECT = fairing.cloud.gcp.guess_project_name()
GCP_Bucket = os.environ['GCP_BUCKET'] # e.g., 'gs://kubeflow-demo-g/'
# This is for local notebook instead of that in kubeflow cluster
# os.environ['GOOGLE_APPLICATION_CREDENTIALS']=
def gcs_copy(src_path, dst_path):
import subprocess
print(subprocess.run(['gsutil', 'cp', src_path, dst_path], stdout=subprocess.PIPE).stdout[:-1].decode('utf-8'))
def gcs_download(src_path, file_name):
import subprocess
print(subprocess.run(['gsutil', 'cp', src_path, file_name], stdout=subprocess.PIPE).stdout[:-1].decode('utf-8'))
class TensorflowModel(object):
def __init__(self):
self.model_file = "mnist_model.h5"
self.model = None
def build(self):
self.model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
self.model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(self.model.summary())
def save_model(self):
self.model.save(self.model_file)
gcs_copy(self.model_file, GCP_Bucket + self.model_file)
def train(self):
self.build()
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
callbacks = [
# Interrupt training if `val_loss` stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
# Write TensorBoard logs to `./logs` directory
tf.keras.callbacks.TensorBoard(log_dir=GCP_Bucket + 'logs/'
+ datetime.now().date().__str__())
]
self.model.fit(x_train, y_train, batch_size=32, epochs=5, callbacks=callbacks,
validation_data=(x_test, y_test))
self.save_model()
def predict(self, X):
if not self.model:
self.model = tf.keras.models.load_model(self.model_file)
        # Do any preprocessing
        prediction = self.model.predict(X)  # tf.keras Model.predict takes the input positionally, not via data=
        return prediction
TensorflowModel().train()
# In this demo, I use gsutil, therefore I build a special image that installs the Google Cloud SDK as the base image
base_image = 'gcr.io/{}/fairing-predict-example:latest'.format(GCP_PROJECT)
!docker build --build-arg PY_VERSION=3.6.4 . -t {base_image}
!docker push {base_image}
GCP_PROJECT = fairing.cloud.gcp.guess_project_name()
BASE_IMAGE = 'gcr.io/{}/fairing-predict-example:latest'.format(GCP_PROJECT)
DOCKER_REGISTRY = 'gcr.io/{}/fairing-job-tf'.format(GCP_PROJECT)
from fairing import TrainJob
from fairing.backends import GKEBackend
train_job = TrainJob(TensorflowModel, BASE_IMAGE, input_files=["requirements.txt"],
docker_registry=DOCKER_REGISTRY, backend=GKEBackend())
train_job.submit()
fairing.config.set_builder(name='docker', registry=DOCKER_REGISTRY,
base_image=BASE_IMAGE, push=True)
fairing.config.set_deployer(name='tfjob', worker_count=1, ps_count=1)
run_fn = fairing.config.fn(TensorflowModel)
run_fn()
from fairing import TrainJob
from fairing.backends import GCPManagedBackend
train_job = TrainJob(TensorflowModel, BASE_IMAGE, input_files=["requirements.txt"],
docker_registry=DOCKER_REGISTRY, backend=GCPManagedBackend())
train_job.submit()
# ! tensorboard --logdir=gs://kubeflow-demo-g/logs --host=localhost --port=8777
from fairing import PredictionEndpoint
from fairing.backends import KubeflowGKEBackend
# The mnist_model.h5 is exported during the above local training
endpoint = PredictionEndpoint(TensorflowModel, BASE_IMAGE, input_files=['mnist_model.h5', "requirements.txt"],
docker_registry=DOCKER_REGISTRY, backend=KubeflowGKEBackend())
endpoint.create()
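# Optional smoke test (an assumption, not shown in the original notebook): fairing's
# own examples query a PredictionEndpoint with predict_nparray on numpy inputs, e.g.:
# sample = np.random.rand(1, 28, 28).astype('float32')
# print(endpoint.predict_nparray(sample))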
endpoint.delete()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the model logic
Step2: Train a Keras model in a notebook
Step3: Specify an image registry that will hold the image built by fairing
Step4: Deploy the training job to kubeflow cluster
Step5: Deploy distributed training job to kubeflow cluster
Step6: Deploy the training job as CMLE training job
Step7: Inspect training process with tensorboard
Step8: Deploy the trained model to Kubeflow for predictions
|
3,874
|
<ASSISTANT_TASK:>
Python Code:
import sys
assert sys.version_info[0] == 3
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 10.0, 0.1)
plt.plot(x, np.sin(x))
import pandas as pd
pd.DataFrame([(0, 1), (2, 3)], columns=['A', 'B'])
!which ansible
!which wget
!which curl
!which papermill
!which ping
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Libraries
Step2: pandas to display tables
Step3: Utilities
|
3,875
|
<ASSISTANT_TASK:>
Python Code:
import trappy
import numpy
config = {}
# TRAPpy Events
config["THERMAL"] = trappy.thermal.Thermal
config["OUT"] = trappy.cpu_power.CpuOutPower
config["IN"] = trappy.cpu_power.CpuInPower
config["PID"] = trappy.pid_controller.PIDController
config["GOVERNOR"] = trappy.thermal.ThermalGovernor
# Control Temperature
config["CONTROL_TEMP"] = 77000
# A temperature margin of 2.5 degrees Celsius
config["TEMP_MARGIN"] = 2500
# The Sustainable power at the control Temperature
config["SUSTAINABLE_POWER"] = 2500
# Expected percentile of CONTROL_TEMP + TEMP_MARGIN
config["EXPECTED_TEMP_QRT"] = 95
# Maximum expected Standard Deviation as a percentage
# of mean temperature
config["EXPECTED_STD_PCT"] = 5
import urllib
import os
TRACE_DIR = "example_trace_dat_thermal"
TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')
TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'
if not os.path.isdir(TRACE_DIR):
os.mkdir(TRACE_DIR)
if not os.path.isfile(TRACE_FILE):
print "Fetching trace file.."
urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)
# Create a Trace object
run = trappy.Run(TRACE_FILE, "SomeBenchMark")
# Create an Assertion Object
from bart.common.Analyzer import Analyzer
t = Analyzer(run, config)
BIG = '000000f0'
LITTLE = '0000000f'
result = t.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \
& (IN:dynamic_power > 0)",reference=True, select=BIG)
if len(result):
print "FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster"
else:
print "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster"
result = t.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \
& (IN:dynamic_power > 0)",reference=True, select=LITTLE)
if len(result):
print "FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster"
else:
print "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster"
result = t.getStatement("(GOVERNOR:current_temperature > CONTROL_TEMP) &\
(PID:output > SUSTAINABLE_POWER)", reference=True, select=0)
if len(result):
print "FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP"
else:
print "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP"
t.assertStatement("numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)")
t.assertStatement("numpy.mean(THERMAL:temp) <= CONTROL_TEMP", select=0)
t.getStatement("(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)", select=0)
from bart.thermal.ThermalAssert import ThermalAssert
t_assert = ThermalAssert(run)
end = run.get_duration()
LOW = 0
HIGH = 78000
# The thermal residency gives the percentage (or absolute time) spent in the
# specified temperature range.
result = t_assert.getThermalResidency(temp_range=(0, 78000),
window=(0, end),
percent=True)
for tz_id in result:
print "Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]".format(tz_id,
result[tz_id],
LOW/1000,
HIGH/1000)
pct_temp = numpy.percentile(t.getStatement("THERMAL:temp")[tz_id], result[tz_id])
print "The {:.2f}th percentile temperature is {:.2f}".format(result[tz_id], pct_temp / 1000.0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the Trace
Step2: Run Object
Step3: Assertions
Step4: Assertion
Step5: Assertion
Step6: Statistics
Step7: Check if the mean temperature is less than CONTROL_TEMP
Step8: We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean
Step9: Thermal Residency
|
3,876
|
<ASSISTANT_TASK:>
Python Code:
!pip install websocket-client
!pip install python-swiftclient
import pandas as pd
import matplotlib.pyplot as plt
import json
import websocket
import thread
import time
import swiftclient
import codecs
from io import StringIO
olympics_data_filename = 'olympics.csv'
dictionary_data_filename = 'dictionary.csv'
auth_url = credentials_1['auth_url']+"/v3"
container = credentials_1["container"]
IBM_Objectstorage_Connection = swiftclient.Connection(
key=credentials_1['password'], authurl=auth_url, auth_version='3', os_options={
"project_id": credentials_1['project_id'], "user_id": credentials_1['user_id'], "region_name": credentials_1['region']})
def create_container(container_name):
    '''Create a container on Object Storage.'''
x = IBM_Objectstorage_Connection.put_container(container_name)
return x
def put_object(container_name, fname, contents, content_type):
    '''Write contents to Object Storage.'''
x = IBM_Objectstorage_Connection.put_object(
container_name,
fname,
contents,
content_type)
return x
def get_object(container_name, fname):
    '''Retrieve contents from Object Storage.'''
Object_Store_file_details = IBM_Objectstorage_Connection.get_object(
container_name, fname)
return Object_Store_file_details[1]
olympics = pd.read_csv(StringIO(get_object(container, olympics_data_filename).decode('utf-8')))
olympics = olympics.rename(columns = {'Country':'Code'})
olympics = olympics.rename(columns = {'Year':'Edition'})
dictionary = pd.read_csv(StringIO(get_object(container, dictionary_data_filename).decode('utf-8')))
olympics = pd.merge(olympics, dictionary, on='Code')
olympics.head()
def get_medals_gb_year_country():
    '''Group by edition and country and sum medals count.'''
medals_groupedBy_yearCountry = olympics.groupby(['Edition','Code']).apply(lambda country: country['Code'].count())
return medals_groupedBy_yearCountry
def get_medals_gb_year_country_medal():
    '''Group by edition, country, medal type and sum medals count.'''
medals_groupedBy_yearCountryMedal = olympics.groupby(['Edition', 'Code', 'Medal']).apply(lambda country: country['Medal'].count())
return medals_groupedBy_yearCountryMedal
def get_medals_last_10_years(countrycode):
    '''Get Gold, Silver and Bronze medals for a country for the last 10 editions.'''
last10pics = olympics['Edition'].unique()
yrs = pd.Series(last10pics).nlargest(10)
df = pd.DataFrame([], columns=['Year', 'Gold', 'Silver', 'Bronze'])
medalsdf = get_medals_gb_year_country_medal()
for yr in yrs:
medaltally = medalsdf[yr][countrycode]
gold = 0
silver = 0
bronze = 0
if 'Gold' in medaltally:
gold = medaltally['Gold']
if 'Silver' in medaltally:
silver = medaltally['Silver']
if 'Bronze' in medaltally:
bronze = medaltally['Bronze']
df1 = pd.DataFrame([[yr,gold, silver, bronze]], columns=['Year', 'Gold', 'Silver', 'Bronze'])
df = df.append(df1, ignore_index=True)
df = df.sort_values(by=['Year'], ascending=True)
df = df.reset_index()
del df['index']
return df
def get_correlation_medalstally():
    '''Get correlation between the medals tally and population, GDP per capita.'''
df = get_medals_gb_year_country()
values = get_all_olympic_years().values
size = values.size
correlations = []
for i in range(size):
year = values[i][0]
df1 = df[year].to_frame(name="Tally")
df1 = df1.reset_index()
df2 = pd.merge(df1,dictionary, on='Code')
corrpop = df2.corr().values[0][1]
corrgdp = df2.corr().values[0][2]
resp = {"Year": year, "Population":corrpop, "GDP":corrgdp}
correlations.append(resp)
return correlations
def get_medals_category(countrycode, year):
    '''Get the medals count in different sports categories for a country in an edition.'''
df = olympics[olympics['Edition'] == year]
df1 = df[df['Code'] == countrycode]
df2 = df1.groupby(['Sport']).apply(lambda country: country['Medal'].count())
return df2
def get_medals_category_all(countrycode):
    '''Get the medals count in different sports categories for a country for the last ten editions.'''
df1 = olympics[olympics['Code'] == countrycode]
df2 = df1.groupby(['Sport']).apply(lambda country: country['Medal'].count())
return df2
def get_top_ten_gold_tally(year):
    '''Get the top ten gold medal winning countries in an edition.'''
df = olympics[olympics['Edition'] == year]
df1 = df[df['Medal'] == 'Gold']
df2 = df1.groupby(['Code']).apply(lambda country: country['Medal'].count())
return df2
def get_top_ten_total_tally(year):
    '''Get the top ten total medal winning countries in an edition.'''
df = olympics[olympics['Edition'] == year]
df1 = df.groupby(['Code']).apply(lambda country: country['Medal'].count())
return df1
def get_year_venue():
    '''Get edition venue matrix.'''
df = olympics[['Edition', 'City']]
df = df.drop_duplicates()
df = df.reset_index()
df = df.set_index('Edition')
del df['index']
return df.sort_index()
def get_all_olympic_years():
    '''Get list of all olympic editions.'''
df = olympics['Edition']
df = df.drop_duplicates()
df = df.reset_index()
del df['index']
return df.sort_index()
def get_all_countries():
    '''Get list of all countries.'''
df = olympics[['Code','Country']]
df = df.drop_duplicates()
df = df.reset_index()
del df['index']
    return df.sort_values(['Country'], ascending=[True])
def get_country_edition_data(countrycode,edition):
    '''Get data for a country and edition.'''
df = olympics[olympics["Code"] == countrycode]
df1 = df[df["Edition"] == edition]
return df1
def on_message(ws, message):
print(message)
msg = json.loads(message)
cmd = msg['cmd']
if cmd == 'MBY':
country = msg['country']
tally = get_medals_last_10_years(country)
tallyarray=[]
for i, row in tally.iterrows():
medaltally = {"Year":int(row["Year"]),
"Gold":int(row["Gold"]),
"Silver":int(row["Silver"]),
"Bronze":int(row["Bronze"])}
tallyarray.append(medaltally)
wsresponse = {}
wsresponse["forcmd"] = "MBY"
wsresponse["response"] = tallyarray
ws.send(json.dumps(wsresponse))
elif cmd == 'MBSC':
country = msg['country']
year = 2008
response = get_medals_category(country, year)
ct = response.count()
if ct > 5:
response = response.nlargest(5)
medals = []
categories = []
for i, row in response.iteritems():
categories.append(i)
medals.append(row)
wsresponse = {}
wsresponse["forcmd"] = "MBSC"
wsresponse["response"] = { "categories":categories, "medals":medals}
ws.send(json.dumps(wsresponse))
elif cmd == 'MBSA':
country = msg['country']
response = get_medals_category_all(country)
ct = response.count()
if ct > 5:
response = response.nlargest(5)
medals = []
categories = []
for i, row in response.iteritems():
categories.append(i)
medals.append(row)
wsresponse = {}
wsresponse["forcmd"] = "MBSA"
wsresponse["response"] = { "categories":categories, "medals":medals}
ws.send(json.dumps(wsresponse))
elif cmd == 'T10G':
edition = msg["edition"]
response = get_top_ten_gold_tally(edition)
ct = response.count()
if ct > 10:
response = response.nlargest(10)
medals = []
for i, row in response.iteritems():
data = {"country":i,"tally":row}
medals.append(data)
wsresponse = {}
wsresponse["forcmd"] = "T10G"
wsresponse["response"] = medals
print(wsresponse)
ws.send(json.dumps(wsresponse))
elif cmd == 'T10M':
year = msg["edition"]
response = get_top_ten_total_tally(year)
ct = response.count()
if ct > 10:
response = response.nlargest(10)
medals = []
for i, row in response.iteritems():
data = {"country":i,"tally":row}
medals.append(data)
wsresponse = {}
wsresponse["forcmd"] = "T10M"
wsresponse["response"] = medals
print(wsresponse)
ws.send(json.dumps(wsresponse))
elif cmd == 'CORR':
corr = get_correlation_medalstally()
wsresponse = {}
wsresponse["forcmd"] = "CORR"
wsresponse["response"] = corr
ws.send(json.dumps(wsresponse))
elif cmd == 'YV':
yearvenue = get_year_venue()
yearvenuearray = []
for i in range(yearvenue.size):
value = {"Year":yearvenue.index[i],"Venue":yearvenue.values[i].tolist()[0]}
yearvenuearray.append(value)
responsejson = {}
responsejson["forcmd"]="YV"
responsejson["response"]=yearvenuearray
ws.send(json.dumps(responsejson))
elif cmd == 'DATA':
country = msg['country']
edition = msg['edition']
olympicsslice = get_country_edition_data(country,edition)
data = []
numofcolumns = olympicsslice.columns.size
cols = []
values = []
for column in olympicsslice.columns:
cols.append(column)
for value in olympicsslice.values:
values.append(value.tolist())
data = {"cols":cols,"vals":values}
responsejson = {}
responsejson['forcmd']='DATA'
responsejson['response']= data
ws.send(json.dumps(responsejson))
elif cmd == 'EDITIONS':
years = get_all_olympic_years()
yearsarray = []
for i,row in years.iteritems():
for value in row:
yearsarray.append(value)
length = len(yearsarray)
wsresponse = []
for i in range(length):
year = {"text":yearsarray[i],"value":yearsarray[i]}
wsresponse.append(year)
responsejson = {}
responsejson['forcmd']='EDITIONS'
responsejson['response']= wsresponse
ws.send(json.dumps(responsejson))
elif cmd == 'COUNTRIES':
countries = get_all_countries()
countriesarray = []
codearray = []
for i,row in countries.iteritems():
if i=='Code':
for value in row:
codearray.append(value)
elif i=='Country':
for value in row:
countriesarray.append(value)
length = len(codearray)
wsresponse = []
for i in range(length):
country = {"text":countriesarray[i],"value":codearray[i]}
wsresponse.append(country)
responsejson = {}
responsejson['forcmd']='COUNTRIES'
responsejson['response']= wsresponse
ws.send(json.dumps(responsejson))
def on_error(ws, error):
print(error)
def on_close(ws):
ws.send("DSX Listen End")
def on_open(ws):
def run(*args):
for i in range(10000):
hbeat = '{"cmd":"Olympics DSX HeartBeat"}'
ws.send(hbeat)
time.sleep(100)
thread.start_new_thread(run, ())
def start_websocket_listener():
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://NODERED_BASE_URL/ws/orchestrate",
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever()
start_websocket_listener()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install IBM Bluemix Object Storage Client
Step2: 1.2 Import packages and libraries
Step3: 2. Configuration
Step7: 3. Persistence and Storage
Step8: 4. Data
Step21: 5. Insights on the data using Python Pandas
Step22: 6. Expose integration point with a websocket client
Step23: 7. Start websocket client
|
3,877
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from geoscilabs.seismic.NMOwidget import ViewWiggle, InteractClean, InteractNosiy, NMOstackthree
from SimPEG.utils import download
# Define path to required data files
synDataFilePath = 'http://github.com/geoscixyz/geosci-labs/raw/master/assets/seismic/syndata1.npy'
obsDataFilePath = 'https://github.com/geoscixyz/geosci-labs/raw/master/assets/seismic/obsdata1.npy'
timeFilePath= 'https://github.com/geoscixyz/geosci-labs/raw/master/assets/seismic/time1.npy'
# Download the data
synData = download(synDataFilePath,overwrite=True,verbose=False)
obsData = download(obsDataFilePath,overwrite=True,verbose=False)
timeData = download(timeFilePath,overwrite=True,verbose=False)
# Plot the data
ViewWiggle(synData, obsData)
# Fit hyperbola to clean data
clean = InteractClean(synData,timeData)
clean
noisy = InteractNosiy(obsData,timeData)
noisy
NMOstackthree(obsData, noisy.kwargs["t0"], noisy.kwargs["v"]-200., noisy.kwargs["v"], noisy.kwargs["v"]+200.,timeData)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Two common-mid-point (CMP) gathers
Step2: Step 2
Step3: Step 3
Step4: Step 4
|
3,878
|
<ASSISTANT_TASK:>
Python Code:
from sklearn import datasets, neighbors, linear_model
digits = datasets.load_digits() # Retrieves digits dataset from scikit-learn
print(digits['DESCR'])
digits['images'][0]
import matplotlib.pyplot as plt
plt.gray()
plt.matshow(digits.images[0])
plt.matshow(digits.images[10])
plt.show()
for i in range(0,10):
plt.matshow(digits.images[i])
plt.show()
X_digits = digits.data
X_digits
y_digits = digits.target
y_digits
n_samples = len(X_digits)
n_samples
X_train = X_digits[:int(.9 * n_samples)]
X_train
y_train = y_digits[:int(.9 * n_samples)]
y_train
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
X_test
knn = neighbors.KNeighborsClassifier() # Retrieve the default K-Neighbours Classification algorithm
knn
fitting = knn.fit(X_train, y_train) # Train the algorithm on 90% of the samples
fitting
knn_score = fitting.score(X_test, y_test) # Score the algorithm on how well it fits the 10% of the data that was left out
print('KNN score: %f' % knn_score)
logistic = linear_model.LogisticRegression()
logistic
log_regression_fitting = logistic.fit(X_train, y_train)
log_regression_fitting
log_regression_score = log_regression_fitting.score(X_test, y_test)
print('LogisticRegression score: %f' % log_regression_score)
print('KNN score: %f' % knn_score)
print('LGR score: %f' % log_regression_score)
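# Optional extra (not part of the original notebook): a confusion matrix shows which
# digits the fitted KNN classifier confuses; sklearn.metrics is assumed available.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, fitting.predict(X_test)))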
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What does our data look like?
Step2: Extract our input data (X digits), our target output data (Y digits) and the number of samples we will process.
Step3: Extract 90% of our available data as training data for the models.
Step4: Extract 10% of our available data as test data, to check the level of accuracy for the models.
Step5: Use the K-Neighbours algorithm to create a Classifier implementing the k-nearest neighbors vote.
Step6: Use the Logistic Regression algorithm
Step7: We can see from the outcomes KNN was better at predicting the target result, with ~96% accuracy.
|
3,879
|
<ASSISTANT_TASK:>
Python Code:
import regionmask
regionmask.__version__
import xarray as xr
import numpy as np
# don't expand data
xr.set_options(display_style="text", display_expand_data=False)
lon = np.arange(-179.5, 180)
lat = np.arange(-89.5, 90)
regionmask.defined_regions.srex
mask = regionmask.defined_regions.srex.mask_3D(lon, lat)
mask
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from matplotlib import colors as mplc
cmap1 = mplc.ListedColormap(["none", "#9ecae1"])
fg = mask.isel(region=slice(4)).plot(
subplot_kws=dict(projection=ccrs.PlateCarree()),
col="region",
col_wrap=2,
transform=ccrs.PlateCarree(),
add_colorbar=False,
aspect=1.5,
cmap=cmap1,
)
for ax in fg.axes.flatten():
ax.coastlines()
fg.fig.subplots_adjust(hspace=0, wspace=0.1);
regionmask.plot_3D_mask(mask, add_colorbar=False, cmap="plasma");
airtemps = xr.tutorial.load_dataset("air_temperature")
# choose a good projection for regional maps
proj = ccrs.LambertConformal(central_longitude=-100)
ax = plt.subplot(111, projection=proj)
airtemps.isel(time=1).air.plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree())
ax.coastlines();
mask_3D = regionmask.defined_regions.srex.mask_3D(airtemps)
mask_3D
mask_full = regionmask.defined_regions.srex.mask_3D(airtemps, drop=False)
mask_full
# 1) by the index of the region:
r1 = mask_3D.sel(region=3)
# 2) with the abbreviation
r2 = mask_3D.isel(region=(mask_3D.abbrevs == "WNA"))
# 3) with the long name:
r3 = mask_3D.isel(region=(mask_3D.names == "E. North America"))
mask_3D.set_index(regions=["region", "abbrevs", "names"]);
airtemps_cna = airtemps.where(r1)
proj = ccrs.LambertConformal(central_longitude=-100)
ax = plt.subplot(111, projection=proj)
airtemps_cna.isel(time=1).air.plot(ax=ax, transform=ccrs.PlateCarree())
ax.coastlines();
weights = np.cos(np.deg2rad(airtemps.lat))
ts_airtemps_regional = airtemps.weighted(mask_3D * weights).mean(dim=("lat", "lon"))
ts_airtemps_regional
ts_airtemps_regional.air.plot(col="region", col_wrap=3);
land_110 = regionmask.defined_regions.natural_earth_v5_0_0.land_110
land_mask = land_110.mask_3D(airtemps)
proj = ccrs.LambertConformal(central_longitude=-100)
ax = plt.subplot(111, projection=proj)
land_mask.squeeze().plot.pcolormesh(
ax=ax, transform=ccrs.PlateCarree(), cmap=cmap1, add_colorbar=False
)
ax.coastlines();
mask_lsm = mask_3D * land_mask.squeeze(drop=True)
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=proj))
ax = axes[0]
mask_3D.sel(region=2).plot(
ax=ax, transform=ccrs.PlateCarree(), add_colorbar=False, cmap=cmap1
)
ax.coastlines()
ax.set_title("Regional mask: all points")
ax = axes[1]
mask_lsm.sel(region=2).plot(
ax=ax, transform=ccrs.PlateCarree(), add_colorbar=False, cmap=cmap1
)
ax.coastlines()
ax.set_title("Regional mask: land only");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load xarray and numpy
Step2: Creating a mask
Step3: We will create a mask with the SREX regions (Seneviratne et al., 2012).
Step4: The function mask_3D determines which gridpoints lie within the polygon making up each region
Step5: As mentioned, mask is a boolean xarray.Dataset with shape region x lat x lon. It contains region (=numbers) as dimension coordinate as well as abbrevs and names as non-dimension coordinates (see the xarray docs for the details on the terminology).
Step6: Plotting flattened masks
Step7: Working with a 3D mask
Step8: The example data is a temperature field over North America. Let's plot the first time step
Step9: An xarray object can be passed to the mask_3D function
Step10: By default this creates a mask containing one layer (slice) for each region that contains at least one gridpoint. As the example data only has values over North America, we get only 6 layers even though there are 26 SREX regions. To obtain all layers specify drop=False
Step11: Note mask_full now has 26 layers.
Step12: This also applies to the regionally-averaged data below.
Step13: Mask out a region
Step14: Which looks as follows
Step15: We could now use airtemps_cna to calculate the regional average for 'Central North America'. However, there is a more elegant way.
Step16: Let's break down what happens here. By multiplying mask_3D * weights we get a DataArray where gridpoints not in the region get a weight of 0. Gridpoints within a region get a weight proportional to the gridcell area. airtemps.weighted(mask_3D * weights) creates an xarray object which can be used for weighted operations. From this we calculate the weighted mean over the lat and lon dimensions. The resulting dataarray has the dimensions region x time
Step17: The regionally-averaged time series can be plotted
Step18: Restrict the mask to land points
Step19: and plot it
Step20: To create the combined mask we multiply the two
Step21: Note the .squeeze(drop=True). This is required to remove the region dimension from land_mask.
|
3,880
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.fft as sf
N = 8
result = sf.dct(np.eye(N), axis=0, norm= 'ortho')
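# Sanity check (an addition, not part of the original task): with norm='ortho' the
# DCT-II matrix is orthonormal, so result @ result.T should equal the identity.
assert np.allclose(result @ result.T, np.eye(N))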
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
3,881
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display, Image
from IPython.display import (
display_pretty, display_html, display_jpeg,
display_png, display_json, display_latex, display_svg
)
#raise NotImplementedError()
assert True # leave this to grade the import statements
Image (url = "http://users.wpi.edu/~drdewalt/photos/physics-blackboard.jpg", embed = True, height = 600, width = 600)
#raise NotImplementedError()
assert True # leave this to grade the image display
%%html
<table>
<tr>
<th>Name</th>
<th>Symbol</th>
<th>Antiparticle</th>
<th>Charge(e)</th>
<th>Mass$(MeV/c^2)$</th>
</tr>
<tr>
<td>up</td>
<td>u</td>
<td>$\bar u$</td>
<td>+$\frac{2}{3}$</td>
<td>1.5-3.3</td>
</tr>
<tr>
<td>down</td>
<td>d</td>
<td>$\bar d$</td>
<td>-$\frac{1}{3}$</td>
<td>3.5-6.0</td>
</tr>
<tr>
<td>charm</td>
<td>c</td>
<td>$\bar c$</td>
<td>+$\frac{2}{3}$</td>
<td>1,160-1,340</td>
</tr>
<tr>
<td>strange</td>
<td>s</td>
<td>$\bar s$</td>
<td>-$\frac{1}{3}$</td>
<td>70-130</td>
</tr>
<tr>
<td>top</td>
<td>t</td>
<td>$\bar t$</td>
<td>+$\frac{2}{3}$</td>
<td>169,100-173,300</td>
</tr>
<tr>
<td>bottom</td>
<td>b</td>
<td>$\bar b$</td>
<td>-$\frac{1}{3}$</td>
<td>4,130-4,370</td>
</tr>
</table>
assert True # leave this here to grade the quark table
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic rich display
Step2: Use the HTML object to display HTML in the notebook that reproduces the table of Quarks on this page. This will require you to learn about how to create HTML tables and then pass that to the HTML object for display. Don't worry about styling and formatting the table, but you should use LaTeX where appropriate.
|
3,882
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-1', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
3,883
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
print(tf.__version__)
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
import numpy as np
import time
import functools
content_path = tf.keras.utils.get_file('belfry.jpg','https://storage.googleapis.com/khanhlvg-public.appspot.com/arbitrary-style-transfer/belfry-2611573_1280.jpg')
style_path = tf.keras.utils.get_file('style23.jpg','https://storage.googleapis.com/khanhlvg-public.appspot.com/arbitrary-style-transfer/style23.jpg')
style_predict_path = tf.keras.utils.get_file('style_predict.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite')
style_transform_path = tf.keras.utils.get_file('style_transform.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite')
# Function to load an image from a file, and add a batch dimension.
def load_img(path_to_img):
img = tf.io.read_file(path_to_img)
img = tf.io.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img = img[tf.newaxis, :]
return img
# Function to pre-process an image by resizing and centrally cropping it.
def preprocess_image(image, target_dim):
# Resize the image so that the shorter dimension becomes target_dim pixels.
shape = tf.cast(tf.shape(image)[1:-1], tf.float32)
short_dim = min(shape)
scale = target_dim / short_dim
new_shape = tf.cast(shape * scale, tf.int32)
image = tf.image.resize(image, new_shape)
# Central crop the image.
image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)
return image
# Load the input images.
content_image = load_img(content_path)
style_image = load_img(style_path)
# Preprocess the input images.
preprocessed_content_image = preprocess_image(content_image, 384)
preprocessed_style_image = preprocess_image(style_image, 256)
print('Style Image Shape:', preprocessed_style_image.shape)
print('Content Image Shape:', preprocessed_content_image.shape)
def imshow(image, title=None):
if len(image.shape) > 3:
image = tf.squeeze(image, axis=0)
plt.imshow(image)
if title:
plt.title(title)
plt.subplot(1, 2, 1)
imshow(preprocessed_content_image, 'Content Image')
plt.subplot(1, 2, 2)
imshow(preprocessed_style_image, 'Style Image')
# Function to run style prediction on preprocessed style image.
def run_style_predict(preprocessed_style_image):
# Load the model.
interpreter = tf.lite.Interpreter(model_path=style_predict_path)
# Set model input.
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[0]["index"], preprocessed_style_image)
# Calculate style bottleneck.
interpreter.invoke()
style_bottleneck = interpreter.tensor(
interpreter.get_output_details()[0]["index"]
)()
return style_bottleneck
# Calculate style bottleneck for the preprocessed style image.
style_bottleneck = run_style_predict(preprocessed_style_image)
print('Style Bottleneck Shape:', style_bottleneck.shape)
# Run style transform on the preprocessed content image
def run_style_transform(style_bottleneck, preprocessed_content_image):
# Load the model.
interpreter = tf.lite.Interpreter(model_path=style_transform_path)
# Allocate tensors and look up the model inputs.
input_details = interpreter.get_input_details()
interpreter.allocate_tensors()
# Set model inputs.
interpreter.set_tensor(input_details[0]["index"], preprocessed_content_image)
interpreter.set_tensor(input_details[1]["index"], style_bottleneck)
interpreter.invoke()
# Transform content image.
stylized_image = interpreter.tensor(
interpreter.get_output_details()[0]["index"]
)()
return stylized_image
# Stylize the content image using the style bottleneck.
stylized_image = run_style_transform(style_bottleneck, preprocessed_content_image)
# Visualize the output.
imshow(stylized_image, 'Stylized Image')
# Calculate style bottleneck of the content image.
style_bottleneck_content = run_style_predict(
preprocess_image(content_image, 256)
)
# Define the content blending ratio between [0..1].
# 0.0: 0% of the style is taken from the content image.
# 1.0: 100% of the style is taken from the content image.
content_blending_ratio = 0.5 #@param {type:"slider", min:0, max:1, step:0.01}
# Blend the style bottleneck of style image and content image
style_bottleneck_blended = content_blending_ratio * style_bottleneck_content \
+ (1 - content_blending_ratio) * style_bottleneck
# Stylize the content image using the style bottleneck.
stylized_image_blended = run_style_transform(style_bottleneck_blended,
preprocessed_content_image)
# Visualize the output.
imshow(stylized_image_blended, 'Blended Stylized Image')
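# Optional follow-up (sketch): save the blended result to disk. The file name is
# an assumption; the tf.io/tf.image calls are standard TensorFlow 2 APIs.
out = tf.image.convert_image_dtype(tf.squeeze(stylized_image_blended, axis=0), tf.uint8)
tf.io.write_file('stylized.png', tf.io.encode_png(out))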
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Artistic style transfer with TensorFlow Lite
Step2: Download the content image, the style image, and the pre-trained TensorFlow Lite models.
Step3: Pre-process the inputs
Step4: Visualize the inputs
Step5: Run style transfer with TensorFlow Lite
Step6: Transform the style
Step7: Blend the styles
|
3,884
|
<ASSISTANT_TASK:>
Python Code:
import os
import time
import pandas as pd
from google.cloud import aiplatform, bigquery
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
REGION = "us-central1"
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
ARTIFACT_STORE = f"gs://{PROJECT_ID}-kfp-artifact-store"
DATA_ROOT = f"{ARTIFACT_STORE}/data"
JOB_DIR_ROOT = f"{ARTIFACT_STORE}/jobs"
TRAINING_FILE_PATH = f"{DATA_ROOT}/training/dataset.csv"
VALIDATION_FILE_PATH = f"{DATA_ROOT}/validation/dataset.csv"
API_ENDPOINT = f"{REGION}-aiplatform.googleapis.com"
os.environ["JOB_DIR_ROOT"] = JOB_DIR_ROOT
os.environ["TRAINING_FILE_PATH"] = TRAINING_FILE_PATH
os.environ["VALIDATION_FILE_PATH"] = VALIDATION_FILE_PATH
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["REGION"] = REGION
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
%%bash
DATASET_LOCATION=US
DATASET_ID=covertype_dataset
TABLE_ID=covertype
DATA_SOURCE=gs://workshop-datasets/covertype/small/dataset.csv
SCHEMA=Elevation:INTEGER,\
Aspect:INTEGER,\
Slope:INTEGER,\
Horizontal_Distance_To_Hydrology:INTEGER,\
Vertical_Distance_To_Hydrology:INTEGER,\
Horizontal_Distance_To_Roadways:INTEGER,\
Hillshade_9am:INTEGER,\
Hillshade_Noon:INTEGER,\
Hillshade_3pm:INTEGER,\
Horizontal_Distance_To_Fire_Points:INTEGER,\
Wilderness_Area:STRING,\
Soil_Type:STRING,\
Cover_Type:INTEGER
bq --location=$DATASET_LOCATION --project_id=$PROJECT_ID mk --dataset $DATASET_ID
bq --project_id=$PROJECT_ID --dataset_id=$DATASET_ID load \
--source_format=CSV \
--skip_leading_rows=1 \
--replace \
$TABLE_ID \
$DATA_SOURCE \
$SCHEMA
%%bigquery
SELECT *
FROM `covertype_dataset.covertype`
!bq query \
-n 0 \
--destination_table covertype_dataset.training \
--replace \
--use_legacy_sql=false \
'SELECT * \
FROM `covertype_dataset.covertype` AS cover \
WHERE \
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (1, 2, 3, 4)'
!bq extract \
--destination_format CSV \
covertype_dataset.training \
$TRAINING_FILE_PATH
# TODO: Your code to create the BQ table validation split
# TODO: Your code to export the validation table to GCS
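# One possible solution sketch (left as comments since these are exercise TODOs).
# It mirrors the training split above; picking hash bucket 8 is an assumption:
# !bq query -n 0 --destination_table covertype_dataset.validation --replace \
#   --use_legacy_sql=false \
#   'SELECT * FROM `covertype_dataset.covertype` AS cover \
#    WHERE MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (8)'
# !bq extract --destination_format CSV covertype_dataset.validation $VALIDATION_FILE_PATH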
df_train = pd.read_csv(TRAINING_FILE_PATH)
df_validation = pd.read_csv(VALIDATION_FILE_PATH)
print(df_train.shape)
print(df_validation.shape)
numeric_feature_indexes = slice(0, 10)
categorical_feature_indexes = slice(10, 12)
preprocessor = ColumnTransformer(
transformers=[
("num", StandardScaler(), numeric_feature_indexes),
("cat", OneHotEncoder(), categorical_feature_indexes),
]
)
pipeline = Pipeline(
[
("preprocessor", preprocessor),
("classifier", SGDClassifier(loss="log", tol=1e-3)),
]
)
num_features_type_map = {
feature: "float64" for feature in df_train.columns[numeric_feature_indexes]
}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
X_train = df_train.drop("Cover_Type", axis=1)
y_train = df_train["Cover_Type"]
X_validation = df_validation.drop("Cover_Type", axis=1)
y_validation = df_validation["Cover_Type"]
pipeline.set_params(classifier__alpha=0.001, classifier__max_iter=200)
pipeline.fit(X_train, y_train)
accuracy = pipeline.score(X_validation, y_validation)
print(accuracy)
TRAINING_APP_FOLDER = "training_app"
os.makedirs(TRAINING_APP_FOLDER, exist_ok=True)
%%writefile {TRAINING_APP_FOLDER}/train.py
import os
import subprocess
import sys
import fire
import hypertune
import numpy as np
import pandas as pd
import pickle
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
def train_evaluate(job_dir, training_dataset_path, validation_dataset_path, alpha, max_iter, hptune):
df_train = pd.read_csv(training_dataset_path)
df_validation = pd.read_csv(validation_dataset_path)
if not hptune:
df_train = pd.concat([df_train, df_validation])
numeric_feature_indexes = slice(0, 10)
categorical_feature_indexes = slice(10, 12)
preprocessor = ColumnTransformer(
transformers=[
('num', StandardScaler(), numeric_feature_indexes),
('cat', OneHotEncoder(), categorical_feature_indexes)
])
pipeline = Pipeline([
('preprocessor', preprocessor),
('classifier', SGDClassifier(loss='log',tol=1e-3))
])
num_features_type_map = {feature: 'float64' for feature in df_train.columns[numeric_feature_indexes]}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
print('Starting training: alpha={}, max_iter={}'.format(alpha, max_iter))
X_train = df_train.drop('Cover_Type', axis=1)
y_train = df_train['Cover_Type']
pipeline.set_params(classifier__alpha=alpha, classifier__max_iter=max_iter)
pipeline.fit(X_train, y_train)
if hptune:
X_validation = df_validation.drop('Cover_Type', axis=1)
y_validation = df_validation['Cover_Type']
accuracy = pipeline.score(X_validation, y_validation)
print('Model accuracy: {}'.format(accuracy))
# Log it with hypertune
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=accuracy
)
# Save the model
if not hptune:
model_filename = 'model.pkl'
with open(model_filename, 'wb') as model_file:
pickle.dump(pipeline, model_file)
gcs_model_path = "{}/{}".format(job_dir, model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path], stderr=sys.stdout)
print("Saved model in: {}".format(gcs_model_path))
if __name__ == "__main__":
fire.Fire(train_evaluate)
%%writefile {TRAINING_APP_FOLDER}/Dockerfile
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
# TODO
IMAGE_NAME = "trainer_image"
IMAGE_TAG = "latest"
IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{IMAGE_TAG}"
os.environ["IMAGE_URI"] = IMAGE_URI
!gcloud builds submit --tag $IMAGE_URI $TRAINING_APP_FOLDER
TIMESTAMP = time.strftime("%Y%m%d_%H%M%S")
JOB_NAME = f"forestcover_tuning_{TIMESTAMP}"
JOB_DIR = f"{JOB_DIR_ROOT}/{JOB_NAME}"
os.environ["JOB_NAME"] = JOB_NAME
os.environ["JOB_DIR"] = JOB_DIR
%%bash
MACHINE_TYPE="n1-standard-4"
REPLICA_COUNT=1
CONFIG_YAML=config.yaml
cat <<EOF > $CONFIG_YAML
studySpec:
metrics:
- metricId: accuracy
goal: MAXIMIZE
parameters:
# TODO
algorithm: ALGORITHM_UNSPECIFIED # results in Bayesian optimization
trialJobSpec:
workerPoolSpecs:
- machineSpec:
machineType: $MACHINE_TYPE
replicaCount: $REPLICA_COUNT
containerSpec:
imageUri: $IMAGE_URI
args:
- --job_dir=$JOB_DIR
- --training_dataset_path=$TRAINING_FILE_PATH
- --validation_dataset_path=$VALIDATION_FILE_PATH
- --hptune
EOF
gcloud ai hp-tuning-jobs create \
--region=# TODO \
--display-name=# TODO \
--config=# TODO \
--max-trial-count=# TODO \
--parallel-trial-count=# TODO
echo "JOB_NAME: $JOB_NAME"
def retrieve_best_trial_from_job_name(jobname):
# TODO
return best_trial
best_trial = retrieve_best_trial_from_job_name(JOB_NAME)
alpha = best_trial.parameters[0].value
max_iter = best_trial.parameters[1].value
TIMESTAMP = time.strftime("%Y%m%d_%H%M%S")
JOB_NAME = f"JOB_VERTEX_{TIMESTAMP}"
JOB_DIR = f"{JOB_DIR_ROOT}/{JOB_NAME}"
MACHINE_TYPE="n1-standard-4"
REPLICA_COUNT=1
WORKER_POOL_SPEC = f"""\
machine-type={MACHINE_TYPE},\
replica-count={REPLICA_COUNT},\
container-image-uri={IMAGE_URI}\
"""
ARGS = f"""\
--job_dir={JOB_DIR},\
--training_dataset_path={TRAINING_FILE_PATH},\
--validation_dataset_path={VALIDATION_FILE_PATH},\
--alpha={alpha},\
--max_iter={max_iter},\
--nohptune\
"""
!gcloud ai custom-jobs create \
--region={REGION} \
--display-name={JOB_NAME} \
--worker-pool-spec={WORKER_POOL_SPEC} \
--args={ARGS}
print("The model will be exported at:", JOB_DIR)
!gsutil ls $JOB_DIR
MODEL_NAME = "forest_cover_classifier_2"
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest"
)
SERVING_MACHINE_TYPE = "n1-standard-2"
uploaded_model = # TODO
endpoint = # TODO
instance = [
2841.0,
45.0,
0.0,
644.0,
282.0,
1376.0,
218.0,
237.0,
156.0,
1003.0,
"Commanche",
"C4758",
]
# TODO
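# One possible way to serve a prediction (sketch, left as comments since this is
# an exercise TODO; Endpoint.predict is the standard Vertex AI SDK call):
# prediction = endpoint.predict(instances=[instance])
# print(prediction.predictions)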
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure environment settings
Step2: We now create the ARTIFACT_STORE bucket if it's not there. Note that this bucket should be created in the region specified in the variable REGION (if you have already a bucket with this name in a different region than REGION, you may want to change the ARTIFACT_STORE name so that you can recreate a bucket in REGION with the command in the cell below).
Step3: Importing the dataset into BigQuery
Step4: Explore the Covertype dataset
Step5: Create training and validation splits
Step6: Create a validation split
Step7: Develop a training application
Step8: Convert all numeric features to float64
Step9: Run the pipeline locally.
Step10: Calculate the trained model's accuracy.
Step11: Prepare the hyperparameter tuning application.
Step12: Write the tuning script.
Step13: Package the script into a docker image.
Step14: Build the docker image.
Step15: Submit an Vertex AI hyperparameter tuning job
Step16: Exercise
Step17: Go to the Vertex AI Training dashboard and view the progression of the HP tuning job under "Hyperparameter Tuning Jobs".
Step18: You'll need to wait for the hyperparameter job to complete before being able to retrieve the best job by running the cell below.
Step21: Retrain the model with the best hyperparameters
Step22: Examine the training output
Step23: Deploy the model to Vertex AI Prediction
Step24: Uploading the trained model
Step25: Deploying the uploaded model
Step26: Serve predictions
|
3,885
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=42)
X.shape
plt.scatter(X[:, 0], X[:, 1]);
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3, random_state=42)
labels = kmeans.fit_predict(X)
labels
print("Did we get every label right?", np.all(y == labels))
print("How many did we get wrong?", np.sum(y != labels))
plt.scatter(X[:, 0], X[:, 1], c=labels);
plt.scatter(X[:, 0], X[:, 1], c=y);
from sklearn.metrics import confusion_matrix, accuracy_score
print('Accuracy percentage:', accuracy_score(y, labels))
print(confusion_matrix(y, labels))
np.mean(y == labels)
from sklearn.metrics import adjusted_rand_score
adjusted_rand_score(y, labels)
kmeans = KMeans(n_clusters=2, random_state=42)
labels = kmeans.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels);
kmeans.cluster_centers_
distortions = []
for i in range(1, 11):
km = KMeans(n_clusters=i,
random_state=0)
km.fit(X)
distortions.append(km.inertia_)
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.show()
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect number of clusters")
# Anisotropically distributed data
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropically distributed clusters")
# Unequal variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal variance")
# Unevenly sized clusters
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unequal size")
from sklearn.datasets import load_digits
digits = load_digits()
# ...
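# One possible completion (sketch): cluster the digits into their 10 classes and
# compare against the true labels with the adjusted Rand index imported earlier.
kmeans = KMeans(n_clusters=10, random_state=42)
digit_labels = kmeans.fit_predict(digits.data)
print(adjusted_rand_score(digits.target, digit_labels))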
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the scatter plot above, we can see three separate groups of data, and we would like to recover them using clustering (something like "discovering" the class labels, which we take for granted in a classification task).
Step2: We can obtain the labels for the data either by calling the fit method and then accessing the labels_ attribute of the KMeans estimator, or by calling fit_predict (which applies both steps in one go). Either way, the result contains the ID of the cluster each point is assigned to.
Step3: Let's visualize what we obtained
Step4: Comparing with the true labels
Step5: Examining the result graphically, it is clear that we could be satisfied with the results obtained, but in general we would like a quantitative evaluation. How about comparing our learned labels with the true labels?
Step6: <div class="alert alert-success">
Step7: One of the drawbacks of K-means is that we have to specify the number of clusters, which we often do not know a priori. For example, let's see what happens if we set k=2 for the synthetic dataset above
Step8: The elbow method
Step9: Then, we take the value at the bend of the elbow. As we can see, that value is k=3 in this case, which makes sense given our knowledge of the dataset.
Step10: Some important clustering methods
|
3,886
|
<ASSISTANT_TASK:>
Python Code:
import requests
response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&market=US')
Lil_data = response.json()
Lil_data.keys()
Lil_data['artists'].keys()
Lil_artists = Lil_data['artists']['items']
for artist in Lil_artists:
print(artist['name'], artist['popularity'])
Lil_artists = Lil_data['artists']['items']
for artist in Lil_artists:
print(artist['name'], artist['popularity'])
#joining
if len(artist['genres']) == 0:
print("No genres listed")
else:
genres = ", ".join(artist['genres'])
print("Genres: ", genres)
Lil_artists = Lil_data['artists']['items']
Lil_genres_list = []
for genres in Lil_artists:
Lil_genres_list = genres["genres"] + Lil_genres_list
print(Lil_genres_list)
Genre_list = [[x,Lil_genres_list.count(x)] for x in set(Lil_genres_list)]
print(Genre_list)
sorted(Genre_list, key = lambda x: int(x[1]), reverse=True)
Sorted_by_occurences_Genre_list = sorted(Genre_list, key = lambda x: int(x[1]), reverse=True)
print("The most frequent genre of the musicians called Lil is", Sorted_by_occurences_Genre_list[0])
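# Equivalent counting (sketch) with the standard library's Counter:
from collections import Counter
print(Counter(Lil_genres_list).most_common(1))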
Lil_artists = Lil_data['artists']['items']
for artist in Lil_artists:
if artist['genres'] == []:
print(artist['name'], artist['popularity'], "No genres listed.")
else:
print(artist['name'], artist['popularity'], artist['genres'])
for artist in Lil_artists:
if artist['popularity'] >= 72 and artist['name'] != 'Lil Wayne':
print(artist['name'])
#Better solution:
most_popular_name = ""
most_popular_score = 0
for artist in Lil_artists:
#print("Comparing", artist['popularity'], 'to', most_popular_score)
if artist['popularity'] > most_popular_score:
print("checking for Lil Wayne")
if artist['name'] == 'Lil Wayne':
print('go away')
else:
#The change you are keeping track of
#a.k.a. what you are keeping track of
print('not Lil Wayne, updating our notebook')
most_popular_name = artist['name']
most_popular_score = artist['popularity']
print(most_popular_name, most_popular_score)
####### This doesn't work yet -- sketch of the aggregation pattern
#name = 'Lil Soma'
#target_score = 72
#1 INITIAL CONDITION
#second_best_artists = []
#second_best_artists = ['Lil Yachty']
#Aggregation problem:
#when you're looping through a series of objects
#and sometimes you want to add one of those objects
#to a different list
#for artist in artists:
#    print('Looking at', artist['name'])
#2 CONDITION
#when we want someone on the list
#    if artist['popularity'] == 72:
#        print('!!! The artist popularity is 72.')
#        second_best_artists.append(artist)
Lil_data['artists'].keys()
for artist in Lil_artists:
    if artist['name'] == "Lil Wayne":
        print("Lil Wayne's popularity is", artist['popularity'])
        print("Lil Wayne's followers:", artist['followers'])
Lil_artists = Lil_data['artists']['items']
List_of_Followers = []
for artist in Lil_artists:
List_of_Followers.append(artist['followers']['total'])
print(List_of_Followers)
List_of_Followers.sort(reverse=True)
print(List_of_Followers)
Highest_Number_of_Followers = (List_of_Followers[0])
print(Highest_Number_of_Followers)
# An artist whose follower count equals the maximum has the most followers.
for artist in Lil_artists:
    if artist['followers']['total'] == List_of_Followers[0]:
        print(artist['name'], "has the most followers of all the Lils.")
for artist in Lil_artists:
if artist['name'] == "Lil' Kim":
print(artist['popularity'])
for artist in Lil_artists:
if artist['popularity'] > 62:
print(artist['name'], artist['popularity'])
for artist in Lil_artists:
print(artist['name'], artist['id'])
response = requests.get('https://api.spotify.com/v1/artists/5einkgXXrjhfYCyac1FANB/top-tracks?country=US')
Lil_Scrappy_data = response.json()
type(Lil_Scrappy_data)
response = requests.get('https://api.spotify.com/v1/artists/5qK5bOC6wLtuLhG5KvU17c/top-tracks?country=US')
Lil_Mama_data = response.json()
type(Lil_Mama_data)
Lil_Scrappy_data.keys()
Lil_Mama_data.keys()
type(Lil_Scrappy_data.keys())
type(Lil_Mama_data.keys())
Scrappy_tracks = Lil_Scrappy_data['tracks']
for tracks in Scrappy_tracks:
print(tracks['name'])
Mama_tracks = Lil_Mama_data['tracks']
for tracks in Mama_tracks:
print(tracks['name'])
explicit_track_scrappy = 0
non_explicit_track_scrappy = 0
unknown_scrappy = 0
for tracks in Scrappy_tracks:
if tracks['explicit'] == True:
explicit_track_scrappy = explicit_track_scrappy + 1
elif tracks['explicit'] == False:
non_explicit_track_scrappy = non_explicit_track_scrappy + 1
else:
unknown_scrappy = unknown_scrappy + 1
explicit_track_pop_total = 0
non_explicit_track_pop_total = 0
for tracks in Scrappy_tracks:
if tracks['explicit'] == True:
explicit_track_pop_total = explicit_track_pop_total + tracks['popularity']
elif tracks['explicit'] == False:
non_explicit_track_pop_total = non_explicit_track_pop_total + tracks['popularity']
explicit_track_duration_total = 0
non_explicit_track_duration_total = 0
for tracks in Scrappy_tracks:
if tracks['explicit'] == True:
explicit_track_duration_total = explicit_track_duration_total + tracks['duration_ms']
elif tracks['explicit'] == False:
non_explicit_track_duration_total = non_explicit_track_duration_total + tracks['duration_ms']
print("The average rating of explicit songs by Lil Scrappy is", round(explicit_track_pop_total / explicit_track_scrappy), ".")
print("The average rating of non-explicit songs by Lil Scrappy is", round(non_explicit_track_pop_total / non_explicit_track_scrappy), ".")
print("The duration of explicit song material of Lil Scrappy is", round(explicit_track_duration_total / 60000), "minutes, and of non explicit material is", round(non_explicit_track_duration_total / 60000), "minutes.")
explicit_track_Mama = 0
non_explicit_track_Mama = 0
unknown = 0
for tracks in Mama_tracks:
if tracks['explicit'] == True:
explicit_track_Mama = explicit_track_Mama + 1
elif tracks['explicit'] == False:
non_explicit_track_Mama = non_explicit_track_Mama + 1
else:
unknown = unknown + 1
explicit_track_pop_total_Mama = 0
non_explicit_track_pop_total_Mama = 0
for tracks in Mama_tracks:
if tracks['explicit'] == True:
explicit_track_pop_total_Mama = explicit_track_pop_total_Mama + tracks['popularity']
elif tracks['explicit'] == False:
non_explicit_track_pop_total_Mama = non_explicit_track_pop_total_Mama + tracks['popularity']
explicit_track_duration_total_Mama = 0
non_explicit_track_duration_total_Mama = 0
for tracks in Mama_tracks:
if tracks['explicit'] == True:
explicit_track_duration_total_Mama = explicit_track_duration_total_Mama + tracks['duration_ms']
elif tracks['explicit'] == False:
non_explicit_track_duration_total_Mama = non_explicit_track_duration_total_Mama + tracks['duration_ms']
print("The average rating of explicit songs by Lil Mama is", round(explicit_track_pop_total_Mama / explicit_track_Mama), ".")
print("The average rating of non-explicit songs by Lil Mama is", round(non_explicit_track_pop_total_Mama / non_explicit_track_Mama), ".")
print("The duration of explicit song material of Lil Mama is", round(explicit_track_duration_total_Mama / 60000), "minutes, and of non explicit material is", round(non_explicit_track_duration_total_Mama / 60000), "minutes.")
response = requests.get('https://api.spotify.com/v1/search?query=Biggie&type=artist&limit=50&market=US')
Biggie_data = response.json()
response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&market=US')
Lil_data = response.json()
Biggie_artists = Biggie_data['artists']['total']
Lil_artists = Lil_data['artists']['total']
print("There are", Biggie_artists, "artists named Biggie on Spotify and", Lil_artists, "named Lil",)
Total_Download_Time_Biggie = Biggie_artists / 50 * 5
Total_Download_Time_Lil = Lil_artists / 50 * 5
print("It would take", round(Total_Download_Time_Biggie), "seconds to download all the Biggie artists and", round(Total_Download_Time_Lil), "seconds to download the Lil artists." )
Lil_artists_popularity = Lil_data['artists']['items']
popularity_total = 0
for popularity in Lil_artists_popularity:
popularity_total = popularity_total + popularity['popularity']
print("The average rating for the top 50 artists called Lil is:", round(popularity_total / 50))
Biggie_artists_popularity = Biggie_data['artists']['items']
Biggie_popularity_total = 0
for popularity2 in Biggie_artists_popularity:
Biggie_popularity_total = Biggie_popularity_total + popularity2['popularity']
print("The average rating for the top 50 artists called Biggie is:", round(Biggie_popularity_total / 49) )
Lil_artists_popularity = Lil_data['artists']['items']
for popularity in Lil_artists_popularity:
print(popularity['name'], popularity['popularity'])
Biggie_popularity = Biggie_data['artists']['items']
for artist in Biggie_popularity:
print(artist['name'], artist['popularity'])
import csv
with open('Biggie.csv', 'w') as mycsvfile:
    thedatawriter = csv.writer(mycsvfile)
    thedatawriter.writerow(['name', 'popularity'])
    for artist in Biggie_popularity:
        # writerow expects a sequence per row, not a bare string
        thedatawriter.writerow([artist['name'], artist['popularity']])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) With "Lil Wayne" and "Lil Kim" there are a lot of "Lil" musicians. Do a search and print a list of 50 that are playable in the USA (or the country of your choice), along with their popularity score.
Step2: 2 a) What genres are most represented in the search results?
Step3: Counting the genres.
Step4: Sorting the genres by occurrences.
Step5: 2 b) Edit your previous printout to also display a list of their genres in the format "GENRE_1, GENRE_2, GENRE_3". If there are no genres, print "No genres listed".
Step6: 3 a) Use a for loop to determine who BESIDES Lil Wayne has the highest popularity rating.
Step7: 3 b) Is it the same artist who has the largest number of followers?
Step8: Creating a list of the follower counts, so we can sort them and find the highest.
Step9: Deciding which one is highest
Step10: 4) Print a list of Lil's that are more popular than Lil' Kim.
Step11: 5) Pick two of your favorite Lils to fight it out, and use their IDs to print out their top tracks.
Step12: 6) Will the world explode if a musician swears? Get an average popularity for their explicit songs vs. their non-explicit songs. How many minutes of explicit songs do they have? Non-explicit?
Step13: And this is the same for Lil Mama
Step14: 7 a) Since we're talking about Lils, what about Biggies? How many total "Biggie" artists are there? How many total "Lil"s? If you made 1 request every 5 seconds, how long would it take to download information on all the Lils vs the Biggies?
Step15: 8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average?
|
3,887
|
<ASSISTANT_TASK:>
Python Code:
# Import all ploting and scientific library,
# and embed figures in this file.
%pylab inline
# Package to manipulate dataframes.
import pandas as pd
# Nice looking plot functions.
import seaborn as sn
# The Pearson correlation function.
from scipy.stats import pearsonr
# Read the dataset.
df = pd.read_csv('data/gapminder.csv')
# Set the country name as the index of the dataframe.
df.index = df.country
# This column is no longer needed.
del df['country']
# Select only the variables we're interested.
df = df[['lifeexpectancy','incomeperperson']]
# Convert the types.
df.lifeexpectancy = pd.to_numeric(df.lifeexpectancy, errors='coerce')
df.incomeperperson = pd.to_numeric(df.incomeperperson, errors='coerce')
# Remove missing values.
df = df.dropna()
r = pearsonr(df.incomeperperson, df.lifeexpectancy)
print('Correlation between incomeperperson and lifeexpectancy: {}'.format(r))
print('Percentage of variability in the reponse variable given by the explanatory variable is {:2}%'.
format(round(r[0]**2*100,2)))
# Setting an apropriate size for the graph.
factor = 1.3
figsize(6*factor, 4*factor)
# Plot the graph.
sn.regplot(df.incomeperperson, df.lifeexpectancy);
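# Optional follow-up (sketch; assumes income is right-skewed and strictly
# positive, so a log scale often shows the relationship more clearly):
sn.regplot(np.log10(df.incomeperperson), df.lifeexpectancy);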
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pearson correlation $r$
Step2: As we can see above, $r = 0.60$ with $p = 1.06 \times 10^{-18}$ indicates a moderately strong correlation between life expectancy and income per person. Let's take a look at the scatter plot to see how this correlation is formed.
|
3,888
|
<ASSISTANT_TASK:>
Python Code:
import os
# Assumption: `dotenv_path` comes from python-dotenv, the usual pattern in
# cookiecutter-style data science projects.
from dotenv import find_dotenv
dotenv_path = find_dotenv()
PROJECT_DIR = os.path.dirname(dotenv_path)
RAW_DATA_DIR = PROJECT_DIR + os.environ.get("RAW_DATA_DIR")
INTERIM_DATA_DIR = PROJECT_DIR + os.environ.get("INTERIM_DATA_DIR")
files=os.environ.get("FILES").split()
print("Project directory is : {0}".format(PROJECT_DIR))
print("Raw data directory is : {0}".format(RAW_DATA_DIR))
print("Interim directory is : {0}".format(INTERIM_DATA_DIR))
# The following jupyter notebook magic makes the plots appear in the notebook.
# If you run in batch mode, you have to save your plots as images.
%matplotlib inline
# matplotlib.pyplot is traditionally imported as plt
import matplotlib.pyplot as plt
# numpy is imported as np
import numpy as np
# Pandas is traditionaly imported as pd.
import pandas as pd
from pylab import rcParams
# some display options to size the figures. feel free to experiment
pd.set_option('display.max_columns', 25)
rcParams['figure.figsize'] = (17, 7)
family=pd.read_csv(RAW_DATA_DIR+'/familyxx.csv')
samadult=pd.read_csv(RAW_DATA_DIR+'/samadult.csv')
# Start with a data frame to collect all the data in
df = pd.DataFrame()
# Calculate Kessler 6
# How often did you feel:
# nervous, restless, hopeless, worthless, everything is an effort, so sad nothing mattered.
# ASINERV, ASIRSTLS, ASIHOPLS, ASIWTHLS, ASIEFFRT, ASISAD
kessler_6_questions=['ASINERV', 'ASIRSTLS', 'ASIHOPLS', 'ASIWTHLS', 'ASIEFFRT', 'ASISAD']
# 1 ALL of the time
# 2 MOST of the time
# 3 SOME of the time
# 4 A LITTLE of the time
# 5 NONE of the time
# 7 Refused
# 8 Not ascertained
# 9 Don't know
# These have to be encoded as:
# 7, 8, 9 -> NaN
# 5 -> 0
# 4 -> 1
# 3 -> 2
# 2 -> 3
# 1 -> 4
kessler_6_map = { 1:4, 2:3, 3:2, 4:1, 5:0}
kessler_6=pd.DataFrame()
for col in kessler_6_questions:
kessler_6[col]=[ kessler_6_map.get(x, None) for x in samadult[col]]
# Note: pandas' sum skips NaN, so respondents with missing items are scored on
# their answered questions only.
df['SPD'] = kessler_6.sum(axis=1) >= 13
df['SPD'] = np.where(df['SPD'], 'Yes', 'No')
del kessler_6
df.head(5)
# Arthritis Status
arth_map= {1:'Yes', 2:'No'}
df['ARTH1']=[ arth_map.get(x, None) for x in samadult['ARTH1']]
# the following variables are used for the chronic condition count
straight_chronic_condition_questions = ['CHDEV','DIBEV','HYPDIFV', 'KIDWKYR']
cancer_nonmelanoma_skin= ['CANEV','CNKIND22'] # CANEV minus CNKIND22
hep_liver=['AHEP','LIVEV']
respiratory=['AASMEV','EPHEV', 'CBRCHYR']
stroke=['STREV','ALCHRC8']
# Create a temporary dataframe and collect the straight forward conditions
chronic_ind=pd.DataFrame()
# this could be a bit too liberal with the Unknown and Refused to answer values
for col in straight_chronic_condition_questions:
chronic_ind[col]=samadult[col]==1
# Assume CANCER is false. Set to True for those diagnosed, and reset a few that were CNKIND22
chronic_ind['CANCER']=False
chronic_ind.loc[samadult['CANEV']==1,'CANCER'] = True
# override a few that have nonmelanoma skin
chronic_ind.loc[samadult['CNKIND22']==1, 'CANCER'] = False
# Assume Hepatitis or Liver condition is false and then set to True if either is reported
chronic_ind['HEPLIVER']=False
chronic_ind.loc[(samadult['AHEP']==1) | (samadult['LIVEV']==1), 'HEPLIVER'] = True
# Assume Respiratory condition is False and set to True if either of the three is reported
chronic_ind['RESPIRATORY']=False
chronic_ind.loc[(samadult['AASMEV']==1) | (samadult['EPHEV']==1) | (samadult['CBRCHYR']==1), 'RESPIRATORY'] = True
# Assume Stroke condition is false and then set to True if either flag is reported
chronic_ind['STROKE']=False
chronic_ind.loc[(samadult['STREV']==1) | (samadult['ALCHRC8']==1), 'STROKE'] = True
chronic_ind.head()
# Now count the chronic conditions and assign to df
chronic_ind['CHRONIC_CT']=np.array(np.sum(chronic_ind, axis=1))
chron_map = {0:'0',1:'1', 2:'2'}
df['CHRONIC_CT']=[chron_map.get(x, '>=3') for x in chronic_ind['CHRONIC_CT']]
del chronic_ind
df.head(10)
# General Health Status does not exist in the data as it does in the study
# (Very Good/Excellent, Good, Poor/Fair); there is only an indicator of whether
# it got worse, stayed the same, or got better. We will use it as a proxy.
status_map={1:"Very Good", 2:"Poor", 3: "Good"}
df['GENERAL_HEALTH_STATUS']=[status_map.get(x, None) for x in samadult['AHSTATYR']]
# BMI
bmi=pd.DataFrame()
bmi['BMI']=samadult['BMI']
bmi.loc[bmi['BMI'] < 2500, 'BMI_C'] = '<25'
bmi.loc[(bmi['BMI'] >= 2500)&(bmi['BMI'] < 3000), 'BMI_C'] = '25<30'
bmi.loc[(bmi['BMI'] >= 3000)&(bmi['BMI'] < 9999), 'BMI_C'] = '>30'
df['BMI_C']=bmi['BMI_C']
del bmi
def determine_activity(x):
minutes = 0
if x['VIGLNGTP']==1:
minutes = minutes + x['VIGLNGNO']*2
elif x['VIGLNGTP']==2:
minutes = minutes + x['VIGLNGNO']*120
if x['MODLNGTP']==1:
minutes = minutes + x['MODLNGNO']
elif x['MODLNGTP']==2:
minutes = minutes + x['MODLNGNO']*60
return 'Meets' if minutes >= 150 else 'Does not meet'
physical_activity=pd.DataFrame()
physical_activity=samadult[['VIGLNGNO','VIGLNGTP', 'MODLNGNO', 'MODLNGTP']].copy()
physical_activity['ACTIVITY']=physical_activity.apply(determine_activity, axis=1)
df['ACTIVITY']=physical_activity['ACTIVITY']
del physical_activity
df.head(20)
# Age
age=pd.DataFrame()
age['AGE_P']=samadult['AGE_P']
age.loc[age['AGE_P'] < 45, 'AGE_C'] = '18-44'
age.loc[(age['AGE_P'] >= 45)&(age['AGE_P'] < 65), 'AGE_C'] = '45-64'
age.loc[age['AGE_P'] >= 65, 'AGE_C'] = '65-'
df['AGE_C']=age['AGE_C']
del age
# Sex
df['SEX']=[ 'Male' if x == 1 else 'Female' for x in samadult['SEX']]
# Race. Not exactly a match with the study. Not sure why.
# RACERPI2
race_map= {1: 'White', 2: 'Black/African American', 3:'AIAN', 4: 'Asian',5: 'not releasable',6: 'Multiple'}
df['RACE']=[ race_map.get(x, None) for x in samadult['RACERPI2']]
# Educational level:
# Less than high school
# High school diploma
# Some college or Associates degree
# College or greater
# Can't find it in data?
# Employment status: complex between workstatus and why not worked last week, logic is not described
# Maybe at least get "Out of Work", "Retired", "Other"?
# marital status
# R_MARITL
# 0 Under 14 years -> will combine that with Never Married
# 1 Married - spouse in household \
# 2 Married - spouse not in household > -- will combine these
# 3 Married - spouse in household unknown /
# 4 Widowed
# 5 Divorced \ will combine these
# 6 Separated /
# 7 Never married
# 8 Living with partner
# 9 Unknown marital status -> will combine with 7
marital_map = { 0: "Never Married"
, 1: "Married"
, 2: "Married"
, 3: "Married"
, 4: "Widowed"
, 5: "Divorced/Separated"
, 6: "Divorced/Separated"
, 7: "Never Married"
, 8: "Living with Partner"
, 9: "Never Married"}
df['MARITAL_STATUS']=[ marital_map.get(x, "Never Married") for x in samadult['R_MARITL']]
# Functional limitation score
fl_columns=['FLWALK','FLCLIMB','FLSTAND','FLSIT','FLSTOOP','FLREACH','FLGRASP','FLCARRY','FLPUSH']
fl_cols=samadult[fl_columns].copy()
for col in fl_columns:
fl_cols.loc[fl_cols[col] >= 6, col] = 0  # treat codes of 6 and above (unknown/refused) as no limitation for this item
fl_cols['FL_AVG']=fl_cols.mean(axis=1)
fl_cols.loc[fl_cols['FL_AVG'] == 0,'FUNC_LIMIT'] = 'None'
fl_cols.loc[(fl_cols['FL_AVG'] > 0)&(fl_cols['FL_AVG'] <=1),'FUNC_LIMIT'] = 'Low'
fl_cols.loc[(fl_cols['FL_AVG'] > 1)&(fl_cols['FL_AVG'] <=2),'FUNC_LIMIT'] = 'Medium'
fl_cols.loc[fl_cols['FL_AVG'] > 2,'FUNC_LIMIT'] = 'High'
df['FUNC_LIMIT']=fl_cols['FUNC_LIMIT']
del fl_cols
# Social participation restriction
# We defined social participation restriction as
# difficulty or inability to shop, go to events, or participate in
# social activities without special equipment, per previously
# published analyses.
# FLSHOP and FLSOCL
restr_map={1:"Yes", 2:"Yes", 3: "Yes", 4: "Yes"}
social_cols=pd.DataFrame()
social_cols['FLSHOP']=[restr_map.get(x, 'No') for x in samadult['FLSHOP']]
social_cols['FLSOCL']=[restr_map.get(x, 'No') for x in samadult['FLSOCL']]
social_cols.loc[(social_cols['FLSHOP']=='Yes')|(social_cols['FLSOCL']=='Yes'), 'SOC_RESTR']='Yes'
social_cols.loc[(social_cols['FLSHOP']=='No')&(social_cols['FLSOCL']=='No'), 'SOC_RESTR']='No'
df['SOC_RESTR']=social_cols['SOC_RESTR']
#Could not afford mental health care, past 12 months
# AHCAFYR2
# No = 2
# Yes = 1
df['NOT_AFFORD']=[ 'Yes' if x == 1 else 'No' for x in samadult['AHCAFYR2']]
#Seen a mental health professional, past 12 months
# AHCSYR1
#No = 2
#Yes = 1
df['SEEN_MENTAL_DR']=[ 'Yes' if x == 1 else 'No' for x in samadult['AHCSYR1']]
df.head(34)
#From Familyxx get poverty ratio
fam_df=pd.DataFrame()
ratio_map={
1: '<1' # Under 0.50
,2: '<1' # 0.50 - 0.74
,3: '<1' # 0.75 - 0.99
,4: '1 to <2' # 1.00 - 1.24
,5: '1 to <2' # 1.25 - 1.49
,6: '1 to <2' # 1.50 - 1.74
,7: '1 to <2' # 1.75 - 1.99
,8: '>=2' # 2.00 - 2.49
,9: '>=2' # 2.50 - 2.99
,10: '>=2' # 3.00 - 3.49
,11: '>=2' # 3.50 - 3.99
,12: '>=2' # 4.00 - 4.49
,13: '>=2' # 4.50 - 4.99
,14: '>=2' # 5.00 and over
,15: '<1' # Less than 1.00 (no further detail)
,16: '1 to <2' # 1.00 - 1.99 (no further detail)
,17: '>=2' # 2.00 and over (no further detail)
,96: '1 to <2' # Undefinable
,99: '1 to <2' # Unknown
}
fam_df['POV_RATIO']=[ratio_map.get(x, None) for x in family['RAT_CAT4']]
# Just going to go for Yes and No and any unknown/refused as No
# Health insurance
#Any private
#Public only
# Not covered
# FHICOVYN
fam_df['INSURANCE']=['Yes' if x == 1 else 'No' for x in family['FHICOVYN']]
df['HHX']=samadult['HHX']
df['FMX']=samadult['FMX']
fam_df['HHX']=family['HHX']
fam_df['FMX']=family['FMX']
joined_df=pd.merge(df, fam_df, on=['HHX','FMX'],how='left', sort=False)
joined_df.drop(['HHX','FMX'], axis=1,inplace=True )
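# Sanity check (illustrative): assuming (HHX, FMX) uniquely identifies a family,
# the left join preserves the person-level row count.
assert len(joined_df) == len(df)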
joined_df.head()
df=joined_df
df.to_csv(INTERIM_DATA_DIR+'/arthritis_study.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing pandas and matplotlib.pyplot
Step2: Reading a file in Pandas
Step3: Mental health conditions
Step4: Arthritis indicator itself is very simple
Step5: Chronic condition count
Step6: Now count the TRUE values over this dataframe.
Step7: Another Pandas manipulation trick
Step8: Physical Activity Level
Step9: Similar activities for Age, Sex, and Race
Step10: Some fields are not found or hard to reconstruct
Step11: Do the same for the other fields
Step12: What do we have so far.
Step13: Now get the Insurance and Poverty Ratio fields from the Family file.
Step14: This is how you join two datasets in Pandas.
Step15: You then do the merge, with the join columns indicated in the on= parameter; it is important to specify a left join so that you don't lose any people whose family can't be found.
Step16: Save the result in the INTERIM data directory
|
3,889
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
# TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE
# Note that this is necessary for parallel execution amongst other things...
# os.environ['SNORKELDB'] = 'postgres:///snorkel-intro'
from snorkel import SnorkelSession
session = SnorkelSession()
# Here, we just set how many documents we'll process for automatic testing- you can safely ignore this!
n_docs = 500 if 'CI' in os.environ else 2591
from snorkel.parser import TSVDocPreprocessor
doc_preprocessor = TSVDocPreprocessor('data/articles.tsv', max_docs=n_docs)
from snorkel.parser.spacy_parser import Spacy
from snorkel.parser import CorpusParser
corpus_parser = CorpusParser(parser=Spacy())
%time corpus_parser.apply(doc_preprocessor, count=n_docs)
from snorkel.models import Document, Sentence
print("Documents:", session.query(Document).count())
print("Sentences:", session.query(Sentence).count())
from snorkel.models import candidate_subclass
Spouse = candidate_subclass('Spouse', ['person1', 'person2'])
from snorkel.candidates import Ngrams, CandidateExtractor
from snorkel.matchers import PersonMatcher
ngrams = Ngrams(n_max=7)
person_matcher = PersonMatcher(longest_match_only=True)
cand_extractor = CandidateExtractor(Spouse, [ngrams, ngrams], [person_matcher, person_matcher])
from snorkel.models import Document
from util import number_of_people
docs = session.query(Document).order_by(Document.name).all()
train_sents = set()
dev_sents = set()
test_sents = set()
for i, doc in enumerate(docs):
for s in doc.sentences:
if number_of_people(s) <= 5:
if i % 10 == 8:
dev_sents.add(s)
elif i % 10 == 9:
test_sents.add(s)
else:
train_sents.add(s)
%%time
for i, sents in enumerate([train_sents, dev_sents, test_sents]):
cand_extractor.apply(sents, split=i)
print("Number of candidates:", session.query(Spouse).filter(Spouse.split == i).count())
from util import load_external_labels
%time missed = load_external_labels(session, Spouse, annotator_name='gold')
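# Illustrative follow-up: peek at a few extracted training candidates using the
# same SQLAlchemy session as above.
for c in session.query(Spouse).filter(Spouse.split == 0).limit(3):
    print(c)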
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the Corpus
Step2: Running a CorpusParser
Step3: We can then use simple database queries (written in the syntax of SQLAlchemy, which Snorkel uses) to check how many documents and sentences were parsed
Step4: Generating Candidates
Step5: Writing a basic CandidateExtractor
Step6: Next, we'll split up the documents into train, development, and test splits; and collect the associated sentences.
Step7: Finally, we'll apply the candidate extractor to the three sets of sentences. The results will be persisted in the database backend.
Step8: Loading Gold Labels
|
3,890
|
<ASSISTANT_TASK:>
Python Code:
from dolo import *
import numpy as np
import matplotlib.pyplot as plt
filename = ('https://raw.githubusercontent.com/EconForge/dolo/master/examples/models/rbc.yaml')
pcat(filename) # Print the model file
model = yaml_import(filename)
print(model)
dr_pert = approximate_controls(model, order=1)
dr_global_spl = time_iteration(model, pert_order=1, verbose=False, interp_type="spline", interp_orders=[3,3])
dr_global_smol = time_iteration(model, pert_order=1, verbose=False, interp_type="smolyak", smolyak_order=3)
%matplotlib inline
# Get bounds for the approximations
spl_bounds = [np.min(dr_global_spl.grid[:, 1]), np.max(dr_global_spl.grid[:, 1])]
smol_bounds = [np.min(dr_global_smol.grid[:, 1]), np.max(dr_global_smol.grid[:, 1])]
plt.figure(figsize=(17, 7))
plt.subplot(121)
plot_decision_rule(model, dr_global_spl, 'k', 'i', label='Global: spline', bounds=spl_bounds, linewidth=3, alpha=0.5,color='r')
plot_decision_rule(model, dr_global_smol, 'k', 'i', label='Global: Smolyak', bounds=spl_bounds, linewidth=3, alpha=0.5,color='b')
plot_decision_rule(model, dr_pert, 'k', 'i', label='Linear perturbation', bounds=spl_bounds, linewidth=3, alpha=0.5,color='g')
plt.ylabel('i')
plt.title('Investment')
plt.legend()
plt.grid()
plt.subplot(122)
plot_decision_rule(model, dr_global_spl, 'k', 'n', label='Global: spline', bounds=spl_bounds, linewidth=3, alpha=0.5,color='r')
plot_decision_rule(model, dr_global_smol, 'k', 'n', label='Global: Smolyak', bounds=spl_bounds, linewidth=3, alpha=0.5,color='b')
plot_decision_rule(model, dr_pert, 'k', 'n', label='Linear perturbation', bounds=spl_bounds, linewidth=3, alpha=0.5,color='g')
plt.ylabel('n')
plt.title('Labour')
plt.legend()
plt.grid()
plt.show()
original_delta=model.calibration_dict['delta']
drs = []
delta_values = np.linspace(0.01, 0.04,5)
for val in delta_values:
model.set_calibration(delta=val) # Change calibration
drs.append(approximate_controls(model, order=1)) # appending another model object to the list
plt.figure(figsize=(17, 7))
# Plot investment decision rules
plt.subplot(121)
for i,dr in enumerate(drs):
plot_decision_rule(model, dr, 'k', 'i',
label='$\delta={}$'.format(delta_values[i]),
bounds=spl_bounds)
plt.ylabel('i')
plt.title('Investment')
plt.grid()
# Plot labor decision rules
plt.subplot(122)
for i,dr in enumerate(drs):
plot_decision_rule(model, dr, 'k', 'n',
label='$\delta={}$'.format(delta_values[i]),
bounds=spl_bounds)
plt.ylabel('n')
plt.title('Labour')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.grid()
plt.show()
# Reset model back to the original calibration
model.set_calibration(delta=original_delta)
# Reload the model with the original calibration
model = yaml_import(filename)
dr_pert = approximate_controls(model, order=1)
dr_global_spl = time_iteration(model, pert_order=1, verbose=False, interp_type="spline", interp_orders=[3,3])
s0 = model.calibration['states']
sigma2_ez = model.covariances
print(str(model.symbols['states'])+'='+str(s0)) # Print the steady state values of each state variable
s1 = s0.copy() # Copy steady states
s1[0] *= 1.05 # Set size of shock to 5% larger than steady state value
irf_glob = simulate(model, dr_global_spl, s1, n_exp=0, horizon=40 ) # Simulate spline model
irf_pert = simulate(model, dr_pert, s1, n_exp=0, horizon=40 ) # Simulate linear model
plt.figure(figsize=(15, 7))
plt.subplot(221)
plt.plot(irf_glob['z'],linewidth=2, alpha=0.75,color='b')
plt.title('Productivity')
plt.grid()
plt.subplot(222)
plt.plot(irf_glob['i'],linewidth=2, alpha=0.75,color='b',label='Spline')
plt.plot(irf_pert['i'],linewidth=2, alpha=0.75,color='r',label='Linear')
plt.title('Investment')
plt.legend()
plt.grid()
plt.subplot(223)
plt.plot(irf_glob['n'],linewidth=2, alpha=0.75,color='b',label='Spline')
plt.plot(irf_pert['n'],linewidth=2, alpha=0.75,color='r',label='Linear')
plt.title('Labour')
plt.legend()
plt.grid()
plt.subplot(224)
plt.plot(irf_glob['c'],linewidth=2, alpha=0.75,color='b',label='Spline')
plt.plot(irf_pert['c'],linewidth=2, alpha=0.75,color='r',label='Linear')
plt.title('Consumption')
plt.legend()
plt.grid()
plt.show()
s1 = s0.copy() # Copy steady states
s1[0] *= 1.2 # Set size of shock to 20% larger than steady state value
irf_glob = simulate(model, dr_global_spl, s1, n_exp=0, horizon=40 ) # Simulate spline model
irf_pert = simulate(model, dr_pert, s1, n_exp=0, horizon=40 ) # Simulate linear model
plt.figure(figsize=(15, 7))
plt.subplot(221)
plt.plot(irf_glob['z'],linewidth=2, alpha=0.75,color='b')
plt.title('Productivity')
plt.grid()
plt.subplot(222)
plt.plot(irf_glob['i'],linewidth=2, alpha=0.75,color='b',label='Spline')
plt.plot(irf_pert['i'],linewidth=2, alpha=0.75,color='r',label='Linear')
plt.title('Investment')
plt.legend()
plt.grid()
plt.subplot(223)
plt.plot(irf_glob['n'],linewidth=2, alpha=0.75,color='b',label='Spline')
plt.plot(irf_pert['n'],linewidth=2, alpha=0.75,color='r',label='Linear')
plt.title('Labour')
plt.legend()
plt.grid()
plt.subplot(224)
plt.plot(irf_glob['c'],linewidth=2, alpha=0.75,color='b',label='Spline')
plt.plot(irf_pert['c'],linewidth=2, alpha=0.75,color='r',label='Linear')
plt.title('Consumption')
plt.legend()
plt.grid()
plt.show()
# Reload the model with the original calibration
model = yaml_import(filename)
# dr_pert = approximate_controls(model, order=1)
dr_global_spl = time_iteration(model, pert_order=1, verbose=False, interp_type="spline", interp_orders=[4,4])
from dolo.algos.dtcscc.accuracy import omega
err_pert = omega(model, dr_pert)
print("Linear approximation\n")
print(err_pert)
print("--------------\n")
err_spl = omega(model, dr_global_spl)
print("Cubic spline approximation\n")
print(err_spl)
print("--------------\n")
err_smol = omega(model, dr_global_smol)
print("Smolyak approximation\n")
print(err_smol)
a = err_pert['domain'].a
b = err_pert['domain'].b
orders = err_pert['domain'].orders
errors = np.concatenate((err_pert['errors'].reshape( orders.tolist()+[-1] ),
err_spl['errors'].reshape( orders.tolist()+[-1] ),
err_smol['errors'].reshape( orders.tolist()+[-1] )),
2)
plt.figure(figsize=(15,8))
titles=["Investment demand pertubation errors",
"Labor supply pertubation errors",
"Investment demand spline errors",
"Labor supply spline errors",
"Investment demand Smolyak errors",
"Labor supply Smolyak errors"]
for i in range(6):
plt.subplot(3,2,i+1)
imgplot = plt.imshow(errors[:,:,i], origin='lower',
extent=( a[0], b[0], a[1], b[1]), aspect='auto')
imgplot.set_clim(0,3e-4)
plt.colorbar()
plt.xlabel('z')
plt.ylabel('k')
plt.title(titles[i])
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first thing we'll want to do is read, import, and check the steady state of the model object. Doing this with yaml_import, we'll be able to see what "kind" of model has been imported. In this case we have a Discrete Transition Continuous State Continuous Controls model.
Step2: Let's take a look at the residuals from the solution for the steady state of the model
Step3: Approximate model solutions
Step4: Next, let's find a global solution to the model. The elements of the code are as follows
Step5: Plotting decision rules
Step6: Comparative statics
Step7: Simulations/Impulse response functions
Step8: Now, we start the simulation at the model steady state, and then get the model shocks.
Step9: We can simulate the model easily using the simulate function. This is very similar to Dynare's stoch_simul command
Step10: Now let's plot the impulse responses to a 5% shock to productivity
Step11: Now let's try again with a 20% shock. We can see how different the spline and linear approximations are in the face of large shocks
Step12: Approximation errors
Step13: We can also visualize the errors over the state space
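For a quick numeric summary of the same comparison (a minimal sketch, assuming only the err_* dictionaries returned by omega above, which expose the 'errors' arrays used in the plots):
# Worst-case optimality errors per approximation method
for name, err in [('perturbation', err_pert), ('spline', err_spl), ('smolyak', err_smol)]:
    print('{:>12s}: max error = {:.2e}'.format(name, np.max(err['errors'])))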
|
3,891
|
<ASSISTANT_TASK:>
Python Code::
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Load the input image ('input.jpg' is a placeholder path) and convert it to
# grayscale, which the Haar cascade detector expects
image = cv2.imread('input.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
    start_point, end_point = (x, y), (x + w, y + h)
    cv2.rectangle(image, pt1=start_point, pt2=end_point, color=(255, 0, 0), thickness=2)
cv2.imshow('img', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Detect faces in an image with an OpenCV Haar cascade classifier and draw a bounding box around each detected face.
|
3,892
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import re
import shutil
import string
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Embedding, GlobalAveragePooling1D
from tensorflow.keras.layers import TextVectorization
url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
dataset = tf.keras.utils.get_file("aclImdb_v1.tar.gz", url,
untar=True, cache_dir='.',
cache_subdir='')
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
os.listdir(dataset_dir)
train_dir = os.path.join(dataset_dir, 'train')
os.listdir(train_dir)
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
batch_size = 1024
seed = 123
train_ds = tf.keras.utils.text_dataset_from_directory(
'aclImdb/train', batch_size=batch_size, validation_split=0.2,
subset='training', seed=seed)
val_ds = tf.keras.utils.text_dataset_from_directory(
'aclImdb/train', batch_size=batch_size, validation_split=0.2,
subset='validation', seed=seed)
for text_batch, label_batch in train_ds.take(1):
for i in range(5):
print(label_batch[i].numpy(), text_batch.numpy()[i])
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Embed a 1,000 word vocabulary into 5 dimensions.
embedding_layer = tf.keras.layers.Embedding(1000, 5)
result = embedding_layer(tf.constant([1, 2, 3]))
result.numpy()
result = embedding_layer(tf.constant([[0, 1, 2], [3, 4, 5]]))
result.shape
# Create a custom standardization function to strip HTML break tags '<br />'.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
return tf.strings.regex_replace(stripped_html,
'[%s]' % re.escape(string.punctuation), '')
# Vocabulary size and number of words in a sequence.
vocab_size = 10000
sequence_length = 100
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Note that the layer uses the custom standardization defined above.
# Set maximum_sequence length as all samples are not of the same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
# Make a text-only dataset (no labels) and call adapt to build the vocabulary.
text_ds = train_ds.map(lambda x, y: x)
vectorize_layer.adapt(text_ds)
embedding_dim=16
model = Sequential([
vectorize_layer,
Embedding(vocab_size, embedding_dim, name="embedding"),
GlobalAveragePooling1D(),
Dense(16, activation='relu'),
Dense(1)
])
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_ds,
validation_data=val_ds,
epochs=15,
callbacks=[tensorboard_callback])
model.summary()
#docs_infra: no_execute
%load_ext tensorboard
%tensorboard --logdir logs
weights = model.get_layer('embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Word embeddings
Step2: Download the IMDb Dataset
Step3: Take a look at the train/ directory. It has pos and neg folders with movie reviews labelled as positive and negative respectively. You will use reviews from pos and neg folders to train a binary classification model.
Step4: The train directory also has additional folders which should be removed before creating training dataset.
Step5: Next, create a tf.data.Dataset using tf.keras.utils.text_dataset_from_directory. You can read more about using this utility in this text classification tutorial.
Step6: Take a look at a few movie reviews and their labels (1 = positive, 0 = negative)
Step7: Configure the dataset for performance
Step8: Using the Embedding layer
Step9: When you create an Embedding layer, the weights for the embedding are randomly initialized (just like any other layer). During training, they are gradually adjusted via backpropagation. Once trained, the learned word embeddings will roughly encode similarities between words (as they were learned for the specific problem your model is trained on).
Step10: For text or sequence problems, the Embedding layer takes a 2D tensor of integers, of shape (samples, sequence_length), where each entry is a sequence of integers. It can embed sequences of variable lengths. You could feed into the embedding layer above batches with shapes (32, 10) (batch of 32 sequences of length 10) or (64, 15) (batch of 64 sequences of length 15).
Step11: When given a batch of sequences as input, an embedding layer returns a 3D floating point tensor, of shape (samples, sequence_length, embedding_dimensionality). To convert from this sequence of variable length to a fixed representation there are a variety of standard approaches. You could use an RNN, Attention, or pooling layer before passing it to a Dense layer. This tutorial uses pooling because it's the simplest. The Text Classification with an RNN tutorial is a good next step.
Step12: Create a classification model
Step13: Compile and train the model
Step14: Compile and train the model using the Adam optimizer and BinaryCrossentropy loss.
Step15: With this approach the model reaches a validation accuracy of around 78% (note that the model is overfitting since training accuracy is higher).
Step16: Visualize the model metrics in TensorBoard.
Step17: Retrieve the trained word embeddings and save them to disk
Step18: Write the weights to disk. To use the Embedding Projector, you will upload two files in tab separated format: a file of vectors (the embeddings) and a file of metadata (the words)
Step19: If you are running this tutorial in Colaboratory, you can use the following snippet to download these files to your local machine (or use the file browser, View -> Table of contents -> File browser).
|
3,893
|
<ASSISTANT_TASK:>
Python Code:
import suspect
import numpy as np
from matplotlib import pyplot as plt
%matplotlib nbagg
data = suspect.io.load_rda("/home/jovyan/suspect/tests/test_data/siemens/SVS_30.rda")
# create a parameters dictionary to set the basis set to use
params = {
"FILBAS": "/path/to/lcmodel/basis.BASIS"
}
suspect.io.lcmodel.write_all_files("lcmodel_data/example.RAW", data, params=params)
!ls lcmodel_data/
!cat lcmodel_data/example_sl0.CONTROL
# create a parameters dictionary to set the basis set to use
params = {
"FILBAS": "/path/to/lcmodel/basis.BASIS",
"LCSV": True
}
suspect.io.lcmodel.write_all_files("lcmodel_data/example.RAW", data, params=params)
!cat lcmodel_data/example_sl0.CONTROL
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: LCModel
Step2: We can use some IPython magic to show the files that were created
Step3: and to look at the contents of the .CONTROL file
Step4: The .CONTROL file contains all the parameters necessary for LCModel to process the file, including the path we specified to the correct basis set. Once the .RAW and .CONTROL files are generated, it only remains to run the LCModel program on the command line, passing in the .CONTROL file like this
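A typical invocation looks like the following (a sketch — the exact binary path depends on your LCModel installation, commonly under ~/.lcmodel/bin):
!~/.lcmodel/bin/lcmodel < lcmodel_data/example_sl0.CONTROL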
|
3,894
|
<ASSISTANT_TASK:>
Python Code:
from quantopian.pipeline import CustomFactor, Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.research import run_pipeline
import numpy
class StdDev(CustomFactor):
def compute(self, today, asset_ids, out, values):
# Calculates the column-wise standard deviation, ignoring NaNs
out[:] = numpy.nanstd(values, axis=0)
def make_pipeline():
std_dev = StdDev(inputs=[USEquityPricing.close], window_length=5)
return Pipeline(
columns={
'std_dev': std_dev
}
)
result = run_pipeline(make_pipeline(), '2015-05-05', '2015-05-05')
result
class TenDayMeanDifference(CustomFactor):
# Default inputs.
inputs = [USEquityPricing.close, USEquityPricing.open]
window_length = 10
def compute(self, today, asset_ids, out, close, open):
# Calculates the column-wise mean difference, ignoring NaNs
out[:] = numpy.nanmean(close - open, axis=0)
# Computes the 10-day mean difference between the daily open and close prices.
close_open_diff = TenDayMeanDifference()
# Computes the 10-day mean difference between the daily high and low prices.
high_low_diff = TenDayMeanDifference(inputs=[USEquityPricing.high, USEquityPricing.low])
class Momentum(CustomFactor):
# Default inputs
inputs = [USEquityPricing.close]
# Compute momentum
def compute(self, today, assets, out, close):
out[:] = close[-1] / close[0]
ten_day_momentum = Momentum(window_length=10)
twenty_day_momentum = Momentum(window_length=20)
positive_momentum = ((ten_day_momentum > 1) & (twenty_day_momentum > 1))
def make_pipeline():
ten_day_momentum = Momentum(window_length=10)
twenty_day_momentum = Momentum(window_length=20)
positive_momentum = ((ten_day_momentum > 1) & (twenty_day_momentum > 1))
std_dev = StdDev(inputs=[USEquityPricing.close], window_length=5)
return Pipeline(
columns={
'std_dev': std_dev,
'ten_day_momentum': ten_day_momentum,
'twenty_day_momentum': twenty_day_momentum
},
screen=positive_momentum
)
result = run_pipeline(make_pipeline(), '2015-05-05', '2015-05-05')
result
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's define our custom factor to calculate the standard deviation over a trailing window using numpy.nanstd
Step2: Finally, let's instantiate our factor in make_pipeline()
Step3: When this pipeline is run, StdDev.compute() will be called every day with data as follows (a shape sketch is given after these steps)
Step4: Default Inputs
Step5: <i>Remember in this case that close and open are each 10 x ~8000 2D numpy arrays.</i>
Step6: The defaults can be manually overridden by specifying arguments in the constructor call.
Step7: Further Example
Step8: Now, let's instantiate our Momentum factor (twice) to create a 10-day momentum factor and a 20-day momentum factor. Let's also create a positive_momentum filter returning True for securities with both a positive 10-day momentum and a positive 20-day momentum.
Step9: Next, let's add our momentum factors and our positive_momentum filter to make_pipeline. Let's also pass positive_momentum as a screen to our pipeline.
Step10: Running this pipeline outputs the standard deviation and each of our momentum computations for securities with positive 10-day and 20-day momentum.
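To make the compute() contract from Step3 concrete, here is a sketch of the shapes involved (the values below are illustrative — the real arrays are supplied by the pipeline engine):
import numpy as np
values = np.random.rand(5, 8000)    # window_length rows x ~8000 securities (columns)
out = np.empty(8000)                # one output slot per security
out[:] = np.nanstd(values, axis=0)  # exactly what StdDev.compute writes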
|
3,895
|
<ASSISTANT_TASK:>
Python Code:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Any import of metpy will activate the accessors
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.units import units
# Open the netCDF file as a xarray Dataset
data = xr.open_dataset(get_test_data('irma_gfs_example.nc', False))
# View a summary of the Dataset
print(data)
# To parse the full dataset, we can call parse_cf without an argument, and assign the returned
# Dataset.
data = data.metpy.parse_cf()
# If we instead want just a single variable, we can pass that variable name to parse_cf and
# it will return just that data variable as a DataArray.
data_var = data.metpy.parse_cf('Temperature_isobaric')
# If we want only a subset of variables, we can pass a list of variable names as well.
data_subset = data.metpy.parse_cf(['u-component_of_wind_isobaric',
'v-component_of_wind_isobaric'])
# To rename variables, supply a dictionary between old and new names to the rename method
data = data.rename({
'Vertical_velocity_pressure_isobaric': 'omega',
'Relative_humidity_isobaric': 'relative_humidity',
'Temperature_isobaric': 'temperature',
'u-component_of_wind_isobaric': 'u',
'v-component_of_wind_isobaric': 'v',
'Geopotential_height_isobaric': 'height'
})
data['temperature'].metpy.convert_units('degC')
# Get multiple coordinates (for example, in just the x and y direction)
x, y = data['temperature'].metpy.coordinates('x', 'y')
# If we want to get just a single coordinate from the coordinates method, we have to use
# tuple unpacking because the coordinates method returns a generator
vertical, = data['temperature'].metpy.coordinates('vertical')
# Or, we can just get a coordinate from the property
time = data['temperature'].metpy.time
# To verify, we can inspect all their names
print([coord.name for coord in (x, y, vertical, time)])
print(data['height'].metpy.sel(vertical=850 * units.hPa))
data_crs = data['temperature'].metpy.cartopy_crs
print(data_crs)
data_globe = data['temperature'].metpy.cartopy_globe
print(data_globe)
lat, lon = xr.broadcast(y, x)
f = mpcalc.coriolis_parameter(lat)
dx, dy = mpcalc.lat_lon_grid_deltas(lon, lat, initstring=data_crs.proj4_init)
heights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]
u_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)
print(u_geo)
print(v_geo)
heights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]
lat, lon = xr.broadcast(y, x)
f = mpcalc.coriolis_parameter(lat)
dx, dy = mpcalc.grid_deltas_from_dataarray(heights)
u_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)
print(u_geo)
print(v_geo)
# A very simple example example of a plot of 500 hPa heights
data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}].plot()
plt.show()
# Let's add a projection and coastlines to it
ax = plt.axes(projection=ccrs.LambertConformal())
data['height'].metpy.loc[{'time': time[0],
'vertical': 500. * units.hPa}].plot(ax=ax, transform=data_crs)
ax.coastlines()
plt.show()
# Or, let's make a full 500 hPa map with heights, temperature, winds, and humidity
# Select the data for this time and level
data_level = data.metpy.loc[{time.name: time[0], vertical.name: 500. * units.hPa}]
# Create the matplotlib figure and axis
fig, ax = plt.subplots(1, 1, figsize=(12, 8), subplot_kw={'projection': data_crs})
# Plot RH as filled contours
rh = ax.contourf(x, y, data_level['relative_humidity'], levels=[70, 80, 90, 100],
colors=['#99ff00', '#00ff00', '#00cc00'])
# Plot wind barbs, but not all of them
wind_slice = slice(5, -5, 5)
ax.barbs(x[wind_slice], y[wind_slice],
data_level['u'].metpy.unit_array[wind_slice, wind_slice].to('knots'),
data_level['v'].metpy.unit_array[wind_slice, wind_slice].to('knots'),
length=6)
# Plot heights and temperature as contours
h_contour = ax.contour(x, y, data_level['height'], colors='k', levels=range(5400, 6000, 60))
h_contour.clabel(fontsize=8, colors='k', inline=1, inline_spacing=8,
fmt='%i', rightside_up=True, use_clabeltext=True)
t_contour = ax.contour(x, y, data_level['temperature'], colors='xkcd:deep blue',
levels=range(-26, 4, 2), alpha=0.8, linestyles='--')
t_contour.clabel(fontsize=8, colors='xkcd:deep blue', inline=1, inline_spacing=8,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Add geographic features
ax.add_feature(cfeature.LAND.with_scale('50m'), facecolor=cfeature.COLORS['land'])
ax.add_feature(cfeature.OCEAN.with_scale('50m'), facecolor=cfeature.COLORS['water'])
ax.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='#c7c783', zorder=0)
ax.add_feature(cfeature.LAKES.with_scale('50m'), facecolor=cfeature.COLORS['water'],
edgecolor='#c7c783', zorder=0)
# Set a title and show the plot
ax.set_title('500 hPa Heights (m), Temperature (\u00B0C), Humidity (%) at '
+ time[0].dt.strftime('%Y-%m-%d %H:%MZ').item())
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting Data
Step2: Preparing Data
Step3: Units
Step4: WARNING
Step5: Indexing and Selecting Data
Step6: For full details on xarray indexing/selection, see the xarray documentation
Step7: The cartopy Globe can similarly be accessed via the data_var.metpy.cartopy_globe
Step8: Calculations
Step9: Also, a limited number of calculations directly support xarray DataArrays or Datasets (they can accept and return xarray objects)
Step10: Plotting
|
3,896
|
<ASSISTANT_TASK:>
Python Code:
import requests
from pprint import pprint
redirect_uri = 'https://not-a-real-site/authorized'
data = {
'client_name': 'Fake Research Application',
'redirect_uris': [redirect_uri],
'scope': 'launch/patient patient/*.read offline_access'
}
response = requests.post('https://portal.demo.syncfor.science/oauth/register', json=data)
response_data = response.json()
print(f'Response status code: {response.status_code}')
pprint(response_data)
client_id = response_data['client_id']
client_secret = response_data['client_secret']
from urllib.parse import urlencode
params = {
'response_type': 'code',
'client_id': client_id,
'redirect_uri': redirect_uri,
'scope': 'launch/patient patient/*.read offline_access',
'state': 'my-obscured-state',
'aud': 'https://portal.demo.syncfor.science/api/fhir'
}
print(f'https://portal.demo.syncfor.science/oauth/authorize?{urlencode(params)}')
code = '5tQu3bV8XDTYgf8t3VOjzsbVZ5Fuqn' # replace with your code
auth = (client_id, client_secret) # for basic authentication
data = {
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': redirect_uri
}
response = requests.post('https://portal.demo.syncfor.science/oauth/token', auth=auth, data=data)
response_data = response.json()
print(f'Response status code: {response.status_code}')
pprint(response_data)
access_token = response_data['access_token']
refresh_token = response_data['refresh_token']
patient_id = response_data['patient']
response = requests.get(f'https://portal.demo.syncfor.science/api/fhir/Patient/{patient_id}') # oops, no header
print(f'Response status code: {response.status_code}')
headers = {
'Authorization': f'Bearer {access_token}'
}
response = requests.get(f'https://portal.demo.syncfor.science/api/fhir/Patient/{patient_id}', headers=headers)
print(f'Response status code: {response.status_code}')
pprint(response.json())
auth = (client_id, client_secret)
data = {
'grant_type': 'refresh_token',
'refresh_token': refresh_token
}
response = requests.post('https://portal.demo.syncfor.science/oauth/token', auth=auth, data=data)
pprint(response.json())
username = 'daniel-adams'
patient_id = 'smart-1288992'
data = {
'client_id': client_id,
'username': username,
'patient_id': patient_id
}
response = requests.post('https://portal.demo.syncfor.science/oauth/debug/token', json=data)
response_data = response.json()
print(f'Response status code: {response.status_code}')
pprint(response_data)
access_token = response_data['access_token']
refresh_token = response_data['refresh_token']
params = {'token': access_token} # can be access or refresh token
response = requests.get('https://portal.demo.syncfor.science/oauth/debug/introspect', params=params)
print(f'Response status code: {response.status_code}')
pprint(response.json())
from time import time
data = {
'client_id': client_id,
'username': username,
'patient_id': patient_id,
'access_lifetime': 3*60*60, # duration in seconds
'approval_expires': time() + 180*24*60*60 # UNIX timestamp
}
token_response = requests.post('https://portal.demo.syncfor.science/oauth/debug/token', json=data)
access_token = token_response.json()['access_token']
introspect_response = requests.get('https://portal.demo.syncfor.science/oauth/debug/introspect',
params={'token': access_token})
pprint(introspect_response.json())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Launch the OAuth workflow
Step2: Collect the authorization code
Step3: Exchange for access token
Step4: Note
Step5: Now let's try the same request with the access token
Step6: Success!
Step7: SMART Reference Server Developer Resources
Step8: Inspect a token
Step9: Here we can see information associated with this token. By default when creating a token with the reference stack's debug endpoint, the access token is valid for 1 hour (access_expires), but the approval is valid for 1 year (approval_expires). This means that after 1 hour, attempts to use the access_token when fetching data will fail; however, the approval is still valid for 1 year, so the refresh_token may be used to generate a new access token within this time frame. A real research application will have no control over these time periods - the SMART Reference Server decides how for long the access token should be valid, and the approval expiration time is generally specified by the participant during the authorization process.
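The refresh flow described above is worth wrapping in a helper (a sketch built only from the endpoints used in this notebook):
def refresh_access_token(client_id, client_secret, refresh_token):
    # Exchange a refresh token for a new access token at the token endpoint
    response = requests.post('https://portal.demo.syncfor.science/oauth/token',
                             auth=(client_id, client_secret),
                             data={'grant_type': 'refresh_token',
                                   'refresh_token': refresh_token})
    response.raise_for_status()
    return response.json()['access_token']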
|
3,897
|
<ASSISTANT_TASK:>
Python Code:
import os, json, math
import numpy as np
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_v2 as fc # This will change when Keras FeatureColumn is final.
from matplotlib import pyplot as plt
print("Tensorflow version " + tf.__version__)
tf.enable_eager_execution()
#@title display utilities [RUN ME]
# utility to display training and validation curves
def display_training_curves(training, validation, title, subplot):
if subplot%10==1: # set up the subplots on the first call
plt.subplots(figsize=(10,10), facecolor='#F0F0F0')
plt.tight_layout()
ax = plt.subplot(subplot)
ax.set_facecolor('#F8F8F8')
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['train', 'valid.'])
# backend identification
IS_COLAB = 'COLAB_GPU' in os.environ # this is always set on Colab, the value is 0 or 1 depending on GPU presence
HAS_COLAB_TPU = 'COLAB_TPU_ADDR' in os.environ
# Auth on Colab
if IS_COLAB:
from google.colab import auth
auth.authenticate_user()
# Also propagate the Auth to TPU if available so that it can access your GCS buckets
if IS_COLAB and HAS_COLAB_TPU:
TF_MASTER = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])
with tf.Session(TF_MASTER) as sess:
with open('/content/adc.json', 'r') as f:
auth_info = json.load(f) # Upload the credentials to TPU.
tf.contrib.cloud.configure_gcs(sess, credentials=auth_info)
print('Using TPU')
# TPU usage flag
USE_TPU = HAS_COLAB_TPU
DATA_BUCKET = "gs://cloud-training-demos/taxifare/ch4/taxi_preproc/"
TRAIN_DATA_PATTERN = DATA_BUCKET + "train*"
VALID_DATA_PATTERN = DATA_BUCKET + "valid*"
CSV_COLUMNS = ['fare_amount', 'dayofweek', 'hourofday', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']
DEFAULTS = [[0.0], ['null'], [12], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
def decode_csv(line):
column_values = tf.decode_csv(line, DEFAULTS)
column_names = CSV_COLUMNS
decoded_line = dict(zip(column_names, column_values)) # create a dictionary {'column_name': value, ...} for each line
return decoded_line
def load_dataset(pattern):
#filenames = tf.gfile.Glob(pattern)
filenames = tf.data.Dataset.list_files(pattern)
#dataset = tf.data.TextLineDataset(filenames)
dataset = filenames.interleave(tf.data.TextLineDataset, cycle_length=16) # interleave so that reading happens from multiple files in parallel
dataset = dataset.map(decode_csv)
return dataset
dataset = load_dataset(TRAIN_DATA_PATTERN)
for n, data in enumerate(dataset):
numpy_data = {k: v.numpy() for k, v in data.items()} # .numpy() works in eager mode
print(numpy_data)
if n>10: break
def add_engineered(features):
# this is how you can do feature engineering in TensorFlow
distance = tf.sqrt((features['pickuplat'] - features['dropofflat'])**2 +
(features['pickuplon'] - features['dropofflon'])**2)
# euclidian distance is hard for a neural network to emulate
features['euclidean'] = distance
return features
def features_and_labels(features):
features = add_engineered(features)
features.pop('key') # this column not needed
label = features.pop('fare_amount') # this is what we will train for
return features, label
def prepare_dataset(dataset, batch_size, truncate=None, shuffle=True):
dataset = dataset.map(features_and_labels)
if truncate is not None:
dataset = dataset.take(truncate)
dataset = dataset.cache()
if shuffle:
dataset = dataset.shuffle(10000)
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(-1) # prefetch next batch while training (-1: autotune prefetch buffer size)
return dataset
one_item = load_dataset(TRAIN_DATA_PATTERN).map(features_and_labels).take(1).batch(1)
NB_BUCKETS = 16
latbuckets = np.linspace(38.0, 42.0, NB_BUCKETS).tolist()
lonbuckets = np.linspace(-76.0, -72.0, NB_BUCKETS).tolist()
# the columns you can play with
# Categorical columns are used as:
# fc.indicator_column(dayofweek)
dayofweek = fc.categorical_column_with_vocabulary_list('dayofweek', vocabulary_list = ['Sun', 'Mon', 'Tues', 'Wed', 'Thu', 'Fri', 'Sat'])
hourofday = fc.categorical_column_with_identity('hourofday', num_buckets = 24)
# Bucketized columns can be used as such:
bucketized_pick_lon = fc.bucketized_column(fc.numeric_column('pickuplon'), lonbuckets)
bucketized_pick_lat = fc.bucketized_column(fc.numeric_column('pickuplat'), latbuckets)
bucketized_drop_lon = fc.bucketized_column(fc.numeric_column('dropofflon'), lonbuckets)
bucketized_drop_lat = fc.bucketized_column(fc.numeric_column('dropofflat'), latbuckets)
# Cross columns are used as
# fc.embedding_column(day_hr, 5)
day_hr = fc.crossed_column([dayofweek, hourofday], 24 * 7)
pickup_cross = fc.crossed_column([bucketized_pick_lat, bucketized_pick_lon], NB_BUCKETS * NB_BUCKETS)
dropoff_cross = fc.crossed_column([bucketized_drop_lat, bucketized_drop_lon], NB_BUCKETS * NB_BUCKETS)
#pickdrop_pair = fc.crossed_column([pickup_cross, dropoff_cross], NB_BUCKETS ** 4 )
columns = [
# Categorical columns
fc.indicator_column(dayofweek),
fc.indicator_column(hourofday),
# Bucketized columns
bucketized_pick_lat,
bucketized_pick_lon,
bucketized_drop_lat,
bucketized_drop_lon,
# Numeric columns
fc.numeric_column('passengers'),
#fc.numeric_column('pickuplon'), # replaced by bucketized
#fc.numeric_column('pickuplat'), # replaced by bucketized
#fc.numeric_column('dropofflon'), # replaced by bucketized
#fc.numeric_column('dropofflat'), # replaced by bucketized
# Engineered features that are created in the input_fn
fc.numeric_column('euclidean'),
# Feature crosses
    #fc.embedding_column(pickdrop_pair, 5),
fc.embedding_column(pickup_cross, 5),
    fc.embedding_column(dropoff_cross, 5),
fc.embedding_column(day_hr, 5)
]
l = tf.keras.layers
model = tf.keras.Sequential(
[
fc.FeatureLayer(columns),
l.Dense(100, activation='relu'),
l.Dense(64, activation='relu'),
l.Dense(32, activation='relu'),
l.Dense(16, activation='relu'),
l.Dense(1, activation=None), # regression
])
def rmse(y_true, y_pred): # Root Mean Squared Error
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def mae(y_true, y_pred): # Mean Squared Error
return tf.reduce_mean(tf.abs(y_pred - y_true))
model.compile(optimizer=tf.train.AdamOptimizer(), # little bug: in eager mode, 'adam' is not yet accepted, must spell out tf.train.AdamOptimizer()
loss='mean_squared_error',
metrics=[rmse])
# print model layers
model.predict(one_item, steps=1) # little bug: with FeatureLayer, must call the model once on dummy data before .summary can work
model.summary()
EPOCHS = 8
BATCH_SIZE = 512
TRAIN_SIZE = 640*1024 # max is 2,141,023
VALID_SIZE = 64*1024 # max is 2,124,500
# Playground settings: TRAIN_SIZE = 64*1024, VALID_SIZE = 4*1024
# Solution settings: TRAIN_SIZE = 640*1024, VALID_SIZE = 64*1024
# This should reach RMSE = 3.9 (multiple runs may be necessary)
train_dataset = prepare_dataset(load_dataset(TRAIN_DATA_PATTERN), batch_size=BATCH_SIZE, truncate=TRAIN_SIZE)
valid_dataset = prepare_dataset(load_dataset(VALID_DATA_PATTERN), batch_size=BATCH_SIZE, truncate=VALID_SIZE, shuffle=False)
history = model.fit(train_dataset, steps_per_epoch=TRAIN_SIZE//BATCH_SIZE, epochs=EPOCHS, shuffle=True,
validation_data=valid_dataset, validation_steps=VALID_SIZE//BATCH_SIZE)
print(history.history.keys())
display_training_curves(history.history['rmse'], history.history['val_rmse'], 'accuracy', 211)
display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Colab-only auth
Step2: Config
Step3: Linear Keras model [WORK REQUIRED]
|
3,898
|
<ASSISTANT_TASK:>
Python Code:
help('learning_lab.01_device_control')
from importlib import import_module
script = import_module('learning_lab.01_device_control')
from inspect import getsource
print(getsource(script.main))
print(getsource(script.demonstrate))
run ../learning_lab/01_device_control.py
from basics.odl_http import http_history
from basics.http import http_history_to_html
from IPython.core.display import HTML
HTML(http_history_to_html(http_history()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation
Step2: Execution
Step3: HTTP
|
3,899
|
<ASSISTANT_TASK:>
Python Code:
from planet4 import io
db = io.DBManager()
data = db.get_all()
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(data.user_name)
len(le.classes_)
data.user_name = le.transform(data.user_name)
le.transform(data.tail().user_name)
import pickle
folder = Path('/Users/klay6683/Dropbox/data/planet4/P4_catalog_v1.0')
fname = folder / 'P4_catalog_v1.0_username_encoder.pkl'
with open(fname, 'wb') as f:
pickle.dump(le, f)
from planet4 import reduction
# storing new database
fname = folder / 'P4_catalog_v1.0_raw_classifications.hdf'
data.to_hdf(fname, 'df', format='t', data_columns=reduction.data_columns)
with open('name_encoder.pkl', 'rb') as f:
    le = pickle.load(f)
le.inverse_transform(np.array([15, 15, 49, 16, 53]))
np.argwhere(le.classes_=='Vincep1')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reversing name encoding
Step2: Find a certain user
|