repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
OceanPARCELS/parcels
parcels/examples/tutorial_SummedFields.ipynb
mit
%matplotlib inline from parcels import Field, FieldSet, ParticleSet, JITParticle, plotTrajectoriesFile, AdvectionRK4 import numpy as np """ Explanation: Tutorial on how to combine different Fields for advection into a SummedField object In some oceanographic applications, you may want to advect particles using a combination of different velocity data sets. For example, particles at the surface are transported by a combination of geostrophic, Ekman and Stokes flow. And often, these flows are not even on the same grid. One option would be to write a Kernel that computes the movement of particles due to each of these flows. However, in Parcels it is possible to directly combine different flows (without interpolation) and feed them into the built-in AdvectionRK4 kernel. For that, we use so-called SummedField objects. This tutorial shows how to use these SummedField with a very idealised example. We start by importing the relevant modules. End of explanation """ xdim, ydim = (10, 20) Uflow = Field('U', np.ones((ydim, xdim), dtype=np.float32), lon=np.linspace(0., 1e3, xdim, dtype=np.float32), lat=np.linspace(0., 1e3, ydim, dtype=np.float32)) Vflow = Field('V', np.zeros((ydim, xdim), dtype=np.float32), grid=Uflow.grid) fieldset_flow = FieldSet(Uflow, Vflow) """ Explanation: Now, let's first define a zonal and meridional velocity field on a 1kmx1km grid with a flat mesh. The zonal velocity is uniform and 1 m/s, and the meridional velocity is zero everywhere. 
End of explanation """ pset = ParticleSet(fieldset_flow, pclass=JITParticle, lon=[0], lat=[900]) output_file = pset.ParticleFile(name='SummedFieldParticle_flow.nc', outputdt=1) pset.execute(AdvectionRK4, runtime=10, dt=1, output_file=output_file) output_file.export() # export the trajectory data to a netcdf file plotTrajectoriesFile('SummedFieldParticle_flow.nc'); """ Explanation: We then run a particle and plot its trajectory End of explanation """ gf = 10 # factor by which the resolution of this grid is higher than of the original one. Ustokes = Field('U', np.zeros((ydim*gf, xdim*gf), dtype=np.float32), lon=np.linspace(0., 1e3, xdim*gf, dtype=np.float32), lat=np.linspace(0., 1e3, ydim*gf, dtype=np.float32)) Vstokes = Field('V', -0.2*np.ones((ydim*gf, xdim*gf), dtype=np.float32), grid=Ustokes.grid) fieldset_stokes=FieldSet(Ustokes, Vstokes) """ Explanation: The trajectory plot shows a particle moving eastward on the 1 m/s flow, as expected Now, let's define another set of velocities (Ustokes, Vstokes) on a different, higher-resolution grid. This flow is southward at -0.2 m/s. End of explanation """ pset = ParticleSet(fieldset_stokes, pclass=JITParticle, lon=[0], lat=[900]) output_file = pset.ParticleFile(name='SummedFieldParticle_stokes.nc', outputdt=1) pset.execute(AdvectionRK4, runtime=10, dt=1, output_file=output_file) output_file.export() # export the trajectory data to a netcdf file plotTrajectoriesFile('SummedFieldParticle_stokes.nc'); """ Explanation: We run a particle in this FieldSet and also plot its trajectory End of explanation """ fieldset_sum = FieldSet(U=fieldset_flow.U+fieldset_stokes.U, V=fieldset_flow.V+fieldset_stokes.V) """ Explanation: Now comes the trick of the SummedFields. We can simply define a new FieldSet with a summation of different Fields, as in U=fieldset_flow.U+fieldset_stokes.U. 
End of explanation """ pset = ParticleSet(fieldset_sum, pclass=JITParticle, lon=[0], lat=[900]) output_file = pset.ParticleFile(name='SummedFieldParticle_sum.nc', outputdt=1) pset.execute(AdvectionRK4, runtime=10, dt=1, output_file=output_file) output_file.export() # export the trajectory data to a netcdf file plotTrajectoriesFile('SummedFieldParticle_sum.nc'); """ Explanation: And if we then run the particle again and plot its trajectory, we see that it moves southeastward! End of explanation """
Danghor/Algorithms
Python/Chapter-05/Calculator-Frame.ipynb
gpl-2.0
import re """ Explanation: The Shunting Yard Algorithm (Operator Precedence Parsing) End of explanation """ def isWhiteSpace(s): whitespace = re.compile(r'[ \t]+') return whitespace.fullmatch(s) """ Explanation: The function $\texttt{isWhiteSpace}(s)$ checks whether $s$ contains only blanks and tabulators. End of explanation """ def toInt(s): try: return int(s) except ValueError: return s """ Explanation: The function $\texttt{toInt}(s)$ tries to convert the string $s$ to an integer. If this works out, the integer is returned. Otherwise, the string $s$ is returned unchanged. End of explanation """ def tokenize(s): regExp = r''' 0|[1-9][0-9]* | # number \*\* | # power operator [-+*/()] | # arithmetic operators and parentheses [ \t] | # white space sqrt | sin | cos | tan | asin | acos | atan | exp | log | x | e | pi ''' L = [toInt(t) for t in re.findall(regExp, s, flags=re.VERBOSE) if not isWhiteSpace(t)] return list(reversed(L)) tokenize('x**2 - 2') """ Explanation: The module re provides support for <a href='https://en.wikipedia.org/wiki/Regular_expression'>regular expressions</a>. These are needed for <em style="color:blue;">tokenizing</em> a string. The function $\texttt{tokenize}(s)$ takes a string and splits this string into a list of tokens. Whitespace is discarded. End of explanation """ import math """ Explanation: The module math provides a number of mathematical functions like exp, sin, log etc. 
End of explanation """ def findZero(f, a, b, n): assert a < b, f'{a} has to be less than {b}' assert f(a) * f(b) <= 0, f'f({a}) * f({b}) > 0' if f(a) <= 0 <= f(b): for k in range(n): c = 0.5 * (a + b) print(f'f({c}) = {f(c)}, {b-a}') if f(c) < 0: a = c elif f(c) > 0: b = c else: return c else: for k in range(n): c = 0.5 * (a + b) print(f'f({c}) = {f(c)}, {b-a}') if f(c) > 0: a = c elif f(c) < 0: b = c else: return c return (a + b) / 2 def f(x): return x ** 2 - 2 findZero(f, 0, 2, 55) """ Explanation: The function $\texttt{findZero}(f, a, b, n)$ takes a function $f$ and two numbers $a$ and $b$ such that $a < b$ and $f(a) \leq 0 \leq f(b)$ or $f(a) \geq 0 \geq f(b)$. It uses the bisection method to find a number $x \in [a, b]$ such that $f(x) \approx 0$. $n$ is the number of iterations. End of explanation """ def precedence(op): "your code here" """ Explanation: The function $\texttt{precedence}(o)$ calculates the precedence of the operator $o$. End of explanation """ def isUnaryOperator(op): "your code here" """ Explanation: The function $\texttt{isUnaryOperator}(o)$ returns True of $o$ is a unary operator. End of explanation """ def isConstOperator(op): "your code here" """ Explanation: The function $\texttt{isConstOperator}(o)$ returns True of $o$ is a constant like eor pi. End of explanation """ def isLeftAssociative(op): "your code here" """ Explanation: The function $\texttt{isLeftAssociative}(o)$ returns True of $o$ is left associative. End of explanation """ def evalBefore(stackOp, nextOp): "your code here" import stack """ Explanation: The function $\texttt{evalBefore}(o_1, o_2)$ receives to strings representing arithmetical operators. It returns True if the operator $o_1$ should be evaluated before the operator $o_2$ in an arithmetical expression of the form $a \;\texttt{o}_1\; b \;\texttt{o}_2\; c$. 
In order to determine whether $o_1$ should be evaluated before $o_2$ it uses the <em style="color:blue">precedence</em> and the <em style="color:blue">associativity</em> of the operators. Its behavior is specified by the following rules: - $\texttt{precedence}(o_1) > \texttt{precedence}(o_2) \rightarrow \texttt{evalBefore}(\texttt{o}_1, \texttt{o}_2) = \texttt{True}$, - $o_1 = o_2 \rightarrow \texttt{evalBefore}(\texttt{o}_1, \texttt{o}_2) = \texttt{isLeftAssociative}(o_1)$, - $\texttt{precedence}(o_1) = \texttt{precedence}(o_2) \wedge o_1 \not= o_2 \rightarrow \texttt{evalBefore}(\texttt{o}_1, \texttt{o}_2) = \texttt{True}$, - $\texttt{precedence}(o_1) < \texttt{precedence}(o_2) \rightarrow \texttt{evalBefore}(\texttt{o}_1, \texttt{o}_2) = \texttt{False}$. End of explanation """ class Calculator: def __init__(self, TL, x): self.mTokens = stack.createStack(TL) self.mOperators = stack.Stack() self.mArguments = stack.Stack() self.mValue = x """ Explanation: The class Calculator supports three member variables: - the token stack mTokens, - the operator stack mOperators, - the argument stack mArguments, - the floating point number mValue, which is the current value of x. The constructor takes a list of tokens TL and initializes the token stack with these tokens. End of explanation """ def toString(self): return '\n'.join(['_'*50, 'TokenStack: ', str(self.mTokens), 'Arguments: ', str(self.mArguments), 'Operators: ', str(self.mOperators), '_'*50]) Calculator.__str__ = toString """ Explanation: The method __str__ is used to convert an object of class Calculator to a string. End of explanation """ def evaluate(self): "your code here" Calculator.evaluate = evaluate del evaluate """ Explanation: The function $\texttt{evaluate}(\texttt{self})$ evaluates the expression that is given by the tokens on the mTokenStack. There are two phases: 1. The first phase is the <em style="color:blue">reading phase</em>. In this phase the tokens are removed from the token stack mTokens. 2. 
The second phase is the <em style="color:blue">evaluation phase</em>. In this phase, the remaining operators on the operator stack mOperators are evaluated. Note that some operators are already evaluated in the reading phase. We can describe what happens in the reading phase using <em style="color:blue">rewrite rules</em> that describe how the three stacks mTokens, mArguments and mOperators are changed in each step. Here, a step is one iteration of the first while-loop of the function evaluate. The following rewrite rules are executed until the token stack mTokens is empty. 1. If the token on top of the token stack is an integer, it is removed from the token stack and pushed onto the argument stack. The operator stack remains unchanged in this case. $$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [\texttt{token} ] & \wedge \ \texttt{isInteger}(\texttt{token}) & \Rightarrow \[0.2cm] \texttt{mArguments}' = \texttt{mArguments} + [\texttt{token}] & \wedge \ \texttt{mTokens}' = \texttt{mTokensRest} & \wedge \ \texttt{mOperators}' = \texttt{mOperators} \end{array} $$ Here, the primed variable $\texttt{mArguments}'$ refers to the argument stack after $\texttt{token}$ has been pushed onto it. In the following rules we implicitly assume that the token on top of the token stack is not an integer but rather a parenthesis or a proper operator. In order to be more concise, we suppress this precondition from the following rewrite rules. 2. If the operator stack is empty, the next token is pushed onto the operator stack. $$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [\texttt{op} ] & \wedge \ \texttt{mOperators} = [] & \Rightarrow \[0.2cm] \texttt{mOperators}' = \texttt{mOperators} + [\texttt{op}] & \wedge \ \texttt{mTokens}' = \texttt{mTokensRest} & \wedge \ \texttt{mArguments}' = \texttt{mArguments} \end{array} $$ 3. If the next token is an opening parenthesis, this parenthesis token is pushed onto the operator stack. 
$$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [\texttt{'('} ] & \Rightarrow \[0.2cm] \texttt{mOperators}' = \texttt{mOperators} + [\texttt{'('}] & \wedge \ \texttt{mTokens}' = \texttt{mTokensRest} & \wedge \ \texttt{mArguments}' = \texttt{mArguments} \end{array} $$ 4. If the next token is a closing parenthesis and the operator on top of the operator stack is an opening parenthesis, then both parentheses are removed. $$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [\texttt{')'} ] & \wedge \ \texttt{mOperators} =\texttt{mOperatorsRest} + [\texttt{'('}] & \Rightarrow \[0.2cm] \texttt{mOperators}' = \texttt{mOperatorsRest} & \wedge \ \texttt{mTokens}' = \texttt{mTokensRest} & \wedge \ \texttt{mArguments}' = \texttt{mArguments} \end{array} $$ 5. If the next token is a closing parenthesis but the operator on top of the operator stack is not an opening parenthesis, the operator on top of the operator stack is evaluated. Note that the token stack is not changed in this case. $$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [\texttt{')'} ] & \wedge \ \texttt{mOperatorsRest} + [\texttt{op}] & \wedge \ \texttt{op} \not= \texttt{'('} & \wedge \ \texttt{mArguments} = \texttt{mArgumentsRest} + [\texttt{lhs}, \texttt{rhs}] & \Rightarrow \[0.2cm] \texttt{mOperators}' = \texttt{mOperatorsRest} & \wedge \ \texttt{mTokens}' = \texttt{mTokens} & \wedge \ \texttt{mArguments}' = \texttt{mArgumentsRest} + [\texttt{lhs} \;\texttt{op}\; \texttt{rhs}] \end{array} $$ Here, the expression $\texttt{lhs} \;\texttt{op}\; \texttt{rhs}$ denotes evaluating the operator $\texttt{op}$ with the arguments $\texttt{lhs}$ and $\texttt{rhs}$. 6. If the token on top of the operator stack is an opening parenthesis, then the operator on top of the token stack is pushed onto the operator stack. 
$$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [\texttt{op}] & \wedge \ \texttt{op} \not= \texttt{')'} & \wedge \ \texttt{mOperators} = \texttt{mOperatorsRest} + [\texttt{'('}] & \Rightarrow \[0.2cm] \texttt{mOperator}' = \texttt{mOperator} + [\texttt{op}] & \wedge \ \texttt{mTokens}' = \texttt{mTokensRest} & \wedge \ \texttt{mArguments}' = \texttt{mArguments} \end{array} $$ In the remaining cases neither the token on top of the token stack nor the operator on top of the operator stack can be a a parenthesis. The following rules will implicitly assume that this is the case. 7. If the operator on top of the operator stack needs to be evaluated before the operator on top of the token stack, the operator on top of the operator stack is evaluated. $$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [o_2] & \wedge \ \texttt{mOperatorsRest} + [o_1] & \wedge \ \texttt{evalBefore}(o_1, o_2) & \wedge \ \texttt{mArguments} = \texttt{mArgumentsRest} + [\texttt{lhs}, \texttt{rhs}] & \Rightarrow \[0.2cm] \texttt{mOperators}' = \texttt{mOperatorRest} & \wedge \ \texttt{mTokens}' = \texttt{mTokens} & \wedge \ \texttt{mArguments}' = \texttt{mArgumentsRest} + [\texttt{lhs} \;o_1\; \texttt{rhs}] \end{array} $$ 8. Otherwise, the operator on top of the token stack is pushed onto the operator stack. 
$$\begin{array}{lc} \texttt{mTokens} = \texttt{mTokensRest} + [o_2] & \wedge \ \texttt{mOperators} = \texttt{mOperatorsRest} + [o_1] & \wedge \ \neg \texttt{evalBefore}(o_1, o_2) & \Rightarrow \[0.2cm] \texttt{mOperators}' = \texttt{mOperators} + [o_2] & \wedge \ \texttt{mTokens}' = \texttt{mTokensRest} & \wedge \ \texttt{mArguments}' = \texttt{mArguments} \end{array} $$ In every step of the evaluation phase we - remove one operator from the operator stack, - remove its arguments from the argument stack, - evaluate the operator, and - push the result back on the argument stack End of explanation """ def popAndEvaluate(self): "your code here" Calculator.popAndEvaluate = popAndEvaluate TL = tokenize('x - cos(x)') C = Calculator(TL, 1) C.evaluate() def computeZero(s, left, right): TL = tokenize(s) def f(x): c = Calculator(TL, x) return c.evaluate() return findZero(f, left, right, 54); computeZero('log exp x - cos(x)', 0, 1) """ Explanation: The method $\texttt{popAndEvaluate}(\texttt{self})$ removes an operator from the operator stack and removes the corresponding arguments from the arguments stack. It evaluates the operator and pushes the result on the argument stack. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/bnu/cmip6/models/sandbox-3/atmoschem.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'bnu', 'sandbox-3', 'atmoschem') """ Explanation: ES-DOC CMIP6 Model Properties - Atmoschem MIP Era: CMIP6 Institute: BNU Source ID: SANDBOX-3 Topic: Atmoschem Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry. Properties: 84 (39 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:41 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Timestep Framework --&gt; Split Operator Order 5. Key Properties --&gt; Tuning Applied 6. Grid 7. Grid --&gt; Resolution 8. Transport 9. Emissions Concentrations 10. Emissions Concentrations --&gt; Surface Emissions 11. Emissions Concentrations --&gt; Atmospheric Emissions 12. Emissions Concentrations --&gt; Concentrations 13. Gas Phase Chemistry 14. Stratospheric Heterogeneous Chemistry 15. 
Tropospheric Heterogeneous Chemistry 16. Photo Chemistry 17. Photo Chemistry --&gt; Photolysis 1. Key Properties Key properties of the atmospheric chemistry 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmospheric chemistry model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of atmospheric chemistry model code. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Chemistry Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/mixing ratio for gas" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. 
Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Form of prognostic variables in the atmospheric chemistry component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of advected tracers in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Atmospheric chemistry calculations (not advection) generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.8. Coupling With Chemical Reactivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Operator splitting" # "Integrated" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Timestepping in the atmospheric chemistry model 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the evolution of a given variable End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for chemical species advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Split Operator Chemistry Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for chemistry (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.5. Split Operator Alternate Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.6. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the atmospheric chemistry model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.7. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Timestep Framework --&gt; Split Operator Order ** 4.1. Turbulence Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.2. Convection Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. 
Precipitation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.4. Emissions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.5. Deposition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.6. Gas Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.9. Photo Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.10. Aerosols Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning methodology for atmospheric chemistry component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Grid Atmospheric chemistry grid 6.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general structure of the atmospheric chemistry grid End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.2. Matches Atmosphere Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 *Does the atmospheric chemistry grid match the atmosphere grid?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Grid --&gt; Resolution Resolution in the atmospheric chemistry grid 7.1.
Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 7.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Transport Atmospheric chemistry transport 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview of transport implementation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.2. Use Atmospheric Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is transport handled by the atmosphere, rather than within atmospheric chemistry? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.transport_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Transport Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If transport is handled within the atmospheric chemistry scheme, describe it. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Emissions Concentrations Atmospheric chemistry emissions 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview atmospheric chemistry emissions End of explanation """ # PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Soil" # "Sea surface" # "Anthropogenic" # "Biomass burning" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Emissions Concentrations --&gt; Surface Emissions ** 10.1. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.2. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.5. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.6. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and specified via any other method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Aircraft" # "Biomass burning" # "Lightning" # "Volcanos" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Emissions Concentrations --&gt; Atmospheric Emissions TO DO 11.1. 
Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.6. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Emissions Concentrations --&gt; Concentrations TO DO 12.1. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13. Gas Phase Chemistry Atmospheric chemistry transport 13.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview gas phase atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HOx" # "NOy" # "Ox" # "Cly" # "HSOx" # "Bry" # "VOCs" # "isoprene" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Species included in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.3. Number Of Bimolecular Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of bi-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.4. Number Of Termolecular Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of ter-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.7. Number Of Advected Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of advected species in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.8. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.9. Interactive Dry Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.10. Wet Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.11. Wet Oxidation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Stratospheric Heterogeneous Chemistry Atmospheric chemistry stratospheric heterogeneous chemistry 14.1.
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview stratospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Cly" # "Bry" # "NOy" # TODO - please enter value(s) """ Explanation: 14.2. Gas Phase Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Gas phase species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule))" # TODO - please enter value(s) """ Explanation: 14.3. Aerosol Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Aerosol species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.4. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of steady state species in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.5. Sedimentation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.6. Coagulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is coagulation included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Tropospheric Heterogeneous Chemistry Atmospheric chemistry tropospheric heterogeneous chemistry 15.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview tropospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Gas Phase Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of gas phase species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon/soot" # "Polar stratospheric ice" # "Secondary organic aerosols" # "Particulate organic matter" # TODO - please enter value(s) """ Explanation: 15.3. Aerosol Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Aerosol species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.4. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of steady state species in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Interactive Dry Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.6. 
Coagulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is coagulation included in the tropospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Photo Chemistry Atmospheric chemistry photo chemistry 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview atmospheric photo chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 16.2. Number Of Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the photo-chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline (clear sky)" # "Offline (with clouds)" # "Online" # TODO - please enter value(s) """ Explanation: 17. Photo Chemistry --&gt; Photolysis Photolysis scheme 17.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Photolysis scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.2. Environmental Conditions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any environmental conditions taken into account by the photolysis scheme (e.g.
whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.) End of explanation """
OceanPARCELS/parcels
parcels/examples/tutorial_parcels_structure.ipynb
mit
from IPython.display import SVG SVG(filename='parcels_user_diagram.svg') """ Explanation: Getting started with Parcels: general structure There are many different ways in which to use Parcels for research. The flexibility of the parcels code enables this wide range of applicability and allows you to build complex simulations. But this also means that you have to know what you're doing. SO please take some time to learn how to use Parcels, starting with this tutorial. For a smooth programming experience with Parcels, it is recommended you make a general structure in setting up your simulations. Here we give you an overview of the main components of a Parcels simulation. These components provide you with the basic requirements for your first simulation, while the structure allows you to keep track of more complex simulations later on. A good practice is to separate the different parts of your code into sections: 1. FieldSet. Load and set up the (velocity) fields that your particles need to access. 2. ParticleSet. Define the type of particles you want to release, what Variables they have and what their initial conditions are. 3. Execute kernels. Define and compile the kernels that encode what your particles need to do each timestep and execute them. 4. Output. Write and store the output to a NetCDF file. Ideas on what you can do with the output in terms of analysis is documented here. End of explanation """ from parcels import FieldSet, ParticleSet, JITParticle, AdvectionRK4 # 1. Setting up the velocity fields in a FieldSet object fname = 'GlobCurrent_example_data/*.nc' filenames = {'U': fname, 'V': fname} variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity'} dimensions = {'U': {'lat': 'lat', 'lon': 'lon', 'time': 'time'}, 'V': {'lat': 'lat', 'lon': 'lon', 'time': 'time'}} fieldset = FieldSet.from_netcdf(filenames, variables, dimensions) # 2. 
Defining the particles type and initial conditions in a ParticleSet object pset = ParticleSet(fieldset=fieldset, # the fields on which the particles are advected pclass=JITParticle, # the type of particles (JITParticle or ScipyParticle) lon=28, # release longitudes lat=-33) # release latitudes # 3. Executing an advection kernel on the given fieldset output_file = pset.ParticleFile(name="GCParticles.nc", outputdt=3600) # the file name and the time step of the outputs pset.execute(AdvectionRK4, # the kernel (which defines how particles move) runtime=86400*6, # the total length of the run dt=300, # the timestep of the kernel output_file=output_file) # 4. Exporting the simulation output to a netcdf file output_file.export() """ Explanation: These four components are used in the python cell below. Further on in this notebook, we'll focus on each component separately. End of explanation """ fname = 'GlobCurrent_example_data/*.nc' filenames = {'U': fname, 'V': fname} variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity'} dimensions = {'U': {'lat': 'lat', 'lon': 'lon', 'time': 'time'}, # In the GlobCurrent data the dimensions are also called 'lon', 'lat' and 'time' 'V': {'lat': 'lat', 'lon': 'lon', 'time': 'time'}} fieldset = FieldSet.from_netcdf(filenames, variables, dimensions) """ Explanation: When you start making the parcels simulation more complex, it is a good idea to keep these different steps separate to keep a clear overview and find bugs more easily 1. FieldSet Parcels provides a framework to simulate the movement of particles within an existing flowfield environment. To start a parcels simulation we must define this environment with the FieldSet class. The minimal requirements for this Fieldset are that it must contain the 'U' and 'V' fields: the 2D hydrodynamic data that will move the particles. The general method to use is FieldSet.from_netcdf, which requires filenames, variables and dimensions. 
Each of these is a dictionary, and variables requires at least a U and V, but any other variable can be added too (e.g. vertical velocity, temperature, mixedlayerdepth, etc). Note also that filenames can contain wildcards. For example, the GlobCurrent data that can be downloaded using the parcels_get_examples script (see step 4 of the installation guide) can be read with: End of explanation """ pset = ParticleSet(fieldset=fieldset, # the fields on which the particles are advected pclass=JITParticle, # the type of particles (JITParticle or ScipyParticle) lon=28, # release longitude lat=-33) # release latitude """ Explanation: For more advanced tutorials on creating FieldSets: Implement periodic boundaries How to interpolate field data for different fields Converting units in the field data Working around incompatible time coordinates If you are working with field data on different grids: Grid indexing on different grids Load field data from Curvilinear grids Load field data from 3D C-grids If you want to combine different velocity fields: Add different velocity fields in a SummedField Nest velocity fields of different regions or resolutions in a NestedField 2. ParticleSet Once you have set up the environment with the FieldSet object, you can start defining your particles in a ParticleSet object. This object requires: 1. The FieldSet on which the particles live. 2. The type of Particle, which contains the information each particle will store. 3. The initial conditions for each Variable defined in the Particle, most notably the release locations in lon and lat. End of explanation """ from parcels import Variable class PressureParticle(JITParticle): # Define a new particle class p = Variable('p', initial=0) # Variable 'p' with initial value 0. 
""" Explanation: The different Particle types available are the JITParticle and the ScipyParticle, but it is very easy to create your own particle class which includes other Variables: End of explanation """ output_file = pset.ParticleFile(name="GCParticles.nc", outputdt=3600) # the file name and the time step of the outputs pset.execute(AdvectionRK4, # the kernel (which defines how particles move) runtime=86400*6, # the total length of the run in seconds dt=300, # the timestep of the kernel output_file=output_file) """ Explanation: For more advanced tutorials on how to setup your ParticleSet: Releasing particles at different times The difference between JITParticles and ScipyParticles For more information on how to implement Particle types with specific behavior, see the section on writing your own kernels 3. Kernel execution After defining the flowfield environment with FieldSet and the particle information with ParticleSet, we can move on to actually running the parcels simulation by using ParticleSet.execute(). Running a simulation in parcels actually means executing kernels, little snippets of code that are run for each particle at each timestep. The most basic kernels are advection kernels which calculate the movement of each particle based on the FieldSet in which the ParticleSet lives. A few different advection kernels are included in Parcels. If you want to store the particle data generated in the simulation, you usually first want to define the ParticleFile to which the output of the kernel execution will be written. Then, on the ParticleSet you have defined, you can use the method ParticleSet.execute() which requires the following arguments: 1. The kernels to be executed. 2. The runtime defining how long the execution loop runs. Alternatively, you may define the endtime at which the execution loop stops. 3. The timestep dt at which to execute the kernels. 4. (Optional) The ParticleFile object to write the output to. 
End of explanation """ def WestVel(particle, fieldset, time): if time > 86400 and time < 2*86400: uvel = -0.02 particle.lon += uvel * particle.dt WV_kernel = pset.Kernel(WestVel) output_file = pset.ParticleFile(name="GC_WestVel.nc", outputdt=3600) pset.execute(AdvectionRK4 + WV_kernel, # simply add kernels using the + operator runtime=86400*6, # the total length of the run in seconds dt=300, # the timestep of the kernel output_file=output_file) """ Explanation: One of the most powerful features of Parcels is the ability to write custom Kernels (see e.g. this example to add the vertical movements of an Argo float). You probably want to define these kernels here; after defining your ParticleSet and before executing them. If your kernels become very large or complex, you might want to store them in another python file and import them into your simulation script. End of explanation """ output_file.export() output_file.close() """ Explanation: <a id="kernelrules"></a> However, there are some key limitations to the Kernels that everyone who wants to write their own should be aware of: * Every Kernel must be a function with the following (and only those) arguments: (particle, fieldset, time) * In order to run successfully in JIT mode, Kernel definitions can only contain the following types of commands: * Basic arithmetical operators (+, -, *, /, **) and assignments (=). * Basic logical operators (&lt;, ==, !=, &gt;, &amp;, |). Note that you can use a statement like particle.lon != particle.lon to check if particle.lon is NaN (since math.nan != math.nan). * if and while loops, as well as break statements. Note that for-loops are not supported in JIT mode. * Interpolation of a Field from the FieldSet at a [time, depth, lat, lon] point, using square brackets notation. 
For example, to interpolate the zonal velocity (U) field at the particle location, use the following statement: value = fieldset.U[time, particle.depth, particle.lat, particle.lon] * Functions from the math standard library and from the custom ParcelsRandom library at parcels.rng * Simple print statements, such as: * print("Some print") * print(particle.lon) * print("particle id: %d" % particle.id) * print("lon: %f, lat: %f" % (particle.lon, particle.lat)) Although note that these `print` statements are not shown in Jupyter notebooks in JIT mode, see [this long-standing Issue](https://github.com/OceanParcels/parcels/issues/369). * Local variables can be used in Kernels, and these variables will be accessible in all concatenated Kernels. Note that these local variables are not shared between particles, and also not between time steps. * Note that one has to be careful with writing kernels for vector fields on Curvilinear grids. While Parcels automatically rotates the U and V field when necessary, this is not the case for for example wind data. In that case, a custom rotation function will have to be written. For more advanced tutorials on writing custom kernels that work on custom particles: Sample other fields like temperature. Mimic the behavior of ARGO floats. Adding diffusion to approximate subgrid-scale processes and unresolved physics. Converting between units in m/s and degree/s. 4. Output While executing the ParticleSet, parcels stores the data in npy files in an output folder. To take all the data and store them in a netcdf file, you can use ParticleFile.export() if you want to keep the folder with npy files; or ParticleFile.close() if you only want to keep the netcdf file: End of explanation """
exe0cdc/PyscesToolbox
example_notebooks/Thermokin.ipynb
bsd-3-clause
mod = pysces.model('lin4_fb') mod.doLoad() # this method call is necessary to ensure that future `doLoad` method calls are executed correctly tk = psctb.ThermoKin(mod) """ Explanation: Thermokin Thermokin is used to assess the kinetic and thermodynamic aspects of enzyme catalysed reactions in metabolic pathways [5]. It provides the functionality to automatically separate the rate equations of reversible reactions into a mass-action ($v_{ma}$) term and a combined binding ($v_{\Theta}$) and rate capacity ($v_{cap}$) term, however rate equations may be manually split into any arbitrary terms if more granularity is required. Additionally $\Gamma/K_{eq}$ is calculated automatically for reversible reactions. Subsequently, elasticity coefficients for the different rate equation terms are automatically calculated. Similar to symbolic control coefficient and control pattern expressions of Symca, the term and elasticity expressions generated by Thermokin can be inspected and manipulated with standard SymPy functionality and their values are automatically recalculated upon a steady-state recalculation. .. note:: Here we use the word "term" to refer to the terms of the logarithmic form of a rate equation as well as to the corresponding factors of its linear (conventional) form. While not technically correct, this terminology is used in accordance to the original publication [5]. Features Automatically separates rate equations into a mass-action term and a combined binding and rate capacity terms. Allows for splitting rate equations into arbitrary terms. Determines a $\Gamma/K_{eq}$ expression for reversible reactions. Determines elasticity coefficient expressions for each reaction and its associated terms. Calculates values of for reaction rate terms, $\Gamma/K_{eq}$, and elasticity coefficients when a new steady-state is reached. 
The effect of a parameter change on the reaction rate terms, $\Gamma/K_{eq}$, and elasticity coefficients can be investigated by performing a parameter scan and visualised usig ScanFig. Loading of split rate equation terms Saving of Thermokin results Usage and feature walkthrough Workflow Assessing the kinetic and thermodynamic aspects of enzyme catalysed reactions using Thermokin requires the following steps: Instantiation of a Thermokin object using a PySCeS model object and (optionally) a file in which the rate equations of the model has been split into separate terms. Accessing rate equation terms via reaction_results and the corresponding reaction name, reaction term name, or $\Gamma/K_{eq}$ name. Accessing elasticity coefficient terms via ec_results and the corresponding elasticity coefficient name. Inspection of the values of the various terms and elasticity coefficients. Inspection of the effect of parameter changes on the values of the rate equation terms and elasticity coefficients. Result saving. Further analysis. Rate term file syntax As previously mentioned, Thermokin will attempt to automatically split the rate equations of reversible reactions into separate terms. While this feature should work for most common rate equations and does not require any user intervention or knowledge of the parameter names used in the model file, it is limited in two significant ways: The algorithm cannot distinguish between the binding and rate capacity terms and can therefore not separate them. This is a minor issue if the focus of the analysis will be on the elasticity coefficients of the different terms, as the combined rate capacity and binding term elasticity coefficient will be identical to that of the binding term alone. The algorithm cannot separate the effect of single subunit binding from that of cooperative binding. Additionally, the algorithm can fail in some instances. 
For these reasons the separate rate equation terms can be manually defined in a .reqn file using a relatively simple syntax. Below follows such a file as automatically generated for the model lin4_fb.psc: ``` Automatically parsed and split rate equations for model: lin4_fb.psc generated on: 13:49:07 12-01-2017 Note that this is a best effort attempt that is highly dependent on the form of the rate equations as defined in the model file. Check correctness before use. R1 :successful separation of rate equation terms !T{R1}{ma} X0 - S1/Keq_1 !T{R1}{bind_vc} 1.0Vf_1(S1/S1_05_1 + X0/X0_05_1)(h_1 - 1.0)(a_1(S3/S3_05_1)h_1 + 1)/(X0_05_1(a_1(S3/S3_05_1)h_1*(S1/S1_05_1 + X0/X0_05_1)h_1 + (S3/S3_05_1)h_1 + (S1/S1_05_1 + X0/X0_05_1)h_1 + 1)) !G{R1}{gamma_keq} S1/(Keq_1*X0) R2 :successful separation of rate equation terms !T{R2}{ma} S1 - S2/Keq_2 !T{R2}{bind_vc} 1.0S2_05_2Vf_2/(S1S2_05_2 + S1_05_2S2 + S1_05_2S2_05_2) !G{R2}{gamma_keq} S2/(Keq_2S1) R3 :successful separation of rate equation terms !T{R3}{ma} S2 - S3/Keq_3 !T{R3}{bind_vc} 1.0S3_05_3Vf_3/(S2S3_05_3 + S2_05_3S3 + S2_05_3S3_05_3) !G{R3}{gamma_keq} S3/(Keq_3S2) R4 :rate equation not included - irreversible or unknown form ``` Two types of "terms" can be defined in a .reqn file. The first type denoted by !T, is factor of the rate equation. When the !T terms for a reaction are multiplied together, they should result in the original rate equation. Secondly !G terms are any arbitrary terms that could contain some useful information. Unlike the !T terms, the !G are not subject to any restrictions in terms of the value of their product or otherwise. For instance, the !G terms are used for define $\Gamma/K_{eq}$ for reversible reactions. The syntax for !T and !G terms are as follows: ``` !T{%reaction_name}{%term_name} %term_expression !G{%reaction_name}{%term_name} %term_expression ``` %reaction_name - The name of the reaction to which the term belongs as defined in the .psc file (see the PySCeS MDL documentation). 
%term_name - The name of the term. While this name is arbitrary, there can be no duplication for any single reaction. %term_expression - The expression of the term. Thus using the example provided above for reaction 3 the line !T{R3}{ma} S2 - S3/Keq_3 specifies a !T term belonging to reaction 3 with the name ma and the expression S2 - S3/Keq_3. Object instantiation Instantiation of a Thermokin analysis object requires a PySCeS model object (PysMod) as an argument. Optionally a .reqn file can be provided that includes specifically split rate equations. If no path is provided, Thermokin will attempt to automatically split the reversible rate equations as described above and save a .reqn file at ~/Pysces/psc/%model_name.reqn. If this file already exists, ThermoKin will load it instead. Using the included lin4_fb.psc model a Thermokin session is instantiated as follows: End of explanation """ # This path leads to the provided rate equation file path_to_reqn = '~/Pysces/psc/lin4_fb.reqn' # Correct path depending on platform - necessary for platform independent scripts if platform == 'win32': path_to_reqn = psctb.utils.misc.unix_to_windows_path(path_to_reqn) else: path_to_reqn = path.expanduser(path_to_reqn) tk = psctb.ThermoKin(mod,path_to_reqn) """ Explanation: Now that ThermoKin has automatically generated a .reqn file for lin4_fb.psc, we can load that file manually during instantiation as follows: End of explanation """ tk = psctb.ThermoKin(mod,overwrite=True) """ Explanation: If the path specified does not exist, a new .reqn file will be generated there instead. Finally, ThermoKin can also be forced to regenerate the .reqn file by setting the overwrite argument to True: End of explanation """ tk.reaction_results tk.ec_results """ Explanation: Accessing results Unlike RateChar and Symca, ThermoKin generates results immediately after instantiation.
Results are organised similar to the other two modules, however, and can be found in the reaction_results and ec_results objects: End of explanation """ # The binding*v_cap term of reaction 1 tk.reaction_results.J_R1_bind_vc """ Explanation: Each results object contains a variety of fields containing data related to a specific term or expression and may be accessed in a similar way to the results of Symca: Inspecting an individual reactions, terms, or elasticity coefficient yields a symbolic expression together with a value End of explanation """ tk.reaction_results.J_R1_bind_vc.expression """ Explanation: SymPy expressions can be accessed via the expression field End of explanation """ tk.reaction_results.J_R1_bind_vc.value """ Explanation: Values of the reaction, term, or elasticity coefficients End of explanation """ # The reaction can also be accessed at the root level of the ThermoKin object # and the binding*v_cap term is nested under it. tk.J_R1.bind_vc # A reaction or term specific ec_results object is also available tk.J_R1.bind_vc.ec_results.pecR1_X0_bind_vc # All the terms of a specific reaction can be accessed via `terms` tk.J_R1.terms """ Explanation: Additionally the latex_name, latex_expression, and parent model mod can also be accessed In order to promote a logical and exploratory approach to investigating data generated by ThermoKin, the results are also arranged in a manner in which terms and elasticity coefficients associated with a certain reaction can be found nested within the results for that reaction. 
Using reaction 1 (called J_R1 to signify the fact that its rate is at steady state) as an example, results can also be accessed in the following manner: End of explanation """ # Original value of J_R3 tk.J_R3 mod.doLoad() # mod.Vf_3 has a default value of 1000 mod.Vf_3 = 0.1 # calculating new steady state mod.doState() # New value (original was 44.618) tk.J_R3 # resetting to default Vf_3 value and recalculating mod.doLoad() mod.doState() """ Explanation: While each reaction/term/elasticity coefficient may be accessed in multiple ways, these fields are all references to the same result object. Modifying a term accessed in one way, therefore affects all references to the object. Dynamic value updating The values of the reactions/terms/elasticity coefficients are automatically updated when a new steady state is calculated for the model. Thus changing a parameter of lin4_hill, such as the $V_{f}$ value of reaction 3, will lead to new values: End of explanation """ valscan = tk.J_R1.do_par_scan('Vf_3',scan_range=numpy.logspace(-2,5,200),scan_type='value') valplot = valscan.plot() # Equivalent to clicking the corresponding buttons valplot.toggle_category('J_R1', True) valplot.toggle_category('J_R1_bind_vc', True) valplot.toggle_category('J_R1_gamma_keq', True) valplot.toggle_category('J_R1_ma', True) valplot.interact() """ Explanation: Parameter scans Parameter scans can be performed in order to determine the effect of a parameter change on a reaction rate and its individual terms or on the elasticity coefficients relating to a particular reaction and its related term elasticity coefficients (denoted as pec%reaction_%modifier_%term see basic_usage#syntax) . The procedure for both the "value" and "elasticity" scans are very much the same and rely on the same principles as described under basic_usage#plotting-and-displaying-results. To perform a parameter scan the do_par_scan method is called. 
This method has the following arguments: parameter: A String representing the parameter which should be varied. scan_range: Any iterable representing the range of values over which to vary the parameter (typically a NumPy ndarray generated by numpy.linspace or numpy.logspace). scan_type: Either "elasticity" or "value" as described above (default: "value"). init_return: If True the parameter value will be reset to its initial value after performing the parameter scan (default: True). par_scan: If True, the parameter scan will be performed by multiple parallel processes rather than a single process, thus speeding performance (default: False). par_engine: Specifies the engine to be used for the parallel scanning processes. Can either be "multiproc" or "ipcluster". A discussion of the differences between these methods is beyond the scope of this document, see here for a brief overview of Multiprocessing in Python. (default: "multiproc"). Below we will perform a value scan of the effect of $V_{f3}$ on the terms of reaction 1 for 200 points between 0.01 and 100000 in log space: End of explanation """ valscan = tk.J_R1.do_par_scan('Vf_3',scan_range=numpy.logspace(-2,5,200),scan_type='value') valplot = valscan.plot() # Equivalent to clicking the corresponding buttons valplot.toggle_category('J_R1', True) valplot.toggle_category('J_R1_bind_vc', True) valplot.toggle_category('J_R1_gamma_keq', True) valplot.toggle_category('J_R1_ma', True) valplot.interact() """ Explanation: Similarly, we can perform an elasticity scan using the same parameters: End of explanation """ ecscan = tk.J_R1.do_par_scan('Vf_3',scan_range=numpy.logspace(-2,5,200),scan_type='elasticity') """ Explanation: ..
note:: Elasticity coefficients with expression equal to zero (which will by definition have zero values regardless of any parameter values) are ommitted from the parameter scan results even though they are included in the ec_results objects. End of explanation """ tk.save_results() """ Explanation: Saving results In addition to being able to save parameter scan results (as previously described in basic_usage#scanfig), a summary of the results found in reaction_results and ec_results can be saved using the save_results method. This saves a csv file (by default) to disk to any specified location. If no location is specified, a file named tk_summary_N is saved to the ~/Pysces/$modelname/thermokin/ directory, where N is a number starting at 0: End of explanation """ # the following code requires `pandas` to run import pandas as pd # load csv file at default path results_path = '~/Pysces/lin4_fb/thermokin/tk_summary_0.csv' # Correct path depending on platform - necessary for platform independent scripts if platform == 'win32': results_path = psctb.utils.misc.unix_to_windows_path(results_path) else: results_path = path.expanduser(results_path) saved_results = pd.read_csv(results_path) # show first 20 lines saved_results.head(n=20) """ Explanation: save_results has the following optional arguments: file_name: Specifies a path to save the results to. If None, the path defaults as described above. separator: The separator between fields (default: ",") The contents of the saved data file is as follows: End of explanation """
rreimche/infdiffusion
Diffusion of REAL news.ipynb
mit
client = pymongo.MongoClient("46.101.236.181") db = client.allfake # get collection names collections = sorted([collection for collection in db.collection_names()]) """ Explanation: Init config Select appropriate: - database server (line 1): give pymongo.MongoClient() an appropriate parameter, else it is localhost - database (line 2): either client.databasename or client['databasename'] End of explanation """ day = {} # number of tweets per day per collection diff = {} # cumulative diffusion on day per collection for collection in collections: # timeframe relevant_from = db[collection].find().sort("timestamp", pymongo.ASCENDING).limit(1)[0]['timestamp'] relevant_till = db[collection].find().sort("timestamp", pymongo.DESCENDING).limit(1)[0]['timestamp'] i = 0 day[collection] = [] # number of tweets for every collection for every day diff[collection] = [] # cumulative diffusion for every collection for every day averagediff = [] # average diffusion speed for every day for all news d = relevant_from delta = datetime.timedelta(days=1) while d <= relevant_till: # tweets per day per collection day[collection].append(db[collection].find({"timestamp":{"$gte": d, "$lt": d + delta}}).count()) # cumulative diffusion per day per collection if i == 0: diff[collection].append( day[collection][i] ) else: diff[collection].append( diff[collection][i-1] + day[collection][i] ) d += delta i += 1 """ Explanation: Count number of tweets per day for every news headline, calculate cumulative diffusion End of explanation """ # the longest duration of diffusion among all news headlines max_days = max([len(day[coll]) for coll in \ [days_col for days_col in day] ]) summ_of_diffusions = [0] * max_days # summary diffusion for every day # calculate summary diffusion for every day for d in range(max_days): for c in collections: # if there is an entry for this day for this collection, add its number of tweets to the number of this day if d < len(day[c]): summ_of_diffusions[d] += day[c][d]
plt.step(range(len(summ_of_diffusions)),summ_of_diffusions, 'g') plt.xlabel('Day') plt.ylabel('Number of tweets') plt.title('Diffusion of all real news together') plt.show() """ Explanation: Plot diffusion for every day for all news together End of explanation """ summ_of_diffusions_cumulative = [0] * max_days # summ_of_diffusions_cumulative[0] = summ_of_diffusions[0] for d in range(1, max_days): summ_of_diffusions_cumulative[d] += summ_of_diffusions_cumulative[d-1] + summ_of_diffusions[d] plt.step(range(len(summ_of_diffusions_cumulative)),summ_of_diffusions_cumulative, 'g') plt.xlabel('Day') plt.ylabel('Cummulative number of tweets') plt.title('Cummulative diffusion of all real news together') plt.show() """ Explanation: Plot cummulative diffusion of all news together End of explanation """ for collection in collections: plt.step([d+1 for d in range(len(diff[collection]))], diff[collection]) plt.xlabel('Day') plt.ylabel('Cummulative number of tweets') plt.title('Cumulative diffusion of real news headlines') plt.show() """ Explanation: Plot cummulative diffusion for every news headline End of explanation """ averagediff = [0 for _ in range(max_days)] # average diffusion for every day for collection in collections: for i,d in enumerate(day[collection]): averagediff[i] += d / len(collections) plt.xlabel('Day') plt.ylabel('Average number of tweets') plt.step(range(1,len(averagediff)+1),averagediff, 'g') plt.title('Average diffusion of real news') plt.show() """ Explanation: Average diffusion per day for all news End of explanation """ plt.ylabel('Average number of tweets') plt.xlabel('Day') plt.yscale('log') plt.step(range(1,len(averagediff)+1),averagediff, 'g') plt.show() """ Explanation: The same graph but in logarithmic scale End of explanation """ avgdiff_std = [0 for _ in range(max_days)] # standard deviation for every day for all collections number_tweets = [[] for _ in range(max_days)] # number of tweets for every day for every collection for d in 
range(max_days): for c in collections: # if there is an entry for this day for this collection if d < len(day[c]): # add number of tweets for this day for this colletion to the number_tweets for this day number_tweets[d].append(day[c][d]) # calculate standard deviation for this day avgdiff_std[d] = np.std(number_tweets[d]) plt.ylabel('Standart deviation for average number of tweets per day') plt.xlabel('Day') plt.step(range(1,len(avgdiff_std)+1),avgdiff_std, 'g') plt.title('Standard deviation for real news average') plt.show() """ Explanation: Calculate and plot standart deviation End of explanation """ inside_std = [0 for _ in range(max_days)] # number of values inside one standard deviation for every day inside_std_share = [0 for _ in range(max_days)] # share of values inside one standard deviation for every day for d in range(max_days): for c in collections: # set borders of mean plusminus one std lowest = averagediff[d] - avgdiff_std[d] highest = averagediff[d] + avgdiff_std[d] # if there is entray for this day for this collection and its value is inside the borderes if d < len(day[c]) and (day[c][d] >= lowest and day[c][d] <= highest): # increment number of values inside one std for this day inside_std[d] += 1 # calculate the share of values inside one std for this day inside_std_share[d] = inside_std[d] / float(len(number_tweets[d])) plt.ylabel('Percent of values in 1 std from average') plt.xlabel('Day') plt.scatter(range(1,len(inside_std_share)+1),inside_std_share, c='g') plt.title('Percentage of values inside the range\n of one standard deviation from mean for real news') plt.show() """ Explanation: Calculate and plot share of values inside one standard deviation for every day End of explanation """ averagediff_real = averagediff %store averagediff_real """ Explanation: Store average diffusion data on hard drive to use by another jupyter notebook End of explanation """ # from hard drive, load data for average diffusion of fake news %store -r 
averagediff_fake plt.xlabel('Day') plt.ylabel('Average number of tweets') plt.step(range(1,len(averagediff)+1),averagediff, 'g', label="real news") plt.step(range(1,len(averagediff_fake)+1),averagediff_fake, 'r', label="fake news") plt.legend() plt.title('Average diffusion for both types of news') plt.show() """ Explanation: Plot average diffusion for both real and fake news on one graph End of explanation """ plt.ylabel('Average number of tweets') plt.xlabel('Day') plt.yscale('log') plt.step(range(1,len(averagediff_fake)+1),averagediff_fake, 'r', range(1,len(averagediff)+1),averagediff, 'g') plt.show() """ Explanation: In logarithmic scale End of explanation """ diffDurationAvg = 0; # average duration of diffusion durations = [len(day[col]) for col in collections] # all durations diffDurationAvg = np.mean(durations) # mean duration diffDurationAvg_std = np.std(durations) # standard deviation for the mean print "Average diffusion duration: %.2f days" % diffDurationAvg print "Standard deviation: %.2f days" % diffDurationAvg_std """ Explanation: Calculate average diffusion duration (number of days until difussion is dead) End of explanation """
rigetticomputing/pyquil
docs/source/quilt_getting_started.ipynb
apache-2.0
from pyquil import Program, get_qc qc = get_qc("Aspen-8") """ Explanation: Getting Up and Running with Quil-T Language Documentation See https://github.com/rigetti/quil for documentation on the Quil-T language. Construct a QuantumComputer object linked to the Quil-T compiler End of explanation """ qc.compiler.get_version_info() """ Explanation: As a sanity check, the following call should work. End of explanation """ cals = qc.compiler.get_calibration_program() """ Explanation: Get Quil-T Calibrations A production QPU has a set of calibrations associated with it. These include frame definitions, gate and measurement calibrations, and custom waveforms. Below we show how to get the default calibrations. End of explanation """ from pyquil.quilatom import Frame # Look for CZ frames. cz_frames = filter(lambda f: f[0].name == "cz", cals.frames.items()) # The first elt is the frame (of type Frame) and the second elt is # the frame definition (of type DefFrame). print(next(cz_frames)[1]) """ Explanation: The calibration_program property of QPUCompiler provides cached access to the QPU calibration information. Upon first using this property a request will be made for the calibration information and may take some time to complete. Subsequent usage of this property will use the cached calibrations and thus will be instantaneous. It should be noted therefore that calibrations will vary with time and should be regularly refreshed though the specifics of when to refresh the calibrations is left as an exercise for the user. See QPUCompiler#refresh_calibration_program. Frame Definitions Frame definitions correspond to specific hardware channels. These have a name (e.g. 0 "ro_rx" for the hardware readout receive channel on Qubit 0), and some metadata (DAC sample rate, initial frame frequency, and a direction). Note: These are fixed and should not be edited. 
If you wish to set a frame's frequency to one different from its initial frequency, your Quil-T program should use SET-FREQUENCY (for an absolute value) or SHIFT-FREQUENCY (for a relative shift). End of explanation """ print(len(cals.calibrations), "total calibrations, peeking at first two:\n") for defn in cals.calibrations[:2]: print(defn) """ Explanation: Gate Calibrations Gate and Measurement calibrations present the current Quil-T specification of Rigetti's native gates. End of explanation """ cals.waveforms """ Explanation: Waveform Definitions Certain gates (e.g. RX gates above) use template waveforms. Others, notably CZ gates, use custom waveforms. The waveforms member maps waveform names to their definitions. End of explanation """ print(next(iter(cals.waveforms.values()), None)) """ Explanation: Here is what one of these definitions looks like. End of explanation """ prog = Program( 'DECLARE ro BIT', 'H 0', 'CNOT 0 1', 'MEASURE 0 ro' ) compiled = qc.compiler.quil_to_native_quil(prog) exe = qc.compiler.native_quil_to_executable(compiled) """ Explanation: Compiling and running a Quil-T Program There are three ways to access the compiler from pyQuil: qc.compile is the usual pipeline, and only works for Quil code qc.compiler.quil_to_native_quil is the entry point for compiling Quil to native Quil qc.compiler.native_quil_to_executable is the entry point for compiling Quil-T programs In particular, the usual workflow of just delegating to qc.compile does not currently work with Quil-T. If you wish to use Quil-T right now, your workflow should involve 1. calling qc.compiler.quil_to_native_quil on code blocks which do not involve Quil-T or Quil-T calibrations 2. subsequently calling qc.compiler.native_quil_to_executable on blocks involving Quil-T Compiling Quil Programs qc.compiler.native_quil_to_executable requires native Quil + Quil-T operations. 
In particular, it is assumed that the only gates used are those Rigetti native gates, or ones for which you have provided explicit calibrations. For example, the program below expresses DECLARE ro BIT H 0 CNOT 0 1 MEASURE 0 ro using Rigetti native gates by first using qc.compiler.quil_to_native_quil. End of explanation """ qc.run(exe) """ Explanation: Note: The above compilation may be done even when not on an active QPU reservation. However, as always, the executable cannot be run until on an active QPU reservation the QPU settings used for compilation may go stale Therefore, we suggest that although you may rely on qc.compiler.native_quil_to_executable for development purposes (for example, to verify correct Quil-T syntax), when executing on a QPU all Quil-T programs should be compiled afresh. Running the executable proceeds as before: End of explanation """ def t1_program(time, qubit, num_shots=1000): prog = Program( "DECLARE ro BIT\n" f"RX(pi) {qubit}\n" f"FENCE 0\n" f"DELAY {qubit} {time}\n" f"MEASURE {qubit} ro") prog.wrap_in_numshots_loop(num_shots) return prog import numpy as np probs = [] times = np.geomspace(20e-9, 60e-4, 20) for time in times: prog = t1_program(time, 0) exe = qc.compiler.native_quil_to_executable(prog) results = qc.run(exe) prob = np.sum(results) / results.size probs.append(prob) print(f"time: {time:.2e} \tprob: {prob:.2}") %matplotlib inline import matplotlib.pyplot as plt plt.semilogx(times, probs, '-') plt.xlabel('time (s)') plt.ylabel('p') plt.show() """ Explanation: Another example: a simple T1 experiment As an example of mixing Quil with the new Quil-T instructions, we consider a simple T1 experiment. In short, we excite the qubit state wait some amount of time measure out In this example, we do not do any further data analysis. The purpose here is simply to demonstrate how to compile and run a Quil-T program. 
End of explanation """ from math import pi from pyquil.gates import RX print(cals.get_calibration(RX(pi, 0))) """ Explanation: Using a Custom Calibration All gate and measure applications in a Quil-T program are translated according to either user-supplied or Rigetti calibrations. To easily check which calibration applies to a specific gate, use the Program.match_calibrations method. End of explanation """ prog = t1_program(1e-6, 0) # Note: we did NOT specify a calibration for RX(pi) 0 in our previous program assert prog.match_calibrations(RX(pi, 0)) is None # The Quil-T translator provided the default: namely, the same calibration we obtained for ourselves # with `qc.compiler.get_calibration_program()`. """ Explanation: None of the above programs needed us to specify calibrations. By default, the Rigetti native calibrations are used. However, if you specify a calibration in a program, it will take precedence over the native calibrations. End of explanation """ prog = Program("""DEFCAL RX(pi/2) 0: FENCE 0 SET-SCALE 0 "rf" 0.353088482172993 SHIFT-FREQUENCY 0 "rf" 1e6 NONBLOCKING PULSE 0 "rf" drag_gaussian(duration: 6.000000000000001e-08, fwhm: 1.5000000000000002e-08, t0: 3.0000000000000004e-08, anh: -210000000.0, alpha: 6.389096630631076) SHIFT-FREQUENCY 0 "rf" -1e6 FENCE 0 DECLARE ro BIT RX(pi/2) 0 MEASURE 0 ro""") print(prog) print(prog.get_calibration(RX(pi/2, 0))) exe = qc.compiler.native_quil_to_executable(prog) qc.run(exe) """ Explanation: In the example below, we use a custom calibration, we conjugate the usual pulse with a frequency shift. There's no motivation for the particular value used, beyond simply showing what is possible. 
End of explanation """ prog = Program(""" DECLARE ro BIT FENCE 0 SET-SCALE 0 "rf" 0.353088482172993 SHIFT-FREQUENCY 0 "rf" 1e6 NONBLOCKING PULSE 0 "rf" drag_gaussian(duration: 6.000000000000001e-08, fwhm: 1.5000000000000002e-08, t0: 3.0000000000000004e-08, anh: -210000000.0, alpha: 6.389096630631076) SHIFT-FREQUENCY 0 "rf" -1e6 FENCE 0 MEASURE 0 ro """.strip()) exe = qc.compiler.native_quil_to_executable(prog) qc.run(exe) """ Explanation: Of course, it is not required to use calibrations. One can construct an equivalent program by replacing the RX gate with the body of the calibration: End of explanation """ from pyquil.gates import RZ print(cals.get_calibration(RZ(pi,0))) """ Explanation: Parametric Calibrations Some calibrations (e.g. for RX) are defined for specific parameter values. Others may depend on general symbolic values, as with RZ. End of explanation """ match = cals.match_calibrations(RZ(pi,0)) print(match.cal) print(match.settings) """ Explanation: To get more information about how the matching calibration applies to a specific gate, use Program.match_calibrations. The result is a CalibrationMatch object which indicates not just the calibration, but the value for parameters. End of explanation """ instrs = cals.calibrate(RZ(pi,0)) for instr in instrs: print(instr) """ Explanation: You may conveniently recover the body of the calibration, with the matched parameters substituted, using Program.calibrate. End of explanation """ prog = Program("DECLARE ro BIT\n" "H 0\n" "CNOT 0 1\n" "MEASURE 0 ro") try: qc.compiler.native_quil_to_executable(prog) except Exception as e: print("Fails on non-native operations {H, CNOT} as expected.") """ Explanation: Non-Native Gates As mentioned above, the qc.compiler.native_quil_to_executable call will provide calibrations for Rigetti native gates, if they are not provided by a user. However, a program with non-native gates and no corresponding user-provided calibrations will result in a compilation failure. 
End of explanation """
Erotemic/ubelt
docs/notebooks/demo_CacheStamp.ipynb
apache-2.0
import ubelt as ub dpath = ub.Path.appdir('stamp-demo').delete().ensuredir() fpath1 = dpath / 'large-file1.txt' fpath2 = dpath / 'large-file2.txt' stamp = ub.CacheStamp('stamp-name', dpath=dpath, product=[fpath1, fpath2]) # If the stamp is expired, we need to recompute the process if stamp.expired(): fpath1.write_text('large-data1') fpath2.write_text('large-data2') # After the process is complete, renew the stamp stamp.renew() # Next time the code is run, the stamp will not be expired assert not stamp.expired() """ Explanation: The ubelt.CacheStamp class is used to mark that a block of code has been run, and it's output has been written to disk. You set up a CacheStamp by giving it a name and letting it know what files we expect to already exist or that need to be written. Then you, check if the stamp is "expired". If it is, you need to recompute the data you wish to cache and "renew" the stamp. If it is not expired, then you can expect that: The file already exist on disk. The file has not been tampered with since you wrote it. Running renew records the size, modification time (mtime), and hash (checksum) of each file registered via product. Running expired checks checks that these attributes match with existing files on disk, which gives you the tamperproof guarentee. This mechanism is similar to how Makefiles and other build systems (e.g. CMake, redo) handle detecting when files are modified. (Note that it is possible to disable the hash checks by specifying hasher=None while still retaining size and mtime checks, this is useful when hashing files it too expensive). End of explanation """ import time # Tell the stamp it will expire 2 seconds, and renew it to set that property. stamp.expires = 2 stamp.renew() assert not stamp.expired(), 'should not be expired yet' # Wait 2 seconds time.sleep(2.1) # The stamp is now expired assert stamp.expired(), 'the stamp should be expired' """ Explanation: The 1.1.0 implementation of CacheStamp also contains other features. 
For instance, you can set an expiration duration or time for the file to expire. All properties can be updated via the constructor or by setting instance attributes. We can demo the expired property by reusing the above stamp. End of explanation """ import ubelt as ub url = 'https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5.tar.gz' dpath = ub.Path.appdir('stamp-download-demo').delete().ensuredir() fpath = dpath / 'cmake-3.22.5.tar.gz' stamp = ub.CacheStamp( 'download-stamp', dpath=dpath, product=fpath, hash_prefix='057d3d40d49fe1503edb62735a73de399d90c92c', ) if stamp.expired(): ub.download(url, fpath=fpath) stamp.renew() """ Explanation: You can also specify an expected hash prefix for each file, which is useful when you know what file will be produced a-priori (e.g. downloading a known file, in fact the ubelt.grabdata mechanism is now implemented with ubelt.CacheStamp). It works something like this: End of explanation """
teuben/astr288p
notebooks/orbits-01.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import math """ Explanation: Two Dimensional Galactic Orbits set initial conditions (x0,y0) and (vx0,vy0) in the plane z=0 set integration time step set number of integrations or a final integration stop time define the potential and the forces as derivatives of the potential End of explanation """ def radius(x): """length of a vector""" if len(x.shape) == 1: return math.sqrt(np.inner(x,x)) # elif len(x.shape) == 2: def potential(pos): """potential, defined as a negative number""" r = radius(pos) y1 = 1+r*r return -1.0/math.sqrt(y1) def angmomz(pos,vel): """Angular momentum in Z""" return pos[0]*vel[1] - pos[1]*vel[0] def energy(pos,vel): """Kinetic and Potential energy""" return 0.5*np.inner(vel,vel) + potential(pos) def force(pos): """force/acceleration (in our units mass=1 scale-length=1)""" # note we might be able to use sympy r = radius(pos) y2 = 1.0/math.sqrt(1+r*r) return -pos*y2*y2*y2 """ Explanation: The Plummer potential for mass $M_p$ and core radius $r_c$ is given by $$ \Phi = - { M_p \over { {(r_c^2 + r^2)}^{1/2} } } \tag{1a} $$ and is also used to described softened gravity of a point mass (think of the case $r_c = 0$) for N-body calculations. The force is the gradient of the potential $$ f = -\nabla \Phi \tag{1b} $$ We also want to record the total energy (kinetic and potential): $$ E = { 1\over 2} v^2 + \Phi \tag{1c} $$ and angular momentum $$ J = r \times v \tag{1d} $$ although we will only be using the Z component of this vector since we are computing orbits restricted to the Z plane: $$ J_z = x \times v_y - y \times v_x \tag{1e} $$ Q1: We use a smooth function for the potential with smooth derivatives. What would happen with the "galaxy" potential from the previous lecture. Force Field We need some helper functions to compute (1a)..(1e). For simplicity we use $M_p = 1$ and $r_c=1$. 
End of explanation """ def step0(pos,vel, dt): """step0: simple first order Euler""" old = pos pos = pos + dt*vel vel = vel + dt*force(old) return (pos,vel) def step1(pos,vel, dt): """step1: simple first order Euler - updating position first""" pos = pos + dt*vel vel = vel + dt*force(pos) return (pos,vel) def step2(pos,vel, dt): """step2: simple first order Euler - updating velocity first""" vel = vel + dt*force(pos) pos = pos + dt*vel return (pos,vel) def step4(pos,vel,dt): """step4: Runge Kutta 4 """ # not implemented yet return None """ Explanation: Integrator Here we write a step function to solve $$ { d\boldsymbol{x} \over dt } = \boldsymbol{v} \tag{2a} $$ and $$ { d\boldsymbol{v} \over dt } = f(\boldsymbol{x}) \tag{2b} $$ in the discretization we write this as (in a simple first order Euler algorithm) xnew = xold + dt * vold vnew = vold + dt * fold End of explanation """ def show_stats(data): """Show some stats of a numpy array""" m = data.mean() s = data.std() dmin = data.min() dmax = data.max() rmin = (dmin-m)/s rmax = (dmax-m)/s print("Mean/Std:",m,s) print("Min/Max:",dmin,dmax) print("Rmin/Rmax:",rmin,rmax) print("Goodness: ",s/m) """ Explanation: Helper functions End of explanation """ x0 = 1.0 # initial X coordinate v0 = 0.1 # initial Y launch velocity (0.5946 would be a circular orbit) n = 200 # number of steps to take dt = 0.1 # integration time step step = step1 # pick an integration method print(step.__doc__) # Derived variables for the remainder t = 0.0 # always start at t=0 pos = np.array([x0, 0.0, 0.0]) # keeps the current pos vel = np.array([0.0, v0, 0.0]) # and vel e = energy(pos,vel) j = angmomz(pos,vel) time = np.zeros(1) # time array (we'll append to this) time[0] = t phase = np.concatenate(([t,e,j],pos,vel)).reshape(1,9) # watch this peculiar print("e0 =",e) print("phase = ",phase) # at x0=1.0 this should be the correct speed for a circular orbit print("v0_circular=",1/math.pow(2.0,0.75)) """ Explanation: Initial conditions For 2D orbits we 
only specify the X coordinate and Y velocity. The remaining values of the 6 phase space coordinates are 0. Why is this? End of explanation """ %%time for i in range(n): (pos,vel) = step(pos,vel,dt) t = t + dt e = energy(pos,vel) j = angmomz(pos,vel) #print(i,pos,vel) p = np.concatenate(([t,e,j],pos,vel)).reshape(1,9) phase = np.concatenate((phase, p),axis=0) time = np.append(time,t) #print(phase) plt.scatter(phase[:,3],phase[:,4],c=time) plt.axis('equal') plt.title("Orbit") x = phase[:,3] y = phase[:,4] rad = np.sqrt(x*x+y*y)-1 plt.scatter(phase[:,0],rad) plt.scatter(phase[:,0], phase[:,1]) plt.title("Conserving Energy?") show_stats(phase[:,1]) plt.scatter(phase[:,0], phase[:,2]) plt.title("Conserving Angular Momentum?") show_stats(phase[:,2]) """ Explanation: Integrate The following cell takes the last (pos,vel) and takes n steps in time dt The cell after this will plot the orbit. If you re-execute the stepper cell, it will append, and shows how the orbit "grows" (or not). End of explanation """ try: import cPickle as pickle print("using cPickle") except: import pickle print("using pickle") # write it pickle.dump(phase,open("orbit1.p","wb")) # read it again phase2 = pickle.load(open("orbit1.p","rb")) print(phase[0]) print(phase2[0]) """ Explanation: Saving data There are many good and less ideal ways to save data. In astronomy standard formats such has FITS and HDF5 are common. For our work here we use a simple and fast native python method, called pickle. You can save whole objects, and reading them back in will ensure the whole object structure and hierarchy is preserved. 
End of explanation """ from scipy.integrate import odeint def ofunc(y,t): """ function to integrate Note we are re-using the force() function from the first part of this notebook """ pos = y[0:3] vel = y[3:] return np.concatenate((vel,force(pos))) n=200 phase0 = np.array([x0,0,0, 0,v0,0]) # initial conditions times = np.arange(0.0,(n+1)*dt,dt) # requested times where we want a solution # times = np.linspace(0.0,n*dt,n+1) print(ofunc(phase0,0.0)) %%time orbit = odeint(ofunc, phase0, times) plt.scatter(orbit[:,0],orbit[:,1],c=times) plt.axis('equal') #plt.scatter(phase[:,3],phase[:,4]) plt.title("Orbit") # plot the old one again plt.scatter(phase[:,3],phase[:,4],c=time) plt.axis('equal') plt.title("Orbit") # compare the last p1 = phase[-1,3:] p2 = orbit[-1,:] # print(phase[-1,0],p1) print(times[-1],p2) print(0.0,p1-p2) """ Explanation: Questions If we are just doing two dimensional orbits, can't we just leave the Z off and speed up computations? What do you need to change to do this? How would we look for the period orbit? If want to squash the potential and make it slightly oval, what would the changes be. Here we would define an ellipsoidal radius on which the potential is constant: $$ r^2 = { x^2 \over a^2} + { y^2 \over b^2 } $$ instead of the normal $$ r^2 = x^2 + y^2 $$ A 2009 IAS lecture by Tremaine is an excellent lecture for (symplectic) orbit integrators. See https://video.ias.edu/PiTP2009-Tremaine Orbits using scipy For many scientific applications there are canned routines made available by the community. The scipy package is one such module. We will derive the same orbit integration using scipy.odeint See e.g. https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.integrate.odeint.html#scipy.integrate.odeint or better https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/integrate.html However, this function uses the usual ODE notation (cf. 
(2a) and (2b)) $$ { d\boldsymbol{y} \over dt } = f(\boldsymbol{y},t) \tag{3} $$ End of explanation """ # e = energy(pos,vel) # j = angmomz(pos,vel) et = np.zeros(len(times)) jt = np.zeros(len(times)) for i in range(len(times)): pos=orbit[i,:3] vel=orbit[i,3:] et[i] = energy(pos,vel) jt[i] = angmomz(pos,vel) plt.plot(times,et) plt.show() plt.plot(times,jt) plt.show() show_stats(et) show_stats(jt) """ Explanation: Energy and Angular Momentum conservation? Now plot the energy conservation for this method as we did for the hand crafted one; did "odeint" do better? Do the same for angular momentum. End of explanation """
MingChen0919/learning-apache-spark
notebooks/06-machine-learning/classification/naive-bayes-classification.ipynb
mit
from pyspark import SparkContext sc = SparkContext(master = 'local') from pyspark.sql import SparkSession spark = SparkSession.builder \ .appName("Python Spark SQL basic example") \ .config("spark.some.config.option", "some-value") \ .getOrCreate() """ Explanation: Create entry points to spark End of explanation """ iris = spark.read.csv('data/iris.csv', header=True, inferSchema=True) iris.show(5) iris.dtypes iris.describe().show() """ Explanation: load iris data End of explanation """ from pyspark.ml.linalg import Vectors from pyspark.sql import Row iris2 = iris.rdd.map(lambda x: Row(features=Vectors.dense(x[:-1]), species=x[-1])).toDF() iris2.show(5) """ Explanation: Merge features to create a features column End of explanation """ from pyspark.ml.feature import StringIndexer from pyspark.ml import Pipeline """ Explanation: Index label column with StringIndexer Import libraries End of explanation """ stringindexer = StringIndexer(inputCol='species', outputCol='label') stages = [stringindexer] pipeline = Pipeline(stages=stages) """ Explanation: Build pipeline Try to use pipeline whenever you can to get used to this format. 
End of explanation """ iris_df = pipeline.fit(iris2).transform(iris2) iris_df.show(5) """ Explanation: Transform data End of explanation """ iris_df.describe().show(5) iris_df.dtypes """ Explanation: Check the data one more time End of explanation """ train, test = iris_df.randomSplit([0.8, 0.2], seed=1234) """ Explanation: Naive Bayes classification Split data into training and test sets End of explanation """ from pyspark.ml.classification import NaiveBayes naivebayes = NaiveBayes(featuresCol="features", labelCol="label") """ Explanation: Build cross-validation model Estimator End of explanation """ from pyspark.ml.tuning import ParamGridBuilder param_grid = ParamGridBuilder().\ addGrid(naivebayes.smoothing, [0, 1, 2, 4, 8]).\ build() """ Explanation: Parameter grid End of explanation """ from pyspark.ml.evaluation import MulticlassClassificationEvaluator evaluator = MulticlassClassificationEvaluator() """ Explanation: Evaluator There are three categories in the label column. Therefore, we use MulticlassClassificationEvaluator End of explanation """ from pyspark.ml.tuning import CrossValidator crossvalidator = CrossValidator(estimator=naivebayes, estimatorParamMaps=param_grid, evaluator=evaluator) """ Explanation: Build cross-validation model End of explanation """ crossvalidation_mode = crossvalidator.fit(train) """ Explanation: Fit cross-validation model End of explanation """ pred_train = crossvalidation_mode.transform(train) pred_train.show(5) pred_test = crossvalidation_mode.transform(test) pred_test.show(5) """ Explanation: Prediction on training and test sets End of explanation """ print("The parameter smoothing has best value:", crossvalidation_mode.bestModel._java_obj.getSmoothing()) """ Explanation: Best model from cross validation End of explanation """ print('training data (f1):', evaluator.setMetricName('f1').evaluate(pred_train), "\n", 'training data (weightedPrecision): ', evaluator.setMetricName('weightedPrecision').evaluate(pred_train),"\n", 
'training data (weightedRecall): ', evaluator.setMetricName('weightedRecall').evaluate(pred_train),"\n", 'training data (accuracy): ', evaluator.setMetricName('accuracy').evaluate(pred_train)) """ Explanation: Prediction accurary Four accuracy matrices are avaiable for this evaluator. * f1 * weightedPrecision * weightedRecall * accuracy Prediction accuracy on training data End of explanation """ print('test data (f1):', evaluator.setMetricName('f1').evaluate(pred_test), "\n", 'test data (weightedPrecision): ', evaluator.setMetricName('weightedPrecision').evaluate(pred_test),"\n", 'test data (weightedRecall): ', evaluator.setMetricName('weightedRecall').evaluate(pred_test),"\n", 'test data (accuracy): ', evaluator.setMetricName('accuracy').evaluate(pred_test)) """ Explanation: Prediction accuracy on test data End of explanation """ train_conf_mat = pred_train.select('label', 'prediction') train_conf_mat.rdd.zipWithIndex().countByKey() """ Explanation: Confusion matrix Confusion matrix on training data End of explanation """ test_conf_mat = pred_test.select('label', 'prediction') test_conf_mat.rdd.zipWithIndex().countByKey() """ Explanation: Confusion matrix on test data End of explanation """
square/pysurvival
notebooks/Churn Prediction - Predicting when your customers will churn.ipynb
apache-2.0
# Importing modules import pandas as pd import numpy as np from matplotlib import pyplot as plt from pysurvival.datasets import Dataset %pylab inline # Reading the dataset raw_dataset = Dataset('churn').load() print("The raw_dataset has the following shape: {}.".format(raw_dataset.shape)) raw_dataset.head(2) """ Explanation: Churn Prediction - Predicting when your customers will churn 1 - Introduction A software as a service (SaaS) company provides a suite of products for Small-to-Medium enterprises, such as data storage, Accounting, Travel and Expenses management as well as Payroll management. So as to help the CFO forecast the acquisition and marketing costs for the next fiscal year, the Data Science team wants to build a churn model to predict when customers are likely to stop their monthly subscription. Thus, once customers have been flagged as likely to churn within a certain time window, the company could take the necessary retention actions. 2 - Dataset 2.1 - Description and Overview End of explanation """ # Creating one-hot vectors categories = ['product_travel_expense', 'product_payroll', 'product_accounting', 'us_region', 'company_size'] dataset = pd.get_dummies(raw_dataset, columns=categories, drop_first=True) # Creating the time and event columns time_column = 'months_active' event_column = 'churned' # Extracting the features features = np.setdiff1d(dataset.columns, [time_column, event_column] ).tolist() """ Explanation: 2.2 - From categorical to numerical There are several categorical features that need to be encoded into one-hot vectors: * product_travel_expense * product_payroll * product_accounting * us_region * company_size End of explanation """ # Checking for null values N_null = sum(dataset[features].isnull().sum()) print("The raw_dataset contains {} null values".format(N_null)) #0 null values # Removing duplicates if there exist N_dupli = sum(dataset.duplicated(keep='first')) dataset = 
dataset.drop_duplicates(keep='first').reset_index(drop=True) print("The raw_dataset contains {} duplicates".format(N_dupli)) # Number of samples in the dataset N = dataset.shape[0] """ Explanation: 3 - Exploratory Data Analysis As this tutorial is mainly designed to provide an example of how to use Pysurvival, we will not perform a thorough exploratory data analysis but we greatly encourage the reader to do so by taking a look at the predictive maintenance tutorial that provides a very detailed study. Here, we will just check if the dataset contains Null values or duplicated rows, and have a look at feature correlations. 3.1 - Null values and duplicates The first thing to do is checking if the raw_dataset contains Null values and has duplicated rows. End of explanation """ from pysurvival.utils.display import correlation_matrix correlation_matrix(dataset[features], figure_size=(30,15), text_fontsize=10) """ Explanation: As it turns out the raw_dataset doesn't have any Null values or duplicates. 3.2 - Correlations Let's compute and visualize the correlation between the features End of explanation """ # Building training and testing sets from sklearn.model_selection import train_test_split index_train, index_test = train_test_split( range(N), test_size = 0.35) data_train = dataset.loc[index_train].reset_index( drop = True ) data_test = dataset.loc[index_test].reset_index( drop = True ) # Creating the X, T and E inputs X_train, X_test = data_train[features], data_test[features] T_train, T_test = data_train[time_column], data_test[time_column] E_train, E_test = data_train[event_column], data_test[event_column] """ Explanation: 4 - Modeling 4.1 - Building the model So as to perform cross-validation later on and assess the performance of the model, let's split the dataset into training and testing sets. 
End of explanation """ from pysurvival.models.survival_forest import ExtraSurvivalTreesModel # Fitting the model xst = ExtraSurvivalTreesModel(num_trees=200) xst.fit(X_train, T_train, E_train, max_features="sqrt", max_depth=5, min_node_size=20, num_random_splits= 200 ) """ Explanation: Let's now fit an Extra Survival Trees model to the training set. Note: The choice of the model and hyperparameters was obtained using grid-search selection, not displayed in this tutorial. End of explanation """ # Computing variables importance xst.variable_importance_table.head(5) """ Explanation: 4.2 - Variables importance Having built a Survival Forest model allows us to compute the features importance: End of explanation """ from pysurvival.utils.metrics import concordance_index c_index = concordance_index(xst, X_test, T_test, E_test) print('C-index: {:.2f}'.format(c_index)) """ Explanation: Thanks to the feature importance, we get a better understanding of what drives retention or churn. Here, the Accounting and Payroll Management products, score on the satisfaction survey as well as the amount of time spent on the phone with customer support play a primordial role. Note: The importance is the difference in prediction error between the perturbed and unperturbed error rate as depicted by Breiman et al. 5 - Cross Validation In order to assess the model performance, we previously split the original dataset into training and testing sets, so that we can now compute its performance metrics on the testing set: 5.1 - C-index The C-index represents the global assessment of the model discrimination power: this is the model’s ability to correctly provide a reliable ranking of the survival times based on the individual risk scores. In general, when the C-index is close to 1, the model has an almost perfect discriminatory power; but if it is close to 0.5, it has no ability to discriminate between low and high risk subjects. 
End of explanation """ from pysurvival.utils.display import integrated_brier_score ibs = integrated_brier_score(xst, X_test, T_test, E_test, t_max=12, figure_size=(15,5)) print('IBS: {:.2f}'.format(ibs)) """ Explanation: 5.2 - Brier Score The Brier score measures the average discrepancies between the status and the estimated probabilities at a given time. Thus, the lower the score (usually below 0.25), the better the predictive performance. To assess the overall error measure across multiple time points, the Integrated Brier Score (IBS) is usually computed as well. End of explanation """ from pysurvival.utils.display import compare_to_actual results = compare_to_actual(xst, X_test, T_test, E_test, is_at_risk = False, figure_size=(16, 6), metrics = ['rmse', 'mean', 'median']) """ Explanation: The IBS is equal to 0.1 on the entire model time axis. This indicates that the model will have good predictive abilities. 6 - Predictions 6.1 - Overall predictions Now that we have built a model that seems to provide great performances, let's compare the time series of the actual and predicted number of customers who stop doing business with the SaaS company, for each time t. End of explanation """ from pysurvival.utils.display import create_risk_groups risk_groups = create_risk_groups(model=xst, X=X_test, use_log = True, num_bins=30, figure_size=(20, 4), low={'lower_bound':0, 'upper_bound':1.65, 'color':'red'}, medium={'lower_bound':1.65, 'upper_bound':2.2,'color':'green'}, high={'lower_bound':2.2, 'upper_bound':3, 'color':'blue'} ) """ Explanation: The model provides very good results overall as on an entire 12 months window, it only makes an average absolute error of ~7 customers. 6.2 - Individual predictions Now that we know that we can provide reliable predictions for an entire cohort, let's compute the probability of remaining a customer for all times t. First, we can construct the risk groups based on risk scores distribution. 
The helper function create_risk_groups, which can be found in pysurvival.utils.display, will help us do that: End of explanation """ # Initializing the figure fig, ax = plt.subplots(figsize=(15, 5)) # Selecting a random individual that experienced an event from each group groups = [] for i, (label, (color, indexes)) in enumerate(risk_groups.items()) : # Selecting the individuals that belong to this group if len(indexes) == 0 : continue X = X_test.values[indexes, :] T = T_test.values[indexes] E = E_test.values[indexes] # Randomly extracting an individual that experienced an event choices = np.argwhere((E==1.)).flatten() if len(choices) == 0 : continue k = np.random.choice( choices, 1)[0] # Saving the time of event t = T[k] # Computing the Survival function for all times t survival = xst.predict_survival(X[k, :]).flatten() # Displaying the functions label_ = '{} risk'.format(label) plt.plot(xst.times, survival, color = color, label=label_, lw=2) groups.append(label) # Actual time plt.axvline(x=t, color=color, ls ='--') ax.annotate('T={:.1f}'.format(t), xy=(t, 0.5*(1.+0.2*i)), xytext=(t, 0.5*(1.+0.2*i)), fontsize=12) # Show everything groups_str = ', '.join(groups) title = "Comparing Survival functions between {} risk grades".format(groups_str) plt.legend(fontsize=12) plt.title(title, fontsize=15) plt.ylim(0, 1.05) plt.show() """ Explanation: Here, it is possible to distinguish 3 main groups: low, medium and high risk groups. Because the C-index is high, the model will be able to rank the survival times of a random unit of each group, such that $t_{high} \leq t_{medium} \leq t_{low}$. Let's randomly select individual unit in each group and compare their likelihood to remain a customer. To demonstrate our point, we will purposely select units which experienced an event to visualize the actual time of event. 
End of explanation """ # Let's now save our model from pysurvival.utils import save_model save_model(xst, '/Users/xxx/Desktop/churn_csf.zip') """ Explanation: Here, we can see that the model manages to provide a great prediction of the event time. 7 - Conclusion We can now save our model so as to put it in production and score future customers. End of explanation """
kikocorreoso/brythonmagic
notebooks/Brython usage in the IPython notebook.ipynb
mit
import IPython IPython.version_info """ Explanation: The brythonmagic extension has been tested on: End of explanation """ %install_ext https://raw.github.com/kikocorreoso/brythonmagic/master/brythonmagic.py %load_ext brythonmagic """ Explanation: brythonmagic installation Just type the following: End of explanation """ from brythonmagic import load_brython_dev load_brython_dev() """ Explanation: And load the brython js lib in the notebook: End of explanation """ %%brython -p print('hello world!') """ Explanation: Warning In order to load javascript libraries in a safety way you should try to use https instead of http when possible (read more here). If you don't trust the source and/or the source cannot be loaded using https then you could download the javascript library and load it from a local location. Usage: The brythonmagic provides you a cell magic, %%brython, to run brython code and show the results in a html div tag below the code cell. You can use several options: -p, --print: will show you the generated html code below the results obtained from the brython code. -c, --container: you can define de name of the div container in case you want to 'play' with it in other cell. If you don't define an output the div will have and id with the following format 'brython-container-[random number between 0 and 999999]' -i, --input: you can pass variables defined in the Python namespace separated by commas. If you pass a python list it will be converted to a brython list, a python tuple will be converted to a brython tuple, a python dict will be converted to a brython dict, a python string will be converted to a brython string. -h, --html: you can pass a string with html markup code. This html code will be inserted inside the div container. In this way you can avoid the generation of HTML markup code via a Brython script so you can separate the layout from the 'action'. -s, --script: Use this option to provide and id to the script defined in the Brython code cell. 
Also, this value could be used to run the code of this cell in other brython cells. -S, --scripts: Use this option to run code previously defined in other Brython code cells. The values should be the provided values in the -s/--script option in other Brython code cells. -f, --fiddle: With this option, the code in the cell will be automatically uploaded to gist.github.com/ as an anonymous gist with several files in it. This files will be used to create an anonymous 'fiddle' on jsfiddle.net. Finally, some links will be printed in the output linking to the gist and the fiddle. See an example here (https://gist.github.com/anonymous/b664e8b4617afc09db6c and http://jsfiddle.net/gh/gist/library/pure/b664e8b4617afc09db6c/) -e, --embedfiddle: With this option, the code in the cell will be automatically uploaded to gist.github.com/ as an anonymous gist with several files in it. This files will be used to create an anonymous 'fiddle' on jsfiddle.net. Finally, some links will be printed in the output linking to the gist and the fiddle and an iframe will be created showing the fiddle on jsfiddle.net. [WARNING] This options may change as the brythonmagic is in active development. -p, --print option The following example shows the use of the -p, --print option. [HINT] The result of the print is shown in the javascript console of your browser. End of explanation """ %%brython -c my_container -p from browser import document, html # This will be printed in the js console of your browser print('Hello world!') # This will be printed in the container div on the output below document["my_container"] <= html.P("This text is inside the div", style = {"backgroundColor": "cyan"}) """ Explanation: -c, --container option In the following example can be seen the use of the -c, --container. The -p is also used to show you the result. 
See the id attribute of the div tag created: End of explanation """ data_list = [1,2,3,4] data_tuple = (1,2,3,4) data_dict = {'one': 1, 'two': 2} data_str = """ Hello GoodBye """ # A numpy array can be converted to a list and you will obtain a brython list import numpy as np data_arr = np.empty((3,2)) data_arr = data_arr.tolist() """ Explanation: -i, --input option In this example you can see how the data are passed to brython from python using the -i or --input option. First, we create some data in a regular Python cell. End of explanation """ %%brython -c p2b_data_example -i data_list data_tuple data_dict data_str data_arr from browser import document, html document["p2b_data_example"] <= html.P(str(data_list)) document["p2b_data_example"] <= html.P(str(type(data_list))) document["p2b_data_example"] <= html.P(str(data_tuple)) document["p2b_data_example"] <= html.P(str(type(data_tuple))) document["p2b_data_example"] <= html.P(str(data_dict)) document["p2b_data_example"] <= html.P(str(type(data_dict))) document["p2b_data_example"] <= html.P(data_str.replace('Hello', 'Hi')) document["p2b_data_example"] <= html.P(str(type(data_str))) document["p2b_data_example"] <= html.P(str(data_arr)) document["p2b_data_example"] <= html.P(str(type(data_arr))) """ Explanation: And now, the created data are passed to Brython and used in the Brython code cell. Remember that only Python lists, tuples, dicts and strings are allowed as inputs. End of explanation """ html = """ <div id="paragraph">Hi</div> """ %%brython -c html_ex -h html from browser import document document["paragraph"].style = { "color": "yellow", "fontSize": "100px", "lineHeight": "150px", "textAlign": "center", "backgroundColor": "black" } """ Explanation: -h, --html option In this example you can see how to create some HTML code in a cell and then use that HTML code in the brython cell. In this way you do not need to create the HTML code via scripting with Brython. 
End of explanation """ %%brython -s my_dummy_function def dummy_function(some_text): print(some_text) """ Explanation: -s, --script option With this option you are creating a reference of the code in the Brython cell (e.g., an id of the HTML script tag created to run the Brython code). So, if you need to use the code of the Brython cell in a future Brython cell you could reference it by its id. Let's see this on an example (the -p option is used to show you the generated code and how the id of the script tag is created): End of explanation """ %%brython -S my_dummy_function dummy_function('Hi') """ Explanation: -S, --scripts option This option could be used to call code created in a previous Brython code cell using its id (see the -s option above). In the following code cell we will use the dummy_function created in another Brython code cell. The dummy_function was created in a script tag with an id="my_dummy_function". [HINT] The result of the Brython code cell below is shown in the javascript console of your browser. End of explanation """ %%brython -f from browser import alert alert('hello world from jsfiddle!') """ Explanation: -f, --fiddle option With this option, the code in the cell will be automatically uploaded to gist.github.com/ as an anonymous gist with several files in it. This files will be used to create an anonymous 'fiddle' on jsfiddle.net. Finally, some links will be printed in the output linking to the gist and the fiddle. End of explanation """ %%brython -e from browser import alert alert('hello world from jsfiddle!') """ Explanation: -e, --embedfiddle option With this option, the code in the cell will be automatically uploaded to gist.github.com/ as an anonymous gist with several files in it. These files will be used to create an anonymous 'fiddle' on jsfiddle.net. Finally, some links will be printed in the output linking to the gist and the fiddle and an iframe will be created showing the fiddle on jsfiddle.net. 
End of explanation """ %%brython from browser import alert alert('Hello world!, Welcome to the brythonmagic!') """ Explanation: How to use Brython in the IPython notebook First step should be to read the brython documentation. You can find the docs here: http://brython.info/doc/en/index.html?lang=en In the following section I will show you some dummy examples. Hello world example In this example let's see how to pop up an alert window. This could be an standard 'Hello world!' example in the Brython world. End of explanation """ %%brython -c simple_example from browser import document, html for i in range(10): document["simple_example"] <= html.P(i) """ Explanation: Simple example, writing some numbers in the div container In this example we just write inside a &lt;div&gt; ten numbers using a &lt;P&gt; tag for each number. [HINT] To see the line numbers in the code cell just go to the cell and press &lt;CTRL&gt;-m and then l. Line 2: We import the libraries to use Line 4: A for loop :-P Line 10: We create a P tag and write the value of i inside. Finally, add the P element to the selected div, in this case the div with "simple_example" id attribute. End of explanation """ %%brython -c table from browser import document, html table = html.TABLE() for i in range(10): color = ['cyan','#dddddd'] * 5 table <= html.TR( html.TD(str(i+1) + ' x 2 =', style = {'backgroundColor':color[i]}) + html.TD((i+1)*2, style = {'backgroundColor':color[i]})) document['table'] <= table """ Explanation: A more useful example: A multiplication table In the following cell we create a multiplication table. First, we create a table tag. We append the table rows and cells (TR and TD tags) and, finally, we append the final table to the div with "table" id attribute. 
End of explanation """ %%brython -c canvas_example from browser.timer import request_animation_frame as raf from browser.timer import cancel_animation_frame as caf from browser import document, html from time import time import math # First we create a table to insert the elements table = html.TABLE(cellpadding = 10) btn_anim = html.BUTTON('Animate', Id="btn-anim", type="button") btn_stop = html.BUTTON('Stop', Id="btn-stop", type="button") cnvs = html.CANVAS(Id="raf-canvas", width=256, height=256) table <= html.TR(html.TD(btn_anim + btn_stop) + html.TD(cnvs)) document['canvas_example'] <= table # Now we access the canvas context ctx = document['raf-canvas'].getContext( '2d' ) # And we create several functions in charge to animate and stop the draw animation toggle = True def draw(): t = time() * 3 x = math.sin(t) * 96 + 128 y = math.cos(t * 0.9) * 96 + 128 global toggle if toggle: toggle = False else: toggle = True ctx.fillStyle = 'rgb(200,200,20)' if toggle else 'rgb(20,20,200)' ctx.beginPath() ctx.arc( x, y, 6, 0, math.pi * 2, True) ctx.closePath() ctx.fill() def animate(i): global id id = raf(animate) draw() def stop(i): global id print(id) caf(id) document["btn-anim"].bind("click", animate) document["btn-stop"].bind("click", stop) """ Explanation: Let's add some animation using HTML5 canvas technology... In the following example we draw a shape using the HTML5 canvas. Also, we add some controls to stop and animate the shape. The example has been adapted from the javascript example available here. End of explanation """ from brythonmagic import load_js_lib load_js_lib("http://d3js.org/d3.v3.js") """ Explanation: Interaction with other javascript libraries: D3.js In Brython there is a javascript library that allows to access objects available in the javascript namespace. In this example we are using a javascript object (D3.js library) from Brython. So, in order to allow Brython to access to D3 first you should load the D3 library. 
End of explanation """ %%brython -c simple_d3 from browser import window, document, html d3 = window.d3 container = d3.select("#simple_d3") svg = container.append("svg").attr("width", 100).attr("height", 100) circle1 = svg.append("circle").style("stroke", "gray").style("fill", "gray").attr("r", 40) circle1.attr("cx", 50).attr("cy", 50).attr("id", "mycircle") circle2 = svg.append("circle").style("stroke", "gray").style("fill", "white").attr("r", 20) circle2.attr("cx", 50).attr("cy", 50) def over(ev): document["mycircle"].style.fill = "blue" def out(ev): document["mycircle"].style.fill = "gray" document["mycircle"].bind("mouseover", over) document["mycircle"].bind("mouseout", out) """ Explanation: Now, we can access D3 objects(see example below). In the result you can see how the circle change its color when the mouse is over the circle. End of explanation """ %%brython -c manipulating from browser import document, html def hide(ev): divs = document.get(selector = 'div.input') for div in divs: div.style.display = "none" def show(ev): divs = document.get(selector = 'div.input') for div in divs: div.style.display = "inherit" document["manipulating"] <= html.BUTTON('Hide code cells', Id="btn-hide") document["btn-hide"].bind("click", hide) document["manipulating"] <= html.BUTTON('Show code cells', Id="btn-show") document["btn-show"].bind("click", show) """ Explanation: Manipulating the IPython notebook An example to hide or show the code cells using a button. End of explanation """ from random import randint n = 100 x = [randint(0,800) for i in range(n)] y = [randint(0,600) for i in range(n)] r = [randint(25,50) for i in range(n)] red = [randint(0,255) for i in range(n)] green = [randint(0,255) for i in range(n)] blue = [randint(0,255) for i in range(n)] """ Explanation: A more complete d3 example calculating things in Python and drawing results in Brython using D3.js A more complete D3 example. In this case, first we create some data in Python. 
End of explanation """ %%brython -c other_d3 -i x y r red green blue from browser import window, document, html d3 = window.d3 WIDTH = 800 HEIGHT = 600 container = d3.select("#other_d3") svg = container.append("svg").attr("width", WIDTH).attr("height", HEIGHT) class AddShapes: def __init__(self, x, y, r, red, green, blue, shape = "circle", interactive = True): self.shape = shape self.interactive = interactive self._color = "gray" self.add(x, y, r, red, green, blue) def over(self, ev): self._color = ev.target.style.fill document[ev.target.id].style.fill = "white" def out(self, ev): document[ev.target.id].style.fill = self._color def add(self, x, y, r, red, green, blue): for i in range(len(x)): self.idx = self.shape + '_' + str(i) self._color = "rgb(%s,%s,%s)" % (red[i], green[i], blue[i]) shaped = svg.append(self.shape).style("stroke", "gray").style("fill", self._color).attr("r", r[i]) shaped.attr("cx", x[i]).attr("cy", y[i]).attr("id", self.idx) if self.interactive: document[self.idx].bind("mouseover", self.over) document[self.idx].bind("mouseout", self.out) plot = AddShapes(x, y, r, red, green, blue, interactive = True) """ Explanation: And now, the data is passed to Brython to be used in a D3 plot. In this case, the D3.js library is already loaded so it is not necessary to load it. End of explanation """ from brythonmagic import load_js_lib load_js_lib("http://cdnjs.cloudflare.com/ajax/libs/openlayers/2.11/OpenLayers.js") """ Explanation: Mapping with Python in the IPython notebook using OpenLayers? In the following example we will use OpenLayers to center a map in a specific location, with a zoom and a projection and then we will draw some vector points around the location. As before, first we should load the OpenLayers.js library. 
End of explanation """ %%brython -c ol_map from browser import document, window ## Div layout document['ol_map'].style.width = "800px" document['ol_map'].style.height = "400px" document['ol_map'].style.border = "1px solid black" OpenLayers = window.OpenLayers ## Map _map = OpenLayers.Map.new('ol_map') ## Addition of an OpenStreetMap layer _layer = OpenLayers.Layer.OSM.new('Simple OSM map') _map.addLayer(_layer) ## Map centered on Lon, Lat = (-3.671416, 40.435897) and a zoom = 14 ## with a projection = "EPSG:4326" (Lat-Lon WGS84) _proj = OpenLayers.Projection.new("EPSG:4326") _center = OpenLayers.LonLat.new(-3.671416, 40.435897) _center.transform(_proj, _map.getProjectionObject()) _map.setCenter(_center, 10) ## Addition of some points around the defined location lons = [-3.670, -3.671, -3.672, -3.672, -3.672, -3.671, -3.670, -3.670] lats = [40.435, 40.435, 40.435, 40.436, 40.437, 40.437, 40.437, 40.436] points_layer = OpenLayers.Layer.Vector.new("Point Layer") for lon, lat in zip(lons, lats): point = OpenLayers.Geometry.Point.new(lon, lat) point.transform(_proj, _map.getProjectionObject()) _feat = OpenLayers.Feature.Vector.new(point) points_layer.addFeatures([_feat]) _map.addLayer(points_layer) # Add a control for the layers layer_switcher= OpenLayers.Control.LayerSwitcher.new({}) _map.addControl(layer_switcher) """ Explanation: And now we can create a map. End of explanation """ load_js_lib("http://cdnjs.cloudflare.com/ajax/libs/raphael/2.1.2/raphael-min.js") """ Explanation: Using Raphaël.js A dummy example using raphaël.js library. 
As usual, first we should include the library: End of explanation """ %%brython -c raphael_ex from browser import window from javascript import JSObject Raphael = window.Raphael paper = JSObject(Raphael("raphael_ex", 400, 400)) #Draw rectagle rect = paper.rect(1,1,398,398) rect.attr("stroke", "black") #Draw orbits for rot in range(90,280,60): ellipse = paper.ellipse(200, 200, 180, 50) ellipse.attr("stroke", "gray") ellipse.rotate(rot) #Draw nucleus nucleus = paper.circle(200,200,40) nucleus.attr("fill", "black") # Draw electrons electron = paper.circle(200, 20, 10) electron.attr("fill", "red") electron = paper.circle(44, 290, 10) electron.attr("fill", "yellow") electron = paper.circle(356, 290, 10) electron.attr("fill", "blue") """ Explanation: And now let's make a dumb example using JSObject. End of explanation """ %%brython from browser import doc, html def show_cell_number(on = True): cells = doc.get(selector = '.input_prompt') for i, cell in enumerate(cells): if on: if 'In' in cell.html and '<br>' not in cell.html: cell.html += "<br>cell #" + str(i) else: if 'In' in cell.text: cell.html = cell.html.split('<br>')[0] show_cell_number(on = True) """ Explanation: Include the cell number for each cell The cells starts by 0 and all the cells (markdown, headings, code,...) has a number. If we want to re-run some cells in a programmatically way it is useful to know the number of the cells to identify them. 
You can delete the cell numbers using show_cell_number(on = False): End of explanation """ %%brython from javascript import JSObject from browser import window IPython = window.IPython nb = IPython.notebook # This is used to prevent an infinite loop this_cell = nb.get_selected_index() for i in range(1,10): # Ths will run cells 1 to 9 (the beginning of the nb) cell = nb.get_cell(i) if cell.cell_type == "code" and i != this_cell: cell.execute() """ Explanation: Running Python cells as a loop Imagine you have several cells of code and you want just to modify some data and run again these cells as a loop not having to create a big cell with the code of the cells together. End of explanation """ %%brython from javascript import JSObject from browser import window IPython = window.IPython nb = IPython.notebook this_cell = nb.get_selected_index() total_cells = nb.ncells() code = "" first_cell = True for i in range(total_cells): cell = nb.get_cell(i) if cell.cell_type == "code" and i != this_cell: if first_cell: code += "# This cell has been generated automatically using a brython script\n\n" code += "# code from cell " + str(i) + '\n' first_cell = False else: code += "\n\n\n# code from cell " + str(i) + '\n' code += cell.get_text() + '\n' nb.insert_cell_below('code') new_cell = nb.get_cell(this_cell + 1) new_cell.set_text(code) """ Explanation: Get the code of all the cells and create a new cell with the code If you want to compile all the code used in a notebook you can use this recipe (<span style="color: red; background-color: yellow;">use crtl + Enter to run the cell if you don't want a bad behaviour</span>): End of explanation """ %%brython -s styling from browser import doc, html # Changing the background color body = doc[html.BODY][0] body.style = {"backgroundColor": "#99EEFF"} # Changing the color of the imput prompt inps = body.get(selector = ".input_prompt") for inp in inps: inp.style = {"color": "blue"} # Changin the color of the output cells outs = 
body.get(selector = ".output_wrapper") for out in outs: out.style = {"backgroundColor": "#E0E0E0"} # Changing the font of the text cells text_cells = body.get(selector = ".text_cell") for cell in text_cells: cell.style = {"fontFamily": """"Courier New", Courier, monospace""", "fontSize": "20px"} # Changing the color of the code cells. code_cells = body.get(selector = ".CodeMirror") for cell in code_cells: cell.style = {"backgroundColor": "#D0D0D0"} """ Explanation: Styling the nb Lets modify a little bit the look of the notebook. Warning: The result will be very ugly... End of explanation """
jhprinz/openpathsampling
examples/misc/tutorial_storage.ipynb
lgpl-2.1
import openpathsampling as paths """ Explanation: An introduction to Storage Introduction All we need is contained in the openpathsampling package End of explanation """ storage = paths.Storage('mstis.nc') storage """ Explanation: The storage itself is mainly a netCDF file and can also be used as such. Technically it is a subclass of netCDF4.Dataset and can use all of its functions in case we want to add additional tables to the file besides what we store using stores. You can of course also add new stores to the storage. Using Storage() will automatically create a set of needed storages when a new file is created. netCDF files are very generic while our Storage is more tuned to needs we have. It support etc native support for simtk.units, and can recursively store nested objects using JSON pickling. But we will get to that. Open the output from the 'alanine.ipynb' notebook to have something to work with End of explanation """ print storage.list_stores() cv = storage.cvs[0] cv(storage.trajectories[0])[0:10] """ Explanation: and have a look at what stores are available End of explanation """ snapshot_store = storage.snapshots """ Explanation: and we can access all of these using End of explanation """ print 'We have %d snapshots in our storage' % len(storage.snapshots) """ Explanation: Stores are lists In general it is useful to think about the storage as a set of lists. Each of these lists contain objects of the same type, e.g. Sample, Trajectory, Ensemble, Volume, ... The class instances used to access elements from the storage are called a store. Imagine you go into a store to buy and sell objects (luckily our stores are free). All the stores share the same storage space, which is a netCDF file on disc. Still, a store is not really a list or subclassed from a list, but it almost acts like one. 
End of explanation """ print storage.samples[2:4] """ Explanation: Loading objects In the same way we access lists we can also access these lists using slicing, and even lists of indices. Load by slicing End of explanation """ print storage.ensembles[[0,1,3]] """ Explanation: Load by list of indices End of explanation """ # storage.samples.save(my_sample) """ Explanation: Saving objects Saving is somehow special, since we try to deal exclusively with immutable objects. That means that once an object is saved, it cannot be changed. This is not completely true, since the netCDF file allow changing, but we try not to do it. The only exeption are collective variables, these can store their cached values and we want to store intermediate states so we add new values once we have computed these. This should be the only exception and we use the .sync command to update the status of a once saved collectivevariable Saving is easy. Just use .save on the store End of explanation """ # storage.save(my_sample) """ Explanation: and it will add the object to the end of our store list or do nothing, if the object has already been stored. It is important to note, that each object knows, if it has been stored already. This allows to write nice recursive saving without worrying that we save the same object several times. You can also store directly using the storage. Both is fine and the storage just delegates the task to the appropriate store. End of explanation """ # storage.samples[None] = my_sample # storage.samples[Ellipsis] = my_sample # storage.samples[...] = my_sample """ Explanation: For completeness you can also use __setitem__ to save, but since you cannot explicitely set the number you have to use None as the key or Ellipsis, ... is fine. End of explanation """ volume = storage.volumes[1] print storage.repr_json(volume) """ Explanation: I mentioned recursive saving. This does the following. Imagine a sample snapshot which itself has a Configuration and a Momentum object. 
If you store the snapshot, it also stores the content using the appropriate stores. This can be arbitrarily complex. And most objects can either be stored in a special way or get converted into a JSON string that we can turn into an object again. Python has something like this built in, which works similarly, but we needed something that adds the recursive storage connection and uses JSON.
End of explanation """ print empty_ensemble.name print full_ensemble.name """ Explanation: And the ensemble now has the .name property set End of explanation """ print len(storage.tag) for t in storage.tag: print t storage.tag.keys() for name, obj in storage.tag.iteritems(): print '{:20s} : {:20s}'.format(name, obj.__class__.__name__) dict(storage.tag) """ Explanation: tags In storage exists a special store name tag. This is to reference any object and mostly to name stuff for later easy access. End of explanation """ samp = storage.samples[0] print storage.idx(samp) print samp.idx(storage) print samp.idx(storage.samples) """ Explanation: Indexing Each loaded object is equipped with a .idx attribute which is a dictionary that contains the index for a specific storage. This is necessary since we can - in theory - store an object in several different stores at once and these might have different indices. Note that idx is NOT a function, but a dictionary, hence the square brackets. End of explanation """ [ens.name for ens in storage.ensembles][2:4] """ Explanation: Iterators A list is iterable and so is a store. Lets load all ensembles and list their names End of explanation """ stA = storage.volumes['A'] first_5000_snaps = storage.snapshots[0:5000] reversed_samples = [snapshot for snapshot in first_5000_snaps if stA(snapshot)] print 'We found %d snapshots in StateA among %d total snapshots' % (len(reversed_samples), len(first_5000_snaps)) """ Explanation: Maybe you have realized that some command run slower the first time. This is because we use caching and once an object is loaded it stays in memory and can be accessed much faster. Searching for objects One way to find objects is to use their name, which I mentioned before, but in general there are no search functions, but we can use python notation in the usual way to load what we need. List comprehensions is the magic word. Say, we want to get all snapshots that are reversed. 
We could just load all of these and filter them, but there is a more elegant way to do that, or let's say a more elegant way of writing it in python, because the underlying code does just that. End of explanation """ print storage.samplesets[0] my_network = storage.networks[0] my_ensemble = my_network.sampling_ensembles[0] relevant_samples = [ sample for sample_set in storage.samplesets for sample in sample_set if sample.ensemble is my_ensemble ] print len(relevant_samples) """ Explanation: Lets do something more useful: For TIS ensemble we want statistics on pathlengths associated with sampled trajectories Sample objects that are sampled for a specific ensemble. And we one want samples that have been generated in our production runs and are present in a SampleSet TODO: add a way to select only specific SampleSets End of explanation """ list_of_path_lengths = [ len(sample.trajectory) for sample_set in storage.samplesets for sample in sample_set if sample.ensemble is my_ensemble ] print list_of_path_lengths if len(list_of_path_lengths) > 0: mean = float(sum(list_of_path_lengths))/len(list_of_path_lengths) else: mean = 0.0 # actually, it is not defined, so we just set it to zero print mean """ Explanation: and finally compute the average length End of explanation """ iterator_over_path_lengths = ( len(sample.trajectory) for sample_set in storage.samplesets for sample in sample_set if sample.ensemble is my_ensemble ) total = float(sum(iterator_over_path_lengths)) print total """ Explanation: Allright, we loaded from a bootstrapping sampling algorithm and the analysis is pointless, but still it is rather short considering what we just did. Generator expression There is another very cool feature about python that is worth noting: generator expressions. Before we used list comprehensions to generate a list of all that we need, but what, if we don't want the whole list at once? Maybe that is impossible because of too much memory and also not desirable? 
We can do the same thing as above using a generator (although it would only be useful if we had to average over billions of samples). So assume the list of lengths is too large for memory. The summing does not mind to use little pieces so we construct a function that always gives us the next element. These functions are called iterators and to make these iteratore there is syntactic way to create them easily: Instead of square brackets in in list comprehensions use round brackets. So the above example would look like this End of explanation """ iterator_over_path_lengths = ( len(sample.trajectory) for sample_set in storage.samplesets for sample in sample_set if sample.ensemble is my_ensemble ) total = 0 count = 0 for length in iterator_over_path_lengths: total += length count += 1 if count > 0: mean = float(total)/count else: mean = 0.0 # actually, it is not defined, so we just set it to zero print mean """ Explanation: Note that we now have a generator and no computed values yet. If we iterator using our iterator called generator it will pass one value at a time and we can use it in sum as we did before. There are two important things to note. Once an iteratore is used, it is consumed and we cannot just be run again so we need to change the code again. I assume there are other ways to do that, too End of explanation """ ff_movers = filter(lambda self : type(self) == paths.ForwardShootMover, storage.pathmovers) ff_movers if len(ff_movers) > 2: mover = ff_movers[2] print "Use a '%s' for ensemble(s) '%s'" % ( mover.cls, mover.ensemble.name ) """ Explanation: Voilà, this time without computing all length before! A last example that will be interesting is the statistics on acceptance. Each sample knows which mover was involved in its creation. This is stored in .details.mover in the .details attribute. Let us try to look at only forward moves End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/miroc/cmip6/models/sandbox-1/ocean.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'miroc', 'sandbox-1', 'ocean') """ Explanation: ES-DOC CMIP6 Model Properties - Ocean MIP Era: CMIP6 Institute: MIROC Source ID: SANDBOX-1 Topic: Ocean Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. Properties: 133 (101 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-20 15:02:41 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Seawater Properties 3. Key Properties --&gt; Bathymetry 4. Key Properties --&gt; Nonoceanic Waters 5. Key Properties --&gt; Software Properties 6. Key Properties --&gt; Resolution 7. Key Properties --&gt; Tuning Applied 8. Key Properties --&gt; Conservation 9. Grid 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Discretisation --&gt; Horizontal 12. Timestepping Framework 13. Timestepping Framework --&gt; Tracers 14. Timestepping Framework --&gt; Baroclinic Dynamics 15. Timestepping Framework --&gt; Barotropic 16. 
Timestepping Framework --&gt; Vertical Physics 17. Advection 18. Advection --&gt; Momentum 19. Advection --&gt; Lateral Tracers 20. Advection --&gt; Vertical Tracers 21. Lateral Physics 22. Lateral Physics --&gt; Momentum --&gt; Operator 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff 24. Lateral Physics --&gt; Tracers 25. Lateral Physics --&gt; Tracers --&gt; Operator 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity 28. Vertical Physics 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum 32. Vertical Physics --&gt; Interior Mixing --&gt; Details 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum 35. Uplow Boundaries --&gt; Free Surface 36. Uplow Boundaries --&gt; Bottom Boundary Layer 37. Boundary Forcing 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing 1. Key Properties Ocean key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean model code (NEMO 3.6, MOM 5.0,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OGCM" # "slab ocean" # "mixed layer ocean" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Family Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Primitive equations" # "Non-hydrostatic" # "Boussinesq" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Basic approximations made in the ocean. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # "Salinity" # "U-velocity" # "V-velocity" # "W-velocity" # "SSH" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the ocean component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Wright, 1997" # "Mc Dougall et al." # "Jackett et al. 2006" # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Seawater Properties Physical properties of seawater in ocean 2.1. 
Eos Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # TODO - please enter value(s) """ Explanation: 2.2. Eos Functional Temp Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Temperature used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Practical salinity Sp" # "Absolute salinity Sa" # TODO - please enter value(s) """ Explanation: 2.3. Eos Functional Salt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Salinity used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pressure (dbars)" # "Depth (meters)" # TODO - please enter value(s) """ Explanation: 2.4. Eos Functional Depth Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Depth or pressure used in EOS for sea water ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2.5. 
Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.6. Ocean Specific Heat Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specific heat in ocean (cpocean) in J/(kg K) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.7. Ocean Reference Density Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Boussinesq reference density (rhozero) in kg / m3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Present day" # "21000 years BP" # "6000 years BP" # "LGM" # "Pliocene" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Bathymetry Properties of bathymetry in ocean 3.1. Reference Dates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Reference date of bathymetry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.2. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the bathymetry fixed in time in the ocean ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Ocean Smoothing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any smoothing or hand editing of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.source') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Source Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe source of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Nonoceanic Waters Non oceanic waters treatement in ocean 4.1. Isolated Seas Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how isolated seas is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. River Mouth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how river mouth mixing or estuaries specific treatment is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Software Properties Software properties of ocean code 5.1. 
Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Resolution Resolution in the ocean grid 6.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Range Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.4. Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.5. Number Of Vertical Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.6. Is Adaptive Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.7. 
Thickness Level 1 Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Thickness of first surface ocean level (in meters) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Tuning Applied Tuning methodology for ocean component 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the ocean component 8.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Brief description of conservation methodology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Enstrophy" # "Salt" # "Volume of ocean" # "Momentum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in the ocean by the numerical schemes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Consistency Properties Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Corrected Conserved Prognostic Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Set of variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.5. Was Flux Correction Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Does conservation involve flux correction ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Grid Ocean grid 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of grid in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Z-coordinate" # "Z*-coordinate" # "S-coordinate" # "Isopycnic - sigma 0" # "Isopycnic - sigma 2" # "Isopycnic - sigma 4" # "Isopycnic - other" # "Hybrid / Z+S" # "Hybrid / Z+isopycnic" # "Hybrid / other" # "Pressure referenced (P)" # "P*" # "Z**" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Properties of vertical discretisation in ocean 10.1. 
Coordinates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical coordinates in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 10.2. Partial Steps Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Using partial steps with Z or Z vertical coordinate in ocean ?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Lat-lon" # "Rotated north pole" # "Two north poles (ORCA-style)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Discretisation --&gt; Horizontal Type of horizontal discretisation scheme in ocean 11.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal grid type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa E-grid" # "N/a" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Staggering Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal grid staggering type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite difference" # "Finite volumes" # "Finite elements" # "Unstructured grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.3. 
Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Timestepping Framework Ocean Timestepping Framework 12.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Via coupling" # "Specific treatment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. Diurnal Cycle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Diurnal cycle type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Timestepping Framework --&gt; Tracers Properties of tracers time stepping in ocean 13.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time stepping scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.2. 
Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Preconditioned conjugate gradient" # "Sub cyling" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Timestepping Framework --&gt; Baroclinic Dynamics Baroclinic dynamics in ocean 14.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Baroclinic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "split explicit" # "implicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. 
Timestepping Framework --&gt; Barotropic Barotropic time stepping in ocean 15.1. Splitting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time splitting method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.2. Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Barotropic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Timestepping Framework --&gt; Vertical Physics Vertical physics time stepping in ocean 16.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of vertical time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17. Advection Ocean advection 17.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of advection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flux form" # "Vector form" # TODO - please enter value(s) """ Explanation: 18. Advection --&gt; Momentum Properties of lateral momemtum advection scheme in ocean 18.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of lateral momemtum advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Scheme Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean momemtum advection scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.ALE') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 18.3. ALE Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Using ALE for vertical advection ? (if vertical coordinates are sigma) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19. Advection --&gt; Lateral Tracers Properties of lateral tracer advection scheme in ocean 19.1. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 19.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for lateral tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19.3. 
Effective Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Effective order of limited lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.4. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ideal age" # "CFC 11" # "CFC 12" # "SF6" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19.5. Passive Tracers Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Passive tracers advected End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.6. Passive Tracers Advection Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Is advection of passive tracers different than active ? if so, describe. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20. Advection --&gt; Vertical Tracers Properties of vertical tracer advection scheme in ocean 20.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for vertical tracer advection scheme in ocean (e.g. 
MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 20.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for vertical tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21. Lateral Physics Ocean lateral physics 21.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of lateral physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Eddy active" # "Eddy admitting" # TODO - please enter value(s) """ Explanation: 21.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of transient eddy representation in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Lateral Physics --&gt; Momentum --&gt; Operator Properties of lateral physics operator for momentum in ocean 22.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff Properties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean 23.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics momemtum eddy viscosity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 23.2. 
Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.3. Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24. Lateral Physics --&gt; Tracers Properties of lateral physics for tracers in ocean 24.1. 
Mesoscale Closure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a mesoscale closure in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24.2. Submesoscale Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Lateral Physics --&gt; Tracers --&gt; Operator Properties of lateral physics operator for tracers in ocean 25.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean 26.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics tracers eddy diffusity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.2. Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.3. 
Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.4. Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 26.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "GM" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean 27.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV in lateral physics tracers in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 27.2. 
Constant Val Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If EIV scheme for tracers is constant, specify coefficient value (M2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.3. Flux Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV flux (advective or skew) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.4. Added Diffusivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV added diffusivity (constant, flow dependent or none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28. Vertical Physics Ocean Vertical Physics 28.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of vertical physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details Properties of vertical physics in ocean 29.1. Langmuir Cells Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there Langmuir cells mixing in upper ocean ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers *Properties of boundary layer (BL) mixing on tracers in the ocean * 30.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.4. 
Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum *Properties of boundary layer (BL) mixing on momentum in the ocean * 31.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.3. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 31.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Non-penetrative convective adjustment" # "Enhanced vertical diffusion" # "Included in turbulence closure" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32. Vertical Physics --&gt; Interior Mixing --&gt; Details *Properties of interior mixing in the ocean * 32.1. Convection Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical convection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32.2. Tide Induced Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how tide induced mixing is modelled (barotropic, baroclinic, none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.3. Double Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there double diffusion End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.4. Shear Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there interior shear mixing End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers *Properties of interior mixing on tracers in the ocean * 33.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 33.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.3. Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum *Properties of interior mixing on momentum in the ocean * 34.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 34.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.3. 
Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 35. Uplow Boundaries --&gt; Free Surface Properties of free surface in ocean 35.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of free surface in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear implicit" # "Linear filtered" # "Linear semi-explicit" # "Non-linear implicit" # "Non-linear filtered" # "Non-linear semi-explicit" # "Fully explicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 35.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Free surface scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 35.3. 
Embeded Seaice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the sea-ice embeded in the ocean model (instead of levitating) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36. Uplow Boundaries --&gt; Bottom Boundary Layer Properties of bottom boundary layer in ocean 36.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diffusive" # "Acvective" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.2. Type Of Bbl Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 36.3. Lateral Mixing Coef Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36.4. 
Sill Overflow Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any specific treatment of sill overflows End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37. Boundary Forcing Ocean boundary forcing 37.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of boundary forcing in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.2. Surface Pressure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.3. Momentum Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.4. Tracers Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.5. Wave Effects Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how wave effects are modelled at ocean surface. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.6. River Runoff Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how river runoff from land surface is routed to ocean and any global adjustment done. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.7. Geothermal Heating Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how geothermal heating is present at ocean bottom. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Non-linear" # "Non-linear (drag function of speed of tides)" # "Constant drag coefficient" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction Properties of momentum bottom friction in ocean 38.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum bottom friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Free-slip" # "No-slip" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction Properties of momentum lateral friction in ocean 39.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum lateral friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "1 extinction depth" # "2 extinction depth" # "3 extinction depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration Properties of sunlight penetration scheme in ocean 40.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of sunlight penetration scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 40.2. Ocean Colour Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the ocean sunlight penetration scheme ocean colour dependent ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 40.3. 
Extinction Depth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe and list extinctions depths for sunlight penetration scheme (if applicable). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing Properties of surface fresh water forcing in ocean 41.1. From Atmopshere Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from atmos in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Real salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41.2. From Sea Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from sea-ice in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 41.3. Forced Mode Restoring Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface salinity restoring in forced mode (OMIP) End of explanation """
dietmarw/EK5312_ElectricalMachines
Chapman/Ch6-Problem_6-11.ipynb
unlicense
%pylab notebook """ Explanation: Excercises Electric Machinery Fundamentals Chapter 6 Problem 6-11 End of explanation """ fse = 60 # [Hz] n_nl = 1100 # [r/min] p = 6 """ Explanation: Description The input power to the rotor circuit of a six-pole, 60 Hz, induction motor running at 1100 r/min is 5&nbsp;kW. End of explanation """ n_sync = 120*fse / p print('n_sync = {:.0f} r/min'.format(n_sync)) """ Explanation: What is the rotor copper loss in this motor? SOLUTION This synchronous speed of this motor is: $$n_\text{sync} = \frac{120f_{se}}{p}$$ End of explanation """ s_nl = (n_sync - n_nl) / n_sync print('s_nl = {:.2f} %'.format(s_nl*100)) """ Explanation: The slip of the rotor is: $$s_{nl} = \frac{n_\text{sync}-n_{nl}}{n_\text{sync}} \cdot 100\%$$ End of explanation """ Pag = 5000 # [W] """ Explanation: The air gap power is the input power to the rotor, so: End of explanation """ Pconv = (1-s_nl) * Pag print('Pconv = {:.0f} W'.format(Pconv)) """ Explanation: The power converted from electrical to mechanical form is: $$P_\text{conv} = (1-s)P_{AG}$$ End of explanation """ Prcl = Pag - Pconv print(''' Prcl = {:.0f} W ============'''.format(Prcl)) """ Explanation: The rotor copper losses are the difference between the air gap power and the power converted to mechanical form, so: $$P_\text{RCL} = P_{AG}-P_\text{conv}$$ End of explanation """
highb/deep-learning
autoencoder/Convolutional_Autoencoder.ipynb
mit
%matplotlib inline import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', validation_size=0) img = mnist.train.images[2] plt.imshow(img.reshape((28, 28)), cmap='Greys_r') """ Explanation: Convolutional Autoencoder Sticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data. End of explanation """ learning_rate = 0.001 image_shape = (None, 28, 28, 1) # Input and target placeholders inputs_ = tf.placeholder(dtype=tf.float32, shape=image_shape, name="inputs") targets_ = tf.placeholder(dtype=tf.float32, shape=image_shape, name="targets") pool_size = (2, 2) strides = (2, 2) ### Encoder conv1 = tf.layers.conv2d(inputs=inputs_, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv1.shape) # Now 28x28x16 maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=pool_size, strides=strides, padding='same') #print(maxpool1.shape) # Now 14x14x16 conv2 = tf.layers.conv2d(inputs=maxpool1, filters=8, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv2.shape) # Now 14x14x8 maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=pool_size, strides=strides, padding='same') #print(maxpool2.shape) # Now 7x7x8 conv3 = tf.layers.conv2d(inputs=maxpool2, filters=8, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv3.shape) # Now 7x7x8 encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=pool_size, strides=strides, padding='same') #print(encoded.shape) # Now 4x4x8 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(images=encoded, size=(7,7)) #print(upsample1.shape) # Now 7x7x8 conv4 = tf.layers.conv2d(inputs=upsample1, filters=8, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv4.shape) # Now 7x7x8 upsample2 = tf.image.resize_nearest_neighbor(images=conv4, size=(14,14)) 
#print(upsample2.shape) # Now 14x14x8 conv5 = tf.layers.conv2d(inputs=upsample2, filters=8, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv5.shape) # Now 14x14x8 upsample3 = tf.image.resize_nearest_neighbor(images=conv5, size=(28,28)) #print(upsample3.shape) # Now 28x28x8 conv6 = tf.layers.conv2d(inputs=upsample3, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv6.shape) # Now 28x28x16 logits = tf.layers.conv2d(inputs=conv6, filters=1, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #Now 28x28x1 # Pass logits through sigmoid to get reconstructed image decoded = tf.nn.sigmoid(logits, name='output') # Pass logits through sigmoid and calculate the cross-entropy loss loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) # Get cost and define the optimizer cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(learning_rate).minimize(cost) """ Explanation: Network Architecture The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below. <img src='assets/convolutional_autoencoder.png' width=500px> Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data. 
What's going on with the decoder
Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see transposed convolution layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 patch in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, tf.nn.conv2d_transpose.
However, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.
Exercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used to reduce the height and width. A stride of 2 will reduce the size by 2.
Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor. End of explanation """ sess = tf.Session() epochs = 20 batch_size = 200 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) imgs = batch[0].reshape((-1, 28, 28, 1)) batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))}) for images, row in zip([in_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) sess.close() """ Explanation: Training As before, here we'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays. 
End of explanation """ learning_rate = 0.001 inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs') targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets') pool_size = (2, 2) strides = (2, 2) #Now 28x28x1 ### Encoder conv1 = tf.layers.conv2d(inputs=inputs_, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv1.shape) # Now 28x28x32 maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=pool_size, strides=strides, padding='same') #print(maxpool1.shape) # Now 14x14x32 conv2 = tf.layers.conv2d(inputs=maxpool1, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv2.shape) # Now 14x14x32 maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=pool_size, strides=strides, padding='same') #print(maxpool2.shape) # Now 7x7x32 conv3 = tf.layers.conv2d(inputs=maxpool2, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv3.shape) # Now 7x7x16 encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=pool_size, strides=strides, padding='same') #print(encoded.shape) # Now 4x4x16 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(images=encoded, size=(7,7)) #print(upsample1.shape) # Now 7x7x16 conv4 = tf.layers.conv2d(inputs=upsample1, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv4.shape) # Now 7x7x16 upsample2 = tf.image.resize_nearest_neighbor(images=conv4, size=(14,14)) #print(upsample2.shape) # Now 14x14x16 conv5 = tf.layers.conv2d(inputs=upsample2, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv5.shape) # Now 14x14x32 upsample3 = tf.image.resize_nearest_neighbor(images=conv5, size=(28,28)) #print(upsample3.shape) # Now 28x28x32 conv6 = tf.layers.conv2d(inputs=upsample3, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu) #print(conv6.shape) # Now 28x28x32 logits = tf.layers.conv2d(inputs=conv6, filters=1, kernel_size=(3,3), padding='same', 
activation=tf.nn.relu) #Now 28x28x1 # Pass logits through sigmoid to get reconstructed image decoded = tf.nn.sigmoid(logits, name='output') # Pass logits through sigmoid and calculate the cross-entropy loss loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) # Get cost and define the optimizer cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(learning_rate).minimize(cost) sess = tf.Session() epochs = 100 batch_size = 200 # Set's how much noise we're adding to the MNIST images noise_factor = 0.5 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images from the batch imgs = batch[0].reshape((-1, 28, 28, 1)) # Add random noise to the input images noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape) # Clip the images to be between 0 and 1 noisy_imgs = np.clip(noisy_imgs, 0., 1.) # Noisy images as inputs, original images as targets batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) """ Explanation: Denoising As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images. Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. 
Otherwise the architecture is the same as before. Exercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers. End of explanation """ fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape) noisy_imgs = np.clip(noisy_imgs, 0., 1.) reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))}) for images, row in zip([noisy_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) """ Explanation: Checking out the performance Here I'm adding noise to the test images and passing them through the autoencoder. It does a suprisingly great job of removing the noise, even though it's sometimes difficult to tell what the original number is. End of explanation """
ktmud/deep-learning
intro-to-tflearn/TFLearn_Digit_Recognition.ipynb
mit
# Import Numpy, TensorFlow, TFLearn, and MNIST data import numpy as np import tensorflow as tf import tflearn import tflearn.datasets.mnist as mnist """ Explanation: Handwritten Number Recognition with TFLearn and MNIST In this notebook, we'll be building a neural network that recognizes handwritten numbers 0-9. This kind of neural network is used in a variety of real-world applications including: recognizing phone numbers and sorting postal mail by address. To build the network, we'll be using the MNIST data set, which consists of images of handwritten numbers and their correct labels 0-9. We'll be using TFLearn, a high-level library built on top of TensorFlow to build the neural network. We'll start off by importing all the modules we'll need, then load the data, and finally build the network. End of explanation """ # Retrieve the training and test data trainX, trainY, testX, testY = mnist.load_data(one_hot=True) """ Explanation: Retrieving training and test data The MNIST data set already contains both training and test data. There are 55,000 data points of training data, and 10,000 points of test data. Each MNIST data point has: 1. an image of a handwritten digit and 2. a corresponding label (a number 0-9 that identifies the image) We'll call the images, which will be the input to our neural network, X and their corresponding labels Y. We're going to want our labels as one-hot vectors, which are vectors that holds mostly 0's and one 1. It's easiest to see this in a example. As a one-hot vector, the number 0 is represented as [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], and 4 is represented as [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]. Flattened data For this example, we'll be using flattened data or a representation of MNIST images in one dimension rather than two. So, each handwritten number image, which is 28x28 pixels, will be represented as a one dimensional array of 784 pixel values. 
Flattening the data throws away information about the 2D structure of the image, but it simplifies our data so that all of the training data can be contained in one array whose shape is [55000, 784]; the first dimension is the number of training images and the second dimension is the number of pixels in each image. This is the kind of data that is easy to analyze using a simple neural network. End of explanation """ import warnings # Visualizing the data import matplotlib.pyplot as plt %matplotlib inline # Function for displaying a training image by it's index in the MNIST set def show_digit(index): label = trainY[index].argmax(axis=0) # Reshape 784 array into 28x28 image image = trainX[index].reshape([28,28]) # plt.title('Training data, index: %d, Label: %d' % (index, label)) plt.imshow(image, cmap='gray_r') plt.show() # Display the first (index 0) training image show_digit(0) """ Explanation: Visualize the training data Provided below is a function that will help you visualize the MNIST data. By passing in the index of a training example, the function show_digit will display that training image along with it's corresponding label in the title. 
End of explanation """ def build_model(input_size, hidden_units, out_size, lr=0.1): # This resets all parameters and variables, leave this here tf.reset_default_graph() # Input -- [batch_size, input_vector_dimension] print('Input features size: %s' % input_size) net = tflearn.input_data([None, input_size]) # Hidden -- for n in hidden_units: net = tflearn.fully_connected(net, n, activation='ReLU') # Output -- net = tflearn.fully_connected(net, out_size, activation='softmax') # sgd: stochastic gradient descent net = tflearn.regression(net, optimizer='sgd', learning_rate=lr, loss='categorical_crossentropy') model = tflearn.DNN(net) return model # Build the model model = build_model(trainX.shape[1], [392, 196, 32], 10, 0.05) """ Explanation: Building the network TFLearn lets you build the network by defining the layers in that network. For this example, you'll define: The input layer, which tells the network the number of inputs it should expect for each piece of MNIST data. Hidden layers, which recognize patterns in data and connect the input to the output layer, and The output layer, which defines how the network learns and outputs a label for a given image. Let's start with the input layer; to define the input layer, you'll define the type of data that the network expects. For example, net = tflearn.input_data([None, 100]) would create a network with 100 inputs. The number of inputs to your network needs to match the size of your data. For this example, we're using 784 element long vectors to encode our input data, so we need 784 input units. Adding layers To add new hidden layers, you use net = tflearn.fully_connected(net, n_units, activation='ReLU') This adds a fully connected layer where every unit (or node) in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call, it designates the input to the hidden layer. 
You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeated calling tflearn.fully_connected(net, n_units). Then, to set how you train the network, use: net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy') Again, this is passing in the network you've been building. The keywords: optimizer sets the training method, here stochastic gradient descent learning_rate is the learning rate loss determines how the network error is calculated. In this example, with categorical cross-entropy. Finally, you put all this together to create the model with tflearn.DNN(net). Exercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc. Hint: The final output layer must have 10 output nodes (one for each digit 0-9). It's also recommended to use a softmax activation layer as your final output layer. End of explanation """ # Training model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=100) """ Explanation: Training the network Now that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively. Too few epochs don't effectively train your network, and too many take a long time to execute. Choose wisely! End of explanation """ # Compare the labels that our model predicts with the actual labels # Find the indices of the most confident prediction for each item. That tells us the predicted digit for that sample. 
predictions = np.array(model.predict(testX)).argmax(axis=1) # Calculate the accuracy, which is the percentage of times the predicated labels matched the actual labels actual = testY.argmax(axis=1) test_accuracy = np.mean(predictions == actual, axis=0) # Print out the result print("Test accuracy: ", test_accuracy) """ Explanation: Testing After you're satisified with the training output and accuracy, you can then run the network on the test data set to measure it's performance! Remember, only do this after you've done the training and are satisfied with the results. A good result will be higher than 95% accuracy. Some simple models have been known to get up to 99.7% accuracy! End of explanation """
schaber/deep-learning
dcgan-svhn/DCGAN_Exercises.ipynb
mit
%matplotlib inline import pickle as pkl import matplotlib.pyplot as plt import numpy as np from scipy.io import loadmat import tensorflow as tf !mkdir data """ Explanation: Deep Convolutional GANs In this notebook, you'll build a GAN using convolutional layers in the generator and discriminator. This is called a Deep Convolutional GAN, or DCGAN for short. The DCGAN architecture was first explored last year and has seen impressive results in generating new images, you can read the original paper here. You'll be training DCGAN on the Street View House Numbers (SVHN) dataset. These are color images of house numbers collected from Google street view. SVHN images are in color and much more variable than MNIST. So, we'll need a deeper and more powerful network. This is accomplished through using convolutional layers in the discriminator and generator. It's also necessary to use batch normalization to get the convolutional networks to train. The only real changes compared to what you saw previously are in the generator and discriminator, otherwise the rest of the implementation is the same. 
End of explanation """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm data_dir = 'data/' if not isdir(data_dir): raise Exception("Data directory doesn't exist!") class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(data_dir + "train_32x32.mat"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar: urlretrieve( 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat', data_dir + 'train_32x32.mat', pbar.hook) if not isfile(data_dir + "test_32x32.mat"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar: urlretrieve( 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat', data_dir + 'test_32x32.mat', pbar.hook) """ Explanation: Getting the data Here you can download the SVHN dataset. Run the cell above and it'll download to your machine. End of explanation """ trainset = loadmat(data_dir + 'train_32x32.mat') testset = loadmat(data_dir + 'test_32x32.mat') """ Explanation: These SVHN files are .mat files typically used with Matlab. However, we can load them in with scipy.io.loadmat which we imported above. End of explanation """ idx = np.random.randint(0, trainset['X'].shape[3], size=36) fig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),) for ii, ax in zip(idx, axes.flatten()): ax.imshow(trainset['X'][:,:,:,ii], aspect='equal') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) plt.subplots_adjust(wspace=0, hspace=0) """ Explanation: Here I'm showing a small sample of the images. Each of these is 32x32 with 3 color channels (RGB). These are the real images we'll pass to the discriminator and what the generator will eventually fake. 
End of explanation """ def scale(x, feature_range=(-1, 1)): # scale to (0, 1) x = ((x - x.min())/(255 - x.min())) # scale to feature_range min, max = feature_range x = x * (max - min) + min return x class Dataset: def __init__(self, train, test, val_frac=0.5, shuffle=False, scale_func=None): split_idx = int(len(test['y'])*(1 - val_frac)) self.test_x, self.valid_x = test['X'][:,:,:,:split_idx], test['X'][:,:,:,split_idx:] self.test_y, self.valid_y = test['y'][:split_idx], test['y'][split_idx:] self.train_x, self.train_y = train['X'], train['y'] self.train_x = np.rollaxis(self.train_x, 3) self.valid_x = np.rollaxis(self.valid_x, 3) self.test_x = np.rollaxis(self.test_x, 3) if scale_func is None: self.scaler = scale else: self.scaler = scale_func self.shuffle = shuffle def batches(self, batch_size): if self.shuffle: idx = np.arange(len(dataset.train_x)) np.random.shuffle(idx) self.train_x = self.train_x[idx] self.train_y = self.train_y[idx] n_batches = len(self.train_y)//batch_size for ii in range(0, len(self.train_y), batch_size): x = self.train_x[ii:ii+batch_size] y = self.train_y[ii:ii+batch_size] yield self.scaler(x), y """ Explanation: Here we need to do a bit of preprocessing and getting the images into a form where we can pass batches to the network. First off, we need to rescale the images to a range of -1 to 1, since the output of our generator is also in that range. We also have a set of test and validation images which could be used if we're trying to identify the numbers in the images. End of explanation """ def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, *real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z """ Explanation: Network Inputs Here, just creating some placeholders like normal. 
End of explanation """ def generator(z, output_dim, reuse=False, alpha=0.2, training=True): with tf.variable_scope('generator', reuse=reuse): # First fully connected layer x = tf.layers.dense(z, 4*4*512) x = tf.reshape(x, [-1,4,4,512]) x = tf.layers.batch_normalization(x, training=training) x = tf.maximum(alpha*x, x) #Now 4x4x512 # First conv layer x = tf.layers.conv2d_transpose(x, 256, 5, strides=2, padding='same') x = tf.layers.batch_normalization(x, training=training) x = tf.maximum(alpha*x, x) #Now 8x8x256 # Second conv layer x = tf.layers.conv2d_transpose(x, 128, 5, strides=2, padding='same') x = tf.layers.batch_normalization(x, training=training) x = tf.maximum(alpha*x, x) #Now 16x16x128 # Output layer, 32x32x3 logits = tf.layers.conv2d_transpose(x, output_dim, 5, strides=2, padding='same') out = tf.tanh(logits) return out """ Explanation: Generator Here you'll build the generator network. The input will be our noise vector z as before. Also as before, the output will be a $tanh$ output, but this time with size 32x32 which is the size of our SVHN images. What's new here is we'll use convolutional layers to create our new images. The first layer is a fully connected layer which is reshaped into a deep and narrow layer, something like 4x4x1024 as in the original DCGAN paper. Then we use batch normalization and a leaky ReLU activation. Next is a transposed convolution where typically you'd halve the depth and double the width and height of the previous layer. Again, we use batch normalization and leaky ReLU. For each of these layers, the general scheme is convolution > batch norm > leaky ReLU. You keep stacking layers up like this until you get the final transposed convolution layer with shape 32x32x3. Below is the archicture used in the original DCGAN paper: Note that the final layer here is 64x64x3, while for our SVHN dataset, we only want it to be 32x32x3. Exercise: Build the transposed convolutional network for the generator in the function below. 
Be sure to use leaky ReLUs on all the layers except for the last tanh layer, as well as batch normalization on all the transposed convolutional layers except the last one. End of explanation """ def discriminator(x, reuse=False, alpha=0.2): with tf.variable_scope('discriminator', reuse=reuse): # Input layer is 32x32x3 x = tf.layers.conv2d(x, 16, 5, strides=1, padding='same') #x = tf.layers.batch_normalization(x, training=True) x = tf.maximum(x, alpha*x) x = tf.layers.conv2d(x, 32, 5, strides=1, padding='same') x = tf.layers.batch_normalization(x, training=True) x = tf.maximum(x, alpha*x) x = tf.layers.conv2d(x, 64, 5, strides=1, padding='same') x = tf.layers.batch_normalization(x, training=True) x = tf.maximum(x, alpha*x) flat = tf.reshape(x, (-1, 4*4*256)) logits = tf.layers.dense(x, 1) out = tf.sigmoid(logits) return out, logits """ Explanation: Discriminator Here you'll build the discriminator. This is basically just a convolutional classifier like you've build before. The input to the discriminator are 32x32x3 tensors/images. You'll want a few convolutional layers, then a fully connected layer for the output. As before, we want a sigmoid output, and you'll need to return the logits as well. For the depths of the convolutional layers I suggest starting with 16, 32, 64 filters in the first layer, then double the depth as you add layers. Note that in the DCGAN paper, they did all the downsampling using only strided convolutional layers with no maxpool layers. You'll also want to use batch normalization with tf.layers.batch_normalization on each layer except the first convolutional and output layers. Again, each layer should look something like convolution > batch norm > leaky ReLU. Note: in this project, your batch normalization layers will always use batch statistics. (That is, always set training to True.) That's because we are only interested in using the discriminator to help train the generator. 
However, if you wanted to use the discriminator for inference later, then you would need to set the training parameter appropriately. Exercise: Build the convolutional network for the discriminator. The input is a 32x32x3 images, the output is a sigmoid plus the logits. Again, use Leaky ReLU activations and batch normalization on all the layers except the first. End of explanation """ def model_loss(input_real, input_z, output_dim, alpha=0.2): """ Get the loss for the discriminator and generator :param input_real: Images from the real dataset :param input_z: Z input :param out_channel_dim: The number of channels in the output image :return: A tuple of (discriminator loss, generator loss) """ g_model = generator(input_z, output_dim, alpha=alpha) d_model_real, d_logits_real = discriminator(input_real, alpha=alpha) d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, alpha=alpha) d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake))) g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake))) d_loss = d_loss_real + d_loss_fake return d_loss, g_loss """ Explanation: Model Loss Calculating the loss like before, nothing new here. 
End of explanation """ def model_opt(d_loss, g_loss, learning_rate, beta1): """ Get optimization operations :param d_loss: Discriminator loss Tensor :param g_loss: Generator loss Tensor :param learning_rate: Learning Rate Placeholder :param beta1: The exponential decay rate for the 1st moment in the optimizer :return: A tuple of (discriminator training operation, generator training operation) """ # Get weights and bias to update t_vars = tf.trainable_variables() d_vars = [var for var in t_vars if var.name.startswith('discriminator')] g_vars = [var for var in t_vars if var.name.startswith('generator')] # Optimize with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars) return d_train_opt, g_train_opt """ Explanation: Optimizers Not much new here, but notice how the train operations are wrapped in a with tf.control_dependencies block so the batch normalization layers can update their population statistics. End of explanation """ class GAN: def __init__(self, real_size, z_size, learning_rate, alpha=0.2, beta1=0.5): tf.reset_default_graph() self.input_real, self.input_z = model_inputs(real_size, z_size) self.d_loss, self.g_loss = model_loss(self.input_real, self.input_z, real_size[2], alpha=0.2) self.d_opt, self.g_opt = model_opt(self.d_loss, self.g_loss, learning_rate, beta1) """ Explanation: Building the model Here we can use the functions we defined about to build the model as a class. This will make it easier to move the network around in our code since the nodes and operations in the graph are packaged in one object. 
End of explanation """


def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):
    """Render one saved batch of generator samples in an nrows x ncols grid."""
    fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols, sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        ax.axis('off')
        # Rescale from the generator's output range to displayable uint8 0..255.
        img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)
        ax.set_adjustable('box-forced')
        im = ax.imshow(img, aspect='equal')
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, axes


""" Explanation: Here is a function for displaying generated images. End of explanation """


def train(net, dataset, epochs, batch_size, print_every=10, show_every=100, figsize=(5,5)):
    """Run the GAN training loop; returns (losses, samples) and checkpoints the session."""
    saver = tf.train.Saver()
    # Fixed noise vectors so the displayed samples are comparable over time.
    sample_z = np.random.uniform(-1, 1, size=(72, z_size))

    samples, losses = [], []
    steps = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for x, y in dataset.batches(batch_size):
                steps += 1

                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))

                # Run optimizers. input_real is fed to g_opt too because of the
                # control_dependencies block created in model_opt.
                _ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
                _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})

                if steps % print_every == 0:
                    # At the end of each epoch, get the losses and print them out
                    train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})

                    print("Epoch {}/{}...".format(e+1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % show_every == 0:
                    # training=False: use batch-norm population statistics for display.
                    gen_samples = sess.run(
                        generator(net.input_z, 3, reuse=True, training=False),
                        feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = view_samples(-1, samples, 6, 12, figsize=figsize)
                    plt.show()

        saver.save(sess, './checkpoints/generator.ckpt')

    # Persist the collected sample batches for later inspection.
    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)

    return losses, samples


""" Explanation: And another
function we can use to train our network. Notice when we call generator to create the samples to display, we set training to False. That's so the batch normalization layers will use the population statistics rather than the batch statistics. Also notice that we set the net.input_real placeholder when we run the generator's optimizer. The generator doesn't actually use it, but we'd get an errror without it because of the tf.control_dependencies block we created in model_opt. End of explanation """

# Hyperparameters for this training run.
# NOTE(review): the DCGAN paper suggests learning_rate=0.0002 and beta1=0.5;
# the values below (0.001 / 0.9) may destabilize training -- worth confirming.
real_size = (32,32,3)
z_size = 100
learning_rate = 0.001
batch_size = 64
epochs = 1
alpha = 0.01
beta1 = 0.9

# Create the network
net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1)

# Load the data and train the network here
dataset = Dataset(trainset, testset)

losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5))

# Plot the recorded discriminator/generator losses over training.
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
plt.plot(losses.T[1], label='Generator', alpha=0.5)
plt.title("Training Losses")
plt.legend()

# Show the last saved batch of generated samples.
_ = view_samples(-1, samples, 6, 12, figsize=(10,5))

""" Explanation: Hyperparameters GANs are very senstive to hyperparameters. A lot of experimentation goes into finding the best hyperparameters such that the generator and discriminator don't overpower each other. Try out your own hyperparameters or read the DCGAN paper to see what worked for them. Exercise: Find hyperparameters to train this GAN. The values found in the DCGAN paper work well, or you can experiment on your own. In general, you want the discriminator loss to be around 0.3, this means it is correctly classifying images as fake or real about 50% of the time. End of explanation """
diegocavalca/Studies
deep-learnining-specialization/2. improving deep neural networks/week3/programming-assignment/Tensorflow+Tutorial.ipynb
cc0-1.0
import math import numpy as np import h5py import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.python.framework import ops from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict %matplotlib inline np.random.seed(1) """ Explanation: TensorFlow Tutorial Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow: Initialize variables Start your own session Train algorithms Implement a Neural Network Programing frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code. 1 - Exploring the Tensorflow Library To start, you will import the library: End of explanation """ y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36. y = tf.constant(39, name='y') # Define y. Set to 39 loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss init = tf.global_variables_initializer() # When init is run later (session.run(init)), # the loss variable will be initialized and ready to be computed with tf.Session() as session: # Create a session and print the output session.run(init) # Initializes the variables print(session.run(loss)) # Prints the loss """ Explanation: Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example. 
$$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$ End of explanation """ a = tf.constant(2) b = tf.constant(10) c = tf.multiply(a,b) print(c) """ Explanation: Writing and running programs in TensorFlow has the following steps: Create Tensors (variables) that are not yet executed/evaluated. Write operations between those Tensors. Initialize your Tensors. Create a Session. Run the Session. This will run the operations you'd written above. Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run init=tf.global_variables_initializer(). That initialized the loss variable, and in the last line we were finally able to evaluate the value of loss and print its value. Now let us look at an easy example. Run the cell below: End of explanation """ sess = tf.Session() print(sess.run(c)) """ Explanation: As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it. End of explanation """ # Change the value of x in the feed_dict x = tf.placeholder(tf.int64, name = 'x') print(sess.run(2 * x, feed_dict = {x: 3})) sess.close() """ Explanation: Great! To summarize, remember to initialize your variables, create a session and run the operations inside the session. Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later. To specify values for a placeholder, you can pass in values by using a "feed dictionary" (feed_dict variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session. 
End of explanation """ # GRADED FUNCTION: linear_function def linear_function(): """ Implements a linear function: Initializes W to be a random tensor of shape (4,3) Initializes X to be a random tensor of shape (3,1) Initializes b to be a random tensor of shape (4,1) Returns: result -- runs the session for Y = WX + b """ np.random.seed(1) ### START CODE HERE ### (4 lines of code) X = np.random.randn(3,1) W = np.random.randn(4,3) b = np.random.randn(4,1) Y = tf.add(tf.matmul(W, X), b) ### END CODE HERE ### # Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate ### START CODE HERE ### sess = tf.Session() result = sess.run(Y) ### END CODE HERE ### # close the session sess.close() return result print( "result = " + str(linear_function())) """ Explanation: When you first defined x you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you feed data to these placeholders when running the session. Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph. 1.1 - Linear function Lets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. Exercise: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1): ```python X = tf.constant(np.random.randn(3,1), name = "X") ``` You might find the following functions helpful: - tf.matmul(..., ...) to do a matrix multiplication - tf.add(..., ...) 
to do an addition - np.random.randn(...) to initialize randomly End of explanation """ # GRADED FUNCTION: sigmoid def sigmoid(z): """ Computes the sigmoid of z Arguments: z -- input value, scalar or vector Returns: results -- the sigmoid of z """ ### START CODE HERE ### ( approx. 4 lines of code) # Create a placeholder for x. Name it 'x'. x = tf.placeholder(tf.float32, name = "x") # compute sigmoid(x) sigmoid = tf.sigmoid(x) # Create a session, and run it. Please use the method 2 explained above. # You should use a feed_dict to pass z's value to x. with tf.Session() as sess: # Run session and call the output "result" result = sess.run(sigmoid, feed_dict={x: z}) ### END CODE HERE ### return result print ("sigmoid(0) = " + str(sigmoid(0))) print ("sigmoid(12) = " + str(sigmoid(12))) """ Explanation: Expected Output : <table> <tr> <td> **result** </td> <td> [[-2.15657382] [ 2.95891446] [-1.08926781] [-0.84538042]] </td> </tr> </table> 1.2 - Computing the sigmoid Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like tf.sigmoid and tf.softmax. For this exercise lets compute the sigmoid function of an input. You will do this exercise using a placeholder variable x. When running the session, you should use the feed dictionary to pass in the input z. In this exercise, you will have to (i) create a placeholder x, (ii) define the operations needed to compute the sigmoid using tf.sigmoid, and then (iii) run the session. Exercise : Implement the sigmoid function below. You should use the following: tf.placeholder(tf.float32, name = "...") tf.sigmoid(...) 
sess.run(..., feed_dict = {x: z}) Note that there are two typical ways to create and use sessions in tensorflow: Method 1: ```python sess = tf.Session() Run the variables initialization (if needed), run the operations result = sess.run(..., feed_dict = {...}) sess.close() # Close the session **Method 2:**python with tf.Session() as sess: # run the variables initialization (if needed), run the operations result = sess.run(..., feed_dict = {...}) # This takes care of closing the session for you :) ``` End of explanation """ # GRADED FUNCTION: cost def cost(logits, labels): """     Computes the cost using the sigmoid cross entropy          Arguments:     logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)     labels -- vector of labels y (1 or 0) Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels" in the TensorFlow documentation. So logits will feed into z, and labels into y.          Returns:     cost -- runs the session of the cost (formula (2)) """ ### START CODE HERE ### # Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines) z = tf.placeholder(tf.float32, name = "z") y = tf.placeholder(tf.float32, name = "y") # Use the loss function (approx. 1 line) cost = tf.nn.sigmoid_cross_entropy_with_logits(logits = z, labels = y) # Create a session (approx. 1 line). See method 1 above. sess = tf.Session() # Run the session (approx. 1 line). cost = sess.run(cost, feed_dict={z:logits , y:labels}) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return cost logits = sigmoid(np.array([0.2,0.4,0.7,0.9])) cost = cost(logits, np.array([0,0,1,1])) print ("cost = " + str(cost)) """ Explanation: Expected Output : <table> <tr> <td> **sigmoid(0)** </td> <td> 0.5 </td> </tr> <tr> <td> **sigmoid(12)** </td> <td> 0.999994 </td> </tr> </table> <font color='blue'> To summarize, you how know how to: 1. Create placeholders 2. 
Specify the computation graph corresponding to operations you want to compute 3. Create the session 4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values. 1.3 - Computing the Cost You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{2}$ and $y^{(i)}$ for i=1...m: $$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$ you can do it in one line of code in tensorflow! Exercise: Implement the cross entropy loss. The function you will use is: tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...) Your code should input z, compute the sigmoid (to get a) and then compute the cross entropy cost $J$. All this can be done using one call to tf.nn.sigmoid_cross_entropy_with_logits, which computes $$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{2}) + (1-y^{(i)})\log (1-\sigma(z^{2})\large )\small\tag{2}$$ End of explanation """ # GRADED FUNCTION: one_hot_matrix def one_hot_matrix(labels, C): """ Creates a matrix where the i-th row corresponds to the ith class number and the jth column corresponds to the jth training example. So if example j had a label i. Then entry (i,j) will be 1. Arguments: labels -- vector containing the labels C -- number of classes, the depth of the one hot dimension Returns: one_hot -- one hot matrix """ ### START CODE HERE ### # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line) C = tf.constant(C, name = "C") # Use tf.one_hot, be careful with the axis (approx. 1 line) one_hot_matrix = tf.one_hot(indices = labels, depth = C, axis = 0) # Create the session (approx. 1 line) sess = tf.Session() # Run the session (approx. 1 line) one_hot = sess.run(one_hot_matrix) # Close the session (approx. 1 line). See method 1 above. 
sess.close() ### END CODE HERE ### return one_hot labels = np.array([1,2,3,0,2,1]) one_hot = one_hot_matrix(labels, C = 4) print ("one_hot = " + str(one_hot)) """ Explanation: Expected Output : <table> <tr> <td> **cost** </td> <td> [ 1.00538719 1.03664088 0.41385433 0.39956614] </td> </tr> </table> 1.4 - Using One Hot encodings Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows: <img src="images/onehot.png" style="width:600px;height:150px;"> This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code: tf.one_hot(labels, depth, axis) Exercise: Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use tf.one_hot() to do this. End of explanation """ # GRADED FUNCTION: ones def ones(shape): """ Creates an array of ones of dimension shape Arguments: shape -- shape of the array you want to create Returns: ones -- array containing only ones """ ### START CODE HERE ### # Create "ones" tensor using tf.ones(...). (approx. 1 line) ones = tf.ones(shape) # Create the session (approx. 1 line) sess = tf.Session() # Run the session to compute 'ones' (approx. 1 line) ones = sess.run(ones) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return ones print ("ones = " + str(ones([3]))) """ Explanation: Expected Output: <table> <tr> <td> **one_hot** </td> <td> [[ 0. 0. 0. 1. 0. 0.] [ 1. 0. 0. 0. 0. 1.] [ 0. 1. 0. 0. 1. 0.] [ 0. 0. 1. 0. 0. 0.]] </td> </tr> </table> 1.5 - Initialize with zeros and ones Now you will learn how to initialize a vector of zeros and ones. 
The function you will be calling is tf.ones(). To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively. Exercise: Implement the function below to take in a shape and to return an array (of the shape's dimension of ones). tf.ones(shape) End of explanation """ # Loading the dataset X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() """ Explanation: Expected Output: <table> <tr> <td> **ones** </td> <td> [ 1. 1. 1.] </td> </tr> </table> 2 - Building your first neural network in tensorflow In this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model: Create the computation graph Run the graph Let's delve into the problem you'd like to solve! 2.0 - Problem statement: SIGNS Dataset One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language. Training set: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number). Test set: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number). Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs. Here are examples for each number, and how an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolutoion to 64 by 64 pixels. <img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> Figure 1</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center> Run the following code to load the dataset. 
End of explanation """ # Example of a picture index = 0 plt.imshow(X_train_orig[index]) print ("y = " + str(np.squeeze(Y_train_orig[:, index]))) """ Explanation: Change the index below and run the cell to visualize some examples in the dataset. End of explanation """ # Flatten the training and test images X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T # Normalize image vectors X_train = X_train_flatten/255. X_test = X_test_flatten/255. # Convert training and test labels to one hot matrices Y_train = convert_to_one_hot(Y_train_orig, 6) Y_test = convert_to_one_hot(Y_test_orig, 6) print ("number of training examples = " + str(X_train.shape[1])) print ("number of test examples = " + str(X_test.shape[1])) print ("X_train shape: " + str(X_train.shape)) print ("Y_train shape: " + str(Y_train.shape)) print ("X_test shape: " + str(X_test.shape)) print ("Y_test shape: " + str(Y_test.shape)) """ Explanation: As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so. End of explanation """ # GRADED FUNCTION: create_placeholders def create_placeholders(n_x, n_y): """ Creates the placeholders for the tensorflow session. Arguments: n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288) n_y -- scalar, number of classes (from 0 to 5, so -> 6) Returns: X -- placeholder for the data input, of shape [n_x, None] and dtype "float" Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float" Tips: - You will use None because it let's us be flexible on the number of examples you will for the placeholders. In fact, the number of examples during test/train is different. """ ### START CODE HERE ### (approx. 
2 lines) X = tf.placeholder(tf.float32, [n_x, None], name = "X") Y = tf.placeholder(tf.float32, [n_y, None], name = "Y") ### END CODE HERE ### return X, Y X, Y = create_placeholders(12288, 6) print ("X = " + str(X)) print ("Y = " + str(Y)) """ Explanation: Note that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing. Your goal is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one. The model is LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes. 2.1 - Create placeholders Your first task is to create placeholders for X and Y. This will allow you to later pass your training data in when you run your session. Exercise: Implement the function below to create the placeholders in tensorflow. End of explanation """ # GRADED FUNCTION: initialize_parameters def initialize_parameters(): """ Initializes parameters to build a neural network with tensorflow. The shapes are: W1 : [25, 12288] b1 : [25, 1] W2 : [12, 25] b2 : [12, 1] W3 : [6, 12] b3 : [6, 1] Returns: parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3 """ tf.set_random_seed(1) # so that your "random" numbers match ours ### START CODE HERE ### (approx. 
6 lines of code) W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer()) W2 = tf.get_variable("W2", [12,25], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b2 = tf.get_variable("b2", [12,1], initializer = tf.zeros_initializer()) W3 = tf.get_variable("W3", [6,12], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b3 = tf.get_variable("b3", [6,1], initializer = tf.zeros_initializer()) ### END CODE HERE ### parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3} return parameters tf.reset_default_graph() with tf.Session() as sess: parameters = initialize_parameters() print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) """ Explanation: Expected Output: <table> <tr> <td> **X** </td> <td> Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1) </td> </tr> <tr> <td> **Y** </td> <td> Tensor("Placeholder_2:0", shape=(10, ?), dtype=float32) (not necessarily Placeholder_2) </td> </tr> </table> 2.2 - Initializing the parameters Your second task is to initialize the parameters in tensorflow. Exercise: Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use: python W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer()) Please use seed = 1 to make sure your results match ours. 
End of explanation """ # GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters): """ Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3" the shapes are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit """ # Retrieve the parameters from the dictionary "parameters" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx. 5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3 ### END CODE HERE ### return Z3 tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) print("Z3 = " + str(Z3)) """ Explanation: Expected Output: <table> <tr> <td> **W1** </td> <td> < tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref > </td> </tr> <tr> <td> **b1** </td> <td> < tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref > </td> </tr> <tr> <td> **W2** </td> <td> < tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref > </td> </tr> <tr> <td> **b2** </td> <td> < tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref > </td> </tr> </table> As expected, the parameters haven't been evaluated yet. 2.3 - Forward propagation in tensorflow You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are: tf.add(...,...) 
to do an addition tf.matmul(...,...) to do a matrix multiplication tf.nn.relu(...) to apply the ReLU activation Question: Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at z3. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need a3! End of explanation """ # GRADED FUNCTION: compute_cost def compute_cost(Z3, Y): """ Computes the cost Arguments: Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples) Y -- "true" labels vector placeholder, same shape as Z3 Returns: cost - Tensor of the cost function """ # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...) logits = tf.transpose(Z3) labels = tf.transpose(Y) ### START CODE HERE ### (1 line of code) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) ### END CODE HERE ### return cost tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) cost = compute_cost(Z3, Y) print("cost = " + str(cost)) """ Explanation: Expected Output: <table> <tr> <td> **Z3** </td> <td> Tensor("Add_2:0", shape=(6, ?), dtype=float32) </td> </tr> </table> You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to brackpropagation. 2.4 Compute cost As seen before, it is very easy to compute the cost using: python tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...)) Question: Implement the cost function below. 
- It is important to know that the "logits" and "labels" inputs of tf.nn.softmax_cross_entropy_with_logits are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you. - Besides, tf.reduce_mean basically does the summation over the examples. End of explanation """ def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 32, print_cost = True): """ Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size = 12288, number of training examples = 1080) Y_train -- test set, of shape (output size = 6, number of training examples = 1080) X_test -- training set, of shape (input size = 12288, number of training examples = 120) Y_test -- test set, of shape (output size = 6, number of test examples = 120) learning_rate -- learning rate of the optimization num_epochs -- number of epochs of the optimization loop minibatch_size -- size of a minibatch print_cost -- True to print the cost every 100 epochs Returns: parameters -- parameters learnt by the model. They can then be used to predict. 
""" ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed = 3 # to keep consistent results (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set) n_y = Y_train.shape[0] # n_y : output size costs = [] # To keep track of the cost # Create Placeholders of shape (n_x, n_y) ### START CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the forward propagation in the tensorflow graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add cost function to tensorflow graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. ### START CODE HERE ### (1 line) optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables init = tf.global_variables_initializer() # Start the session to compute the tensorflow graph with tf.Session() as sess: # Run the initialization sess.run(init) # Do the training loop for epoch in range(num_epochs): epoch_cost = 0. # Defines a cost related to an epoch num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph on a minibatch. # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y). 
### START CODE HERE ### (1 line) _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost / num_minibatches # Print the cost every epoch if print_cost == True and epoch % 100 == 0: print ("Cost after epoch %i: %f" % (epoch, epoch_cost)) if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) # plot the cost plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per tens)') plt.title("Learning rate =" + str(learning_rate)) plt.show() # lets save the parameters in a variable parameters = sess.run(parameters) print ("Parameters have been trained!") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train})) print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test})) return parameters """ Explanation: Expected Output: <table> <tr> <td> **cost** </td> <td> Tensor("Mean:0", shape=(), dtype=float32) </td> </tr> </table> 2.5 - Backward propagation & parameter updates This is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model. After you compute the cost function. You will create an "optimizer" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate. 
For instance, for gradient descent the optimizer would be: python optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost) To make the optimization you would do: python _ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs. Note When coding, we often use _ as a "throwaway" variable to store values that we won't need to use later. Here, _ takes on the evaluated value of optimizer, which we don't need (and c takes the value of the cost variable). 2.6 - Building the model Now, you will bring it all together! Exercise: Implement the model. You will be calling the functions you had previously implemented. End of explanation """ parameters = model(X_train, Y_train, X_test, Y_test) """ Explanation: Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes! End of explanation """ import scipy from PIL import Image from scipy import ndimage ## START CODE HERE ## (PUT YOUR IMAGE NAME) my_image = "thumbs_up.jpg" ## END CODE HERE ## # We preprocess your image to fit your algorithm. 
fname = "images/" + my_image image = np.array(ndimage.imread(fname, flatten=False)) my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T my_image_prediction = predict(my_image, parameters) plt.imshow(image) print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction))) """ Explanation: Expected Output: <table> <tr> <td> **Train Accuracy** </td> <td> 0.999074 </td> </tr> <tr> <td> **Test Accuracy** </td> <td> 0.716667 </td> </tr> </table> Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy. Insights: - Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting. - Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters. 2.7 - Test with your own image (optional / ungraded exercise) Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the following code 4. Run the code and check if the algorithm is right! End of explanation """
albahnsen/ML_SecurityInformatics
notebooks/10_EnsembleMethods_cont.ipynb
mit
# read in and prepare the chrun data # Download the dataset import pandas as pd import numpy as np data = pd.read_csv('../datasets/churn.csv') # Create X and y # Select only the numeric features X = data.iloc[:, [1,2,6,7,8,9,10]].astype(np.float) # Convert bools to floats X = X.join((data.iloc[:, [4,5]] == 'no').astype(np.float)) y = (data.iloc[:, -1] == 'True.').astype(np.int) X.head() y.value_counts().to_frame('count').assign(percentage = lambda x: x/x.sum()) from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) """ Explanation: 10 - Ensemble Methods - Continuation by Alejandro Correa Bahnsen version 0.2, May 2016 Part of the class Machine Learning for Security Informatics This notebook is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License. Special thanks goes to Kevin Markham Why are we learning about ensembling? Very popular method for improving the predictive performance of machine learning models Provides a foundation for understanding more sophisticated models Part 1: Combination of classifiers - Majority Voting The most typical form of an ensemble is made by combining $T$ different base classifiers. Each base classifier $M(\mathcal{S}j)$ is trained by applying algorithm $M$ to a random subset $\mathcal{S}_j$ of the training set $\mathcal{S}$. For simplicity we define $M_j \equiv M(\mathcal{S}_j)$ for $j=1,\dots,T$, and $\mathcal{M}={M_j}{j=1}^{T}$ a set of base classifiers. Then, these models are combined using majority voting to create the ensemble $H$ as follows $$ f_{mv}(\mathcal{S},\mathcal{M}) = max_{c \in {0,1}} \sum_{j=1}^T \mathbf{1}_c(M_j(\mathcal{S})). 
$$ End of explanation """ n_estimators = 100 # set a seed for reproducibility np.random.seed(123) n_samples = X_train.shape[0] # create bootstrap samples (will be used to select rows from the DataFrame) samples = [np.random.choice(a=n_samples, size=n_samples, replace=True) for _ in range(n_estimators)] from sklearn.tree import DecisionTreeClassifier np.random.seed(123) seeds = np.random.randint(1, 10000, size=n_estimators) trees = {} for i in range(n_estimators): trees[i] = DecisionTreeClassifier(max_features="sqrt", max_depth=None, random_state=seeds[i]) trees[i].fit(X_train.iloc[samples[i]], y_train.iloc[samples[i]]) # Predict y_pred_df = pd.DataFrame(index=X_test.index, columns=list(range(n_estimators))) for i in range(n_estimators): y_pred_df.ix[:, i] = trees[i].predict(X_test) y_pred_df.head() """ Explanation: Create 100 decision trees End of explanation """ y_pred_df.sum(axis=1)[:10] y_pred = (y_pred_df.sum(axis=1) >= (n_estimators / 2)).astype(np.int) from sklearn import metrics metrics.f1_score(y_pred, y_test) metrics.accuracy_score(y_pred, y_test) """ Explanation: Predict using majority voting End of explanation """ from sklearn.ensemble import BaggingClassifier clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, bootstrap=True, random_state=42, n_jobs=-1, oob_score=True) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) metrics.f1_score(y_pred, y_test), metrics.accuracy_score(y_pred, y_test) """ Explanation: Using majority voting with sklearn End of explanation """ samples_oob = [] # show the "out-of-bag" observations for each sample for sample in samples: samples_oob.append(sorted(set(range(n_samples)) - set(sample))) """ Explanation: Part 2: Combination of classifiers - Weighted Voting The majority voting approach gives the same weight to each classfier regardless of the performance of each one. 
Why not take into account the oob performance of each classifier First, in the traditional approach, a similar comparison of the votes of the base classifiers is made, but giving a weight $\alpha_j$ to each classifier $M_j$ during the voting phase $$ f_{wv}(\mathcal{S},\mathcal{M}, \alpha) =\max_{c \in {0,1}} \sum_{j=1}^T \alpha_j \mathbf{1}c(M_j(\mathcal{S})), $$ where $\alpha={\alpha_j}{j=1}^T$. The calculation of $\alpha_j$ is related to the performance of each classifier $M_j$. It is usually defined as the normalized misclassification error $\epsilon$ of the base classifier $M_j$ in the out of bag set $\mathcal{S}j^{oob}=\mathcal{S}-\mathcal{S}_j$ \begin{equation} \alpha_j=\frac{1-\epsilon(M_j(\mathcal{S}_j^{oob}))}{\sum{j_1=1}^T 1-\epsilon(M_{j_1}(\mathcal{S}_{j_1}^{oob}))}. \end{equation} Select each oob sample End of explanation """ errors = np.zeros(n_estimators) for i in range(n_estimators): y_pred_ = trees[i].predict(X_train.iloc[samples_oob[i]]) errors[i] = 1 - metrics.accuracy_score(y_train.iloc[samples_oob[i]], y_pred_) %matplotlib inline import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') plt.scatter(range(n_estimators), errors) plt.xlim([0, n_estimators]) plt.title('OOB error of each tree') """ Explanation: Estimate the oob error of each classifier End of explanation """ alpha = (1 - errors) / (1 - errors).sum() weighted_sum_1 = ((y_pred_df) * alpha).sum(axis=1) weighted_sum_1.head(20) y_pred = (weighted_sum_1 >= 0.5).astype(np.int) metrics.f1_score(y_pred, y_test), metrics.accuracy_score(y_pred, y_test) """ Explanation: Estimate $\alpha$ End of explanation """ clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, bootstrap=True, random_state=42, n_jobs=-1, oob_score=True) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) metrics.f1_score(y_pred, y_test), metrics.accuracy_score(y_pred, y_test) errors = np.zeros(clf.n_estimators) y_pred_all_ = np.zeros((X_test.shape[0], clf.n_estimators)) for i in 
range(clf.n_estimators): oob_sample = ~clf.estimators_samples_[i] y_pred_ = clf.estimators_[i].predict(X_train.values[oob_sample]) errors[i] = metrics.accuracy_score(y_pred_, y_train.values[oob_sample]) y_pred_all_[:, i] = clf.estimators_[i].predict(X_test) alpha = (1 - errors) / (1 - errors).sum() y_pred = (np.sum(y_pred_all_ * alpha, axis=1) >= 0.5).astype(np.int) metrics.f1_score(y_pred, y_test), metrics.accuracy_score(y_pred, y_test) """ Explanation: Using Weighted voting with sklearn End of explanation """ X_train_2 = pd.DataFrame(index=X_train.index, columns=list(range(n_estimators))) for i in range(n_estimators): X_train_2[i] = trees[i].predict(X_train) X_train_2.head() from sklearn.linear_model import LogisticRegressionCV lr = LogisticRegressionCV() lr.fit(X_train_2, y_train) lr.coef_ y_pred = lr.predict(y_pred_df) metrics.f1_score(y_pred, y_test), metrics.accuracy_score(y_pred, y_test) """ Explanation: Part 3: Combination of classifiers - Stacking The staking method consists in combining the different base classifiers by learning a second level algorithm on top of them. In this framework, once the base classifiers are constructed using the training set $\mathcal{S}$, a new set is constructed where the output of the base classifiers are now considered as the features while keeping the class labels. Even though there is no restriction on which algorithm can be used as a second level learner, it is common to use a linear model, such as $$ f_s(\mathcal{S},\mathcal{M},\beta) = g \left( \sum_{j=1}^T \beta_j M_j(\mathcal{S}) \right), $$ where $\beta={\beta_j}_{j=1}^T$, and $g(\cdot)$ is the sign function $g(z)=sign(z)$ in the case of a linear regression or the sigmoid function, defined as $g(z)=1/(1+e^{-z})$, in the case of a logistic regression. 
Lets first get a new training set consisting of the output of every classifier End of explanation """ y_pred_all_ = np.zeros((X_test.shape[0], clf.n_estimators)) X_train_3 = np.zeros((X_train.shape[0], clf.n_estimators)) for i in range(clf.n_estimators): X_train_3[:, i] = clf.estimators_[i].predict(X_train) y_pred_all_[:, i] = clf.estimators_[i].predict(X_test) lr = LogisticRegressionCV() lr.fit(X_train_3, y_train) y_pred = lr.predict(y_pred_all_) metrics.f1_score(y_pred, y_test), metrics.accuracy_score(y_pred, y_test) """ Explanation: Using sklearn End of explanation """ dt = DecisionTreeClassifier() dt.fit(X_train, y_train) y_pred = dt.predict(X_test) metrics.f1_score(y_pred, y_test), metrics.accuracy_score(y_pred, y_test) """ Explanation: vs using only one dt End of explanation """ from IPython.display import Image Image(url= "http://vision.cs.chubu.ac.jp/wp/wp-content/uploads/2013/07/OurMethodv81.png", width=900) """ Explanation: Part 4: Boosting While boosting is not algorithmically constrained, most boosting algorithms consist of iteratively learning weak classifiers with respect to a distribution and adding them to a final strong classifier. When they are added, they are typically weighted in some way that is usually related to the weak learners' accuracy. After a weak learner is added, the data is reweighted: examples that are misclassified gain weight and examples that are classified correctly lose weight (some boosting algorithms actually decrease the weight of repeatedly misclassified examples, e.g., boost by majority and BrownBoost). Thus, future weak learners focus more on the examples that previous weak learners misclassified. 
(Wikipedia) End of explanation """ # read in and prepare the chrun data # Download the dataset import pandas as pd import numpy as np data = pd.read_csv('../datasets/churn.csv') # Create X and y # Select only the numeric features X = data.iloc[:, [1,2,6,7,8,9,10]].astype(np.float) # Convert bools to floats X = X.join((data.iloc[:, [4,5]] == 'no').astype(np.float)) y = (data.iloc[:, -1] == 'True.').astype(np.int) from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) n_samples = X_train.shape[0] n_estimators = 10 weights = pd.DataFrame(index=X_train.index, columns=list(range(n_estimators))) t = 0 weights[t] = 1 / n_samples """ Explanation: Adaboost AdaBoost (adaptive boosting) is an ensemble learning algorithm that can be used for classification or regression. Although AdaBoost is more resistant to overfitting than many machine learning algorithms, it is often sensitive to noisy data and outliers. AdaBoost is called adaptive because it uses multiple iterations to generate a single composite strong learner. AdaBoost creates the strong learner (a classifier that is well-correlated to the true classifier) by iteratively adding weak learners (a classifier that is only slightly correlated to the true classifier). During each round of training, a new weak learner is added to the ensemble and a weighting vector is adjusted to focus on examples that were misclassified in previous rounds. The result is a classifier that has higher accuracy than the weak learners’ classifiers. 
Algorithm: Initialize all weights ($w_i$) to 1 / n_samples Train a classifier $h_t$ using weights Estimate training error $e_t$ set $alpha_t = log\left(\frac{1-e_t}{e_t}\right)$ Update weights $$w_i^{t+1} = w_i^{t}e^{\left(\alpha_t \mathbf{I}\left(y_i \ne h_t(x_t)\right)\right)}$$ Repeat while $e_t<0.5$ and $t<T$ End of explanation """ from sklearn.tree import DecisionTreeClassifier trees = [] trees.append(DecisionTreeClassifier(max_depth=1)) trees[t].fit(X_train, y_train, sample_weight=weights[t].values) """ Explanation: Train the classifier End of explanation """ y_pred_ = trees[t].predict(X_train) error = [] error.append(1 - metrics.accuracy_score(y_pred_, y_train)) error[t] alpha = [] alpha.append(np.log((1 - error[t]) / error[t])) alpha[t] """ Explanation: Estimate error End of explanation """ weights[t + 1] = weights[t] filter_ = y_pred_ != y_train weights.loc[filter_, t + 1] = weights.loc[filter_, t] * np.exp(alpha[t]) """ Explanation: Update weights End of explanation """ weights[t + 1] = weights[t + 1] / weights[t + 1].sum() """ Explanation: Normalize weights End of explanation """ for t in range(1, n_estimators): trees.append(DecisionTreeClassifier(max_depth=1)) trees[t].fit(X_train, y_train, sample_weight=weights[t].values) y_pred_ = trees[t].predict(X_train) error.append(1 - metrics.accuracy_score(y_pred_, y_train)) alpha.append(np.log((1 - error[t]) / error[t])) weights[t + 1] = weights[t] filter_ = y_pred_ != y_train weights.loc[filter_, t + 1] = weights.loc[filter_, t] * np.exp(alpha[t]) weights[t + 1] = weights[t + 1] / weights[t + 1].sum() error """ Explanation: Iteration 2 - n_estimators End of explanation """ new_n_estimators = np.sum([x<0.5 for x in error]) y_pred_all = np.zeros((X_test.shape[0], new_n_estimators)) for t in range(new_n_estimators): y_pred_all[:, t] = trees[t].predict(X_test) y_pred = (np.sum(y_pred_all * alpha[:new_n_estimators], axis=1) >= 1).astype(np.int) metrics.f1_score(y_pred, y_test.values), metrics.accuracy_score(y_pred, 
y_test.values) """ Explanation: Create classification Only classifiers when error < 0.5 End of explanation """ from sklearn.ensemble import AdaBoostClassifier clf = AdaBoostClassifier() clf clf.fit(X_train, y_train) y_pred = clf.predict(X_test) metrics.f1_score(y_pred, y_test.values), metrics.accuracy_score(y_pred, y_test.values) """ Explanation: Using sklearn End of explanation """ from sklearn.ensemble import GradientBoostingClassifier clf = GradientBoostingClassifier() clf clf.fit(X_train, y_train) y_pred = clf.predict(X_test) metrics.f1_score(y_pred, y_test.values), metrics.accuracy_score(y_pred, y_test.values) """ Explanation: Gradient Boosting End of explanation """
Ironlors/SmartIntersection-Ger
Journal/data1.txt.ipynb
apache-2.0
#Create Lists
# Lap times (seconds per lap) and the motor torque used on each lap; torque
# and maxSpeed were both increased by 10% after every lap (see Explanation).
time = [233.32, 198.92, 184.7, 168.18, 148.22, 138.88, 151.76, 127.48, 119.12, 115.24,
        110.7, 104.28, 105.52, 109.2, 120.7401, 147.027]
motorTorque = [100, 110, 121, 133.1, 146.41, 161.051, 161.051, 177.1561, 194.8717, 214.3589,
               235.7948, 259.3743, 285.3117, 313.8429, 345.2272, 379.74992]
print(time)
print('elements in time: '+str(len(time)))
print(motorTorque)
print('elements in motorTorque: '+str(len(motorTorque)))
"""
Explanation: Vehicle Data
Data1.txt
Dies sind die Werte aus der Datei "data1.txt". Hierbei hatten wir einen folgende Startwerte:
- MotorTorque : 100
- maxSpeed : 10
Nach jeder einzelnen Runde wurden diese beiden Werte um 10% erhöht. Die Listen sind hierbei die Endzeiten der Runden sowie der MotorTorque. Ich beachte hierbei nur eine der beiden Variablen, da diese direkt zueinander proportional sind. Zu beachten ist hier bei, dass die Werte für Torgue, maxSpeed und SteerAngle nach den Wertereihen kommen.
End of explanation
"""

# NOTE(review): np and plt are assumed to be imported in an earlier cell
# (import numpy as np / import matplotlib.pyplot as plt) — confirm upstream.
np_time = np.array(time)
np_torque = np.array(motorTorque)
# Row 0: lap times, row 1: torque values (shape (2, 16)).
np_2d = np.array([np_time, np_torque])
np_2d
np_2d.shape
plot = plt.plot(np_torque, np_time)
plt.xlabel('MotorTorque')
plt.ylabel('Zeit in s/Runde')
plt.title('Messwerte von data1.txt')
plt.show()
"""
Explanation: Da wir nun die entsprechenden Werte in numpy importiert haben, können wir diese nun erkunden und auswerten.
Schauen wir nun erst einmal nach ein Paar Plots, welche die Datensätze beschreiben kö
End of explanation
"""

# Time and torque of rounds 6-8 (columns 5-7), around the outlier.
print(np_2d[:, 5:8])
"""
Explanation: Wie wir sehen ist der Graph in etwa parabelförmig. Es gibt einen ungewöhnlichen Wert bei ca 160 MotorTorque. Betrachten wir diesen genauer mit den benachbarten Werten. Es handelt sich dabei um den 7. Wert.
End of explanation
"""

plot = plt.plot(np_torque[4:8], np_time[4:8])
plt.xlabel('MotorTorque')
plt.ylabel('Zeit in s/Runde')
plt.title('Messwerte um Wert 7')
plt.show()
"""
Explanation: Hierbei ist zu beachten, dass sich der Wert vom MotorTorque nicht verändert.
Dies ist eine Abweichung, welche durch mich verursacht wurde, da ich dort aus Versehen das Programm gestoppt hatte, und durch das neue Starten das Fahrzeug aus den Stand starten musste. Interessant ist hierbei jedoch, das es einen erheblichen Unterschied machte, ob ein Fahrzeug bereit Geschwindigkeit aufgenommen hat, oder aus den Stand startete. Betrachten wir diesen Unterschied einmal genauer.
End of explanation
"""

# Time difference between round 7 (standing start) and round 8.
print(np_time[6] - np_time[7])
"""
Explanation: So erhalten wir an dieser Stelle eine Verschiebung um
End of explanation
"""

# Column 0: gate number, column 1: measured speed at that gate.
np_gateSpeed6 = np.array([[1, 3.28], [2, 7.60], [3, 13.70], [4, 1.74], [5, 5.78],
                          [6, 3.58], [7, 0.40], [8, -1.8], [9, -1.2], [10, 11.53],
                          [11, -0.95], [12, 11.71], [13, 3.34], [14, 10.09], [15, 5.95],
                          [16, 4.99], [17, 4.11], [18, 6.74], [19, 6.30], [20, 8.4]])
np_gateSpeed5 = np.array([[1, 2.55], [2, 5.41], [3, 9.66], [4, 4.04], [5, 5.44],
                          [6, 10.01], [7, 8.86], [8, 4.41], [9, 5.7], [10, 9.68],
                          [11, -2.45], [12, 12.71], [13, 5.9], [14, 9.44], [15, 5.84],
                          [16, -0.18], [17, 2.05], [18, 7.73], [19, 2.93], [20, 4.48]])
np_gate = np_gateSpeed6[:, 0]
np_speed6 = np_gateSpeed6[:, 1]
np_speed5 = np_gateSpeed5[:, 1]
print(np_gate)
print(np_speed6)
print(np_speed5)
plt.plot(np_gate, np_speed5)
plt.plot(np_gate, np_speed6)
plt.xlabel('Gates')
plt.ylabel('Speed')
plt.title('Comparison of Value 5 and 6')
plt.show()
"""
Explanation: 24.28 Sekunden. Dies scheint allerding recht ungewöhnlich, da die entsprechenden Anfangswerte selbst bei den langsamen Werten deutlich schneller sind. Betrachten wir daher einmal die Verteilung der Zeit auf den einzelnen Gates. Da laut Messwerten die Geschwindigkeit die gleich ist, betrachten wir diese an den einzelnen Gates. Dafür fügen wir die entsprechenden Werte in eine Liste ein. Wichtig sind für uns die Indizen 5 und 6. Dabei nutzen wir ein 2D Array mit dem ersten Wert für das Gate und den zweiten Wert für die entsprechende Zeit. Dies erledigen wir für die ersten 20 Werte, da dies eine ausreichende Beispielmenge sein sollte.
End of explanation
"""

# BUG FIX: np.mean over the full 2-D arrays also averaged the gate numbers in
# column 0 (identical 1..20 in both arrays), which diluted the real speed
# difference. Average only the speed column, so the printed deviation is the
# actual average-speed deviation. (The percentage in the prose below was
# computed with the old, biased value.)
avgGS6 = np.mean(np_speed6)
avgGS5 = np.mean(np_speed5)
print('Abweichung = '+str(100-(avgGS6 / avgGS5 *100))+'%')
"""
Explanation: Selbst wenn beide Graphen sehr starke Spitzen besitzen, in welchen sie sich stark voneinander unterscheiden, haben sie auch Strecken, in welchen sie die gleiche Geschwindigkeit hatten. Daher nutzen wir die Durschnittsgeschwindigkeit um eine endgültiges Urteil zu fassen.
End of explanation
"""

# Seconds-per-torque ratio for each round, and the fastest lap overall.
np_coeff = (np_time / np_torque)
x = np.linspace(0, 16, 16)
plt.xlabel('rounds')
plt.ylabel('$seconds / Torque$')
coeff_plt = plt.plot(x, np_coeff)
plt.show()
print('lowest time: '+str(min(time)))
print('torque: '+str(motorTorque[time.index(104.28)]))
print('Index: '+str(time.index(104.28)))
"""
Explanation: Hier sehen wir, dass die Durchschnittsgeschwindigkeit recht nahe liegt. Die Abweichung könnte durch eine ungünstige Wahl des Messbereiches liegen. Da nur jede Sekunde gemessen wurde, kann es sein, dass an bestimmten Gates das Fahrzeug bereits schon weiter war als in der Letzten Reihe. Von daher sollte der Zeitpunkt der Messung abgepasst werden. Dies ist entweder durch eine Erhöhung der Datenauflösung möglich, also durch eine Messung in kleineren Abständen, oder durch feste Messpunkte. Diese könnten zum Beispiel beim erreichen der einzelnen Wegpunkte ausgegeben werden. Eine dritte Möglichkeit besteht in einer Kombination beider Methoden. Hierfür wäre es für die einfache lesbarkeit hilfreich 2 Dateien anzulegen. Auf der einen Seite werden die Werte ausgegeben, sobald ein Wegpunkt erreicht wurde, und in der anderen Datei werden die Daten alle 0.5s ausgegeben. Dabei würden auch in der ersten Datei in Information zu der Position wegfallen, da die Wegpunkte stets an der gleichen Stelle sind.
weitere Betrachtungen
Relation zwischen Torque und Time
Eine weitere interessante Beobachtung, wäre die Erkundung des Verhältnisses zwischen Torque und Zeit. Hierfür nutzen wir diese Gleichung.
End of explanation
"""
crystalzhaizhai/cs207_yi_zhai
lectures/L13/L13.ipynb
mit
import reprlib


class Sentence:
    """A sequence of words: iterable because it implements the sequence
    protocol (__getitem__ starting at index 0), with no __iter__ needed."""

    def __init__(self, text):
        self.text = text
        self.words = text.split()

    def __getitem__(self, index):
        # Delegating to the list gives us indexing AND implicit iteration.
        return self.words[index]

    def __len__(self):  # completes sequence protocol, but not needed for iterable
        return len(self.words)

    def __repr__(self):
        # reprlib.repr truncates long strings so the repr stays short.
        return 'Sentence(%s)' % reprlib.repr(self.text)


# Sequence'
s = Sentence("Dogs will save the world.")
print(len(s), " ", s[3], " ", s)
min(s), max(s)
list(s)
"""
Explanation: Lecture 13
Monday, October 23rd 2017
Last time:

Data structures motivation
Abstract data types
Sequences
Linked lists

This time:

Iterators and Iterables
Trees, B-trees, and BSTs

From pointers to iterators
One can simply follow the next pointers to the next position in a linked list. This suggests an abstraction of the position to an iterator. Such an abstraction allows us to treat arrays and linked lists with an identical interface. The salient points of this abstraction are:
- The notion of a next
- The notion of a first to a last

We already implemented the sequence protocol. Now we suggest an additional abstraction that is more fundamental than the notion of a sequence: the iterable.
Iterators and Iterables in Python
Just as a sequence is something implementing __getitem__ and __len__, an iterable is something implementing __iter__. __len__ is not needed and indeed may not make sense.
python
len(open('fname.txt')) # File iterator has no length
Example 14-1 in Fluent Python: The Sentence sequence and shows how it can be iterated upon.
End of explanation
"""

# The for loop works even though Sentence defines no __iter__: Python falls
# back to __getitem__ with indices 0, 1, 2, ...
for i in s:
    print(i)
"""
Explanation: To iterate over an object x, Python automatically calls iter(x) (i.e. x.__iter__). An iterable is something which, when iter is called on it, returns an iterator.
(1) If __iter__ is defined, it is called to implement an iterator.
(2) If not, __getitem__ is called starting from index 0.
(3) If no __iter__ and no __getitem__ then raise a TypeError.
Any Python sequence is iterable because sequences implement __getitem__. The standard sequences also implement __iter__; for future proofing you should too because (2) might be deprecated in a future version of Python.
We know that for operates on iterables:
End of explanation
"""

# What a for loop does under the hood: obtain an iterator, call next() until
# StopIteration is raised.
it = iter(s)  # Build an iterator from an iterable
while True:
    try:
        nextval = next(it)  # Get the next item in the iterator
        print(nextval)
    except StopIteration:
        del it  # Iterator is exhausted. Release reference and discard.
        break
"""
Explanation: What's actually going on here?
End of explanation
"""

class SentenceIterator:  # has __next__ and __iter__
    """Iterator over a word list: tracks its own position in self.index."""

    def __init__(self, words):
        self.words = words
        self.index = 0  # position of the next word to yield

    def __next__(self):
        try:
            word = self.words[self.index]
        except IndexError:
            # Past the end: signal exhaustion to the caller / for loop.
            raise StopIteration()
        self.index += 1
        return word

    def __iter__(self):
        # Iterators return themselves, so they can be used where an
        # iterable is expected.
        return self


class Sentence:  # An iterable b/c it has __iter__
    """Iterable (not an iterator): every iter() call yields a fresh,
    independent SentenceIterator."""

    def __init__(self, text):
        self.text = text
        self.words = text.split()

    def __iter__(self):
        return SentenceIterator(self.words)  # Returns an instance of the iterator

    def __repr__(self):
        return 'Sentence(%s)' % reprlib.repr(self.text)


s2 = Sentence("What is data science?")
for i in s2:
    print(i)
s2it = iter(s2)  # Make the iterable an iterator
print(next(s2it), "\n\n")  # Get the next entry
s2it2 = iter(s2)  # Build a second, independent iterator (iter() does not reset s2it)
print(next(s2it), " ", next(s2it2))  # Get the next entry of s2it and s2it2
"""
Explanation: We can completely abstract away a sequence in favor of an iterable (i.e. we don't need to support indexing anymore)
Example 14-4 in Fluent Python:
End of explanation
"""

min(s2), max(s2)
"""
Explanation: While we could have implemented __next__ in Sentence itself, making it an iterator, we will run into the problem of "exhausting an iterator". The iterator above keeps state in self.index and we must be able to start anew by creating a new instance if we want to re-iterate. Thus the __iter__ in the iterable simply returns the SentenceIterator.
From Fluent Python ("Sentence Take #2: A Classic Iterator"):
A common cause of errors in building iterables and iterators is to confuse the two. To be clear: iterables have an __iter__ method that instantiates a new iterator every time. Iterators implement a __next__ method that returns individual items, and an __iter__ method that returns self.
min() and max() also work even though we no longer satisfy the sequence protocol. min and max are pairwise comparisons and can be handled via iteration. The take home message is that in programming with these iterators we don't need either the length or indexing to work to implement many algorithms: we have abstracted these away.
End of explanation
"""
sdss/marvin
docs/sphinx/jupyter/Shanghai_Demo_Queries.ipynb
bsd-3-clause
# Python 2/3 compatibility from __future__ import print_function, division, absolute_import # import matplolib just in case import matplotlib.pyplot as plt # this line tells the notebook to plot matplotlib static plots in the notebook itself %matplotlib inline # this line does the same thing but makes the plots interactive #%matplotlib notebook # Import the config and set to remote. Let's query MPL-5 data from marvin import config # by default the mode is set to 'auto', but let's set it explicitly to remote. config.mode = 'remote' # by default, Marvin uses the latest MPL but let's set it explicitly to MPL-5 config.setRelease('MPL-5') # By default the API will query using the Utah server, at api.sdss.org/marvin2. See the config.sasurl attribute. config.sasurl # If you are using one of the two local ngrok Marvins, you need to switch the SAS Url to one of our ngrok ids. # Uncomment out the following lines and replace the ngrokid with the provided string #ngrokid = 'ngrok_number_string' #config.switchSasUrl('local', ngrokid=ngrokid) #print(config.sasurl) # this is the Query tool from marvin.tools.query import Query """ Explanation: Queries Marvin Queries are a tool designed to remotely query the MaNGA dataset in global and local galaxy properties, and retrieve only the results you want. Let's learn the basics of how to construct a query and also test drive some of the more advanced features that are unique to the Marvin-tools version of querying. End of explanation """ # the string search condition my_search = 'z < 0.1' """ Explanation: The Marvin Query object allows you to specify a string search condition with which you want to look for results. It will construct the necessary SQL syntax for you, send it to the database at Utah using the Marvin API, and return the results. The Query accepts as a keyword argument search_filter. Let's try searching for all galaxies with a redshift < 0.1. 
End of explanation """ # the search condition using the full parameter name my_search = 'nsa.z < 0.1' # Let's setup the query. This will not run it automatically. q = Query(search_filter=my_search) print(q) """ Explanation: The above string search condition is a pseudo-natural language format. Natural language in that you type what you mean to say, and pseudo because it still must be formatted in the standard SQL where condition syntax. This syntax generally takes the form of parameter_name operand value. Marvin is smart enough to figure out which database table a parameter_name belongs to if and only if that name is a unique parameter name. If not you must specify the database table name along with the parameter name, in the form of table.parameter_name. Most MaNGA global properties come from the NASA-Sloan Atlas (NSA) catalog used for target selection. The database table name thus is nsa. So the full parameter_name for redshift is nsa.z. If a parameter name is not unique, then Marvin will return an error asking you to fine-tune your parameter name by using the full parameter table.parameter_name End of explanation """ # To run the query r = q.run() """ Explanation: Running the query produces a Marvin Results object (r): End of explanation """ # Print result counts print('total', r.totalcount) print('returned', r.count) """ Explanation: For number of results < 1000, Marvin will return the entire set of results. For queries that return > 1000, Marvin will paginate the results and only return the first 100, by default. (This can be modified with the limit keyword). End of explanation """ # See the raw SQL print(r.showQuery()) # See the runtime of your query. 
This produces a Python datetime.timedelta object showing days, seconds, microseconds print('timedelta', r.query_runtime) # See the total time in seconds print('query time in seconds:', r.query_runtime.total_seconds()) """ Explanation: It can be useful for informational and debugging purposes to see the raw SQL of your query, and your query runtime. If your query times out or crashes, the Marvin team will need these pieces of info to assess anything. End of explanation """ # Show the results. r.results[0:10] """ Explanation: Query results are stored in r.results. This is a Python list object, and be indexed like an array. Since we have 100 results, let's only look at 10 for brevity. End of explanation """ # my new search new_search = 'nsa.z < 0.1 and nsa.sersic_mass > 3e11' config.setRelease('MPL-5') q2 = Query(search_filter=new_search) r2 = q2.run() print(r2.totalcount) r2.results """ Explanation: We will learn how to use the features of our Results object a little bit later, but first let's revise our search to see how more complex search queries work. Multiple Search Criteria Let's add to our previous search to find only galaxies with M$\star$ > 3 $\times$ 10$^{11}$ M$\odot$. Let's use the Sersic profile determination for stellar mass, which is the sersic_mass parameter of the nsa table, so its full search parameter designation will be nsa.sersic_mass. Since it's unique, you can also just use sersic_mass. Adding multiple search criteria is as easy as writing it how you want it. In this case, we want to AND the two criteria. You can also OR, and NOT criteria. End of explanation """ # new search new_search = '(z<0.1 and nsa.sersic_logmass > 11.47) or (ifu.name=19* and nsa.sersic_n < 2)' q3 = Query(search_filter=new_search) r3 = q3.run() r3.results[0:5] """ Explanation: Compound Search Statements Let's say we are interested in galaxies with redshift < 0.1 and stellar mass > 3e11 or 19-fiber IFUs with an NSA sersic index < 2. 
We can compound multiple criteria together using parentheses. Use parentheses to help set the order of precedence. Without parentheses, the order is NOT > AND > OR. To find 19-fiber IFUs, we'll use the name parameter of the ifu table, which means the full search parameter is ifu.name. However, ifu.name returns the IFU design name, such as 1901, so we need to set the value to 19*, which acts as a wildcard.
plateifu = r4.getListOf('plateifu') print('# unique galaxies', len(set(plateifu))) print(set(plateifu)) """ Explanation: Spaxel queries will return a list of all spaxels satisfying your criteria. By default spaxel queries will return the galaxy information, and spaxel x and y. End of explanation """ # Convert to Cubes. For brevity, let's only convert only the first object. r4.convertToTool('cube', limit=1) print(r4.objects) cube = r4.objects[0] # From a cube, now we can do all things from Marvin Tools, like get a MaNGA MAPS object maps = cube.getMaps() print(maps) # get a emission line sew map em=maps.getMap('emline_sew', channel='ha_6564') # plot it em.plot() # .. and a stellar velocity map st=maps.getMap('stellar_vel') # plot it st.plot() """ Explanation: Once you have a set of query Results, you can easily convert your results into Marvin objects in your workflow. Depending on your result parameters, you can convert to Marvin Cubes, Maps, Spaxels, ModelCubes, or RSS. Let's convert our Results to Marvin Cubes. Note: Depending on the number of results, this conversion step may take a long time. Be careful! End of explanation """ # let's convert to Marvin Spaxels. Again, for brevity, let's only convert the first two. r4.convertToTool('spaxel', limit=2) print(r4.objects) # Now we can do all the Spaxel things, like plot spaxel = r4.objects[0] spaxel.spectrum.plot() """ Explanation: or since our results are from a spaxel query, we can convert to Marvin Spaxels End of explanation """ r4.toTable() r4.toFits('my_r4_results_2.fits') """ Explanation: You can also convert your query results into other formats like an Astropy Table, or FITS End of explanation """ # retrieve the list allparams = q.get_available_params() allparams """ Explanation: A note on Table and Name shortcuts In Queries you must specify a parameter_name or table.parameter_name. However to make it a bit easier, we have created table shortcuts and parameter name shortcuts for a few parameters. 
(more to be added..) ifu.name = ifudesign.name haflux = emline_gflux_ha_6564 g_r = nsa.elpetro_mag_g_r Retrieving Available Search Parameters There are many parameters to search with. You can retrieve a list of available parameters to query. Please note that while currently many parameters in the list can technically be queried on, they have not been thoroughly tested to work, nor may they make any sense to query on. We cannot guarantee what will happen. If you find a parameter that should be queryable and does not work, please let us know. End of explanation """
david4096/bioapi-examples
python_notebooks/1kg_metadata_service.ipynb
apache-2.0
from ga4gh.client import client c = client.HttpClient("http://1kgenomes.ga4gh.org") """ Explanation: GA4GH 1000 Genomes Metadata Service This example illustrates how to access the available datasets in a GA4GH server. Initialize client In this step we create a client object which will be used to communicate with the server. It is initialized using the URL. End of explanation """ dataset = c.search_datasets().next() print dataset data_set_id = dataset.id """ Explanation: We will continue to refer to this client object for accessing the remote server. Access the dataset Here, we issue or first API call to get a listing of datasets hosted by the server. The API call returns an iterator, which is iterated on once to get the 1kgenomes dataset. End of explanation """ dataset_via_get = c.get_dataset(dataset_id=data_set_id) print dataset_via_get """ Explanation: NOTE: We can also obtain individual datasets by knowing its id. From the above field, we use the id to obtain the dataset which belong to that dataset. End of explanation """
hunterowens/data-pipelines
chicago/chicago_permits.ipynb
mit
%matplotlib inline import datetime from datetime import date import pickle import StringIO import zipfile import luigi import requests import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import Normalize, rgb2hex from matplotlib.collections import PatchCollection from mpl_toolkits.basemap import Basemap # pip install https://github.com/matplotlib/basemap/archive/v1.0.7rel.tar.gz from shapely.geometry import Point, Polygon, MultiPoint, MultiPolygon from shapely.prepared import prep # from pysal.esda.mapclassify import Natural_Breaks as nb from descartes import PolygonPatch import fiona from itertools import chain """ Explanation: Introduction This is an example of using Luigi (https://luigi.readthedocs.io/en/stable/index.html) to create a data pipeline. This is intended to be an example of using Luigi to create a data pipeline that grabs data off the web (in this case building permits and ward boundaries for the city of Chicago) and does some data cleaning and visualization. Cheers! 
Dave @imagingnerd Github: dmwelch Sources: Excellent blog post by Sensitive Cities: http://sensitivecities.com/so-youd-like-to-make-a-map-using-python-EN.html Hunter Owens presentation at PyData Chicago 2016: https://github.com/hunterowens/data-pipelines End of explanation """ class DownloadData(luigi.ExternalTask): """ Downloads permit data from city of Chicago """ def run(self): url = 'https://data.cityofchicago.org/api/views/ydr8-5enu/rows.csv?accessType=DOWNLOAD' response = requests.get(url) with self.output().open('w') as out_file: out_file.write(response.text) def output(self): return luigi.LocalTarget("data/permits.csv") """ Explanation: Download permit data from the city of Chicago We save the output to data/permits.csv End of explanation """ def to_float(s): default = np.nan try: r = float(s.replace('$', '')) except: return default return r def to_int(s): default = None if s == '': return default return int(s) def to_date(s): default = '01/01/1900' if s == '': s = default return datetime.datetime.strptime(s, "%m/%d/%Y") # *** Additional available headers at the end *** converter = {'ID': to_int, 'PERMIT#': str, 'PERMIT_TYPE': str, 'ISSUE_DATE': to_date, 'ESTIMATED_COST': to_float, 'AMOUNT_WAIVED': to_float, 'AMOUNT_PAID': to_float, 'TOTAL_FEE': to_float, 'STREET_NUMBER': to_int, 'STREET DIRECTION': str, 'STREET_NAME': str, 'SUFFIX': str, 'WORK_DESCRIPTION': str, 'LATITUDE': to_float, 'LONGITUDE': to_float, 'LOCATION': str, } class cleanCSV(luigi.Task): """This is our cleaning step""" def requires(self): return DownloadData() def run(self): df = pd.read_csv(self.input().open('r'), usecols=converter.keys(), converters=converter, skipinitialspace=True) df.to_csv(self.output().fn) def output(self): return luigi.LocalTarget("data/permits_clean.csv") """ Explanation: Clean the data The ESTIMATED_COST column will create Inf values for "$" so we must clean the strings. 
Save the cleaned data to data/permits_clean.csv End of explanation """ import shutil class DownloadWards(luigi.ExternalTask): """ Downloads ward shapefiles from city of Chicago """ def run(self): url = "https://data.cityofchicago.org/api/geospatial/sp34-6z76?method=export&format=Shapefile" response = requests.get(url) z = zipfile.ZipFile(StringIO.StringIO(response.content)) files = z.namelist() z.extractall('data/') for fname in files: shutil.move('data/' + fname, 'data/geo_export' + fname[-4:]) def output(self): return luigi.LocalTarget("data/geo_export.shp") """ Explanation: Download the ward shapefiles The response is in ZIP format, so we need to extract and return the *.shp file as the output End of explanation """ def plot(m, ldn_points, df_map, bds, sizes, title, label, output): plt.clf() fig = plt.figure() ax = fig.add_subplot(111, axisbg='w', frame_on=False) # we don't need to pass points to m() because we calculated using map_points and shapefile polygons dev = m.scatter( [geom.x for geom in ldn_points], [geom.y for geom in ldn_points], s=sizes, marker='.', lw=.25, facecolor='#33ccff', edgecolor='none', alpha=0.9, antialiased=True, label=label, zorder=3) # plot boroughs by adding the PatchCollection to the axes instance ax.add_collection(PatchCollection(df_map['patches'].values, match_original=True)) # copyright and source data info smallprint = ax.text( 1.03, 0, 'Total points: %s' % len(ldn_points), ha='right', va='bottom', size=4, color='#555555', transform=ax.transAxes) # Draw a map scale m.drawmapscale( bds[0] + 0.08, bds[1] + 0.015, bds[0], bds[1], 10., barstyle='fancy', labelstyle='simple', fillcolor1='w', fillcolor2='#555555', fontcolor='#555555', zorder=5) plt.title(title) plt.tight_layout() # this will set the image width to 722px at 100dpi fig.set_size_inches(7.22, 5.25) plt.savefig(output, dpi=500, alpha=True) # plt.show() def make_basemap(infile): with fiona.open(infile) as shp: bds = shp.bounds extra = 0.05 ll = (bds[0], bds[1]) ur = (bds[2], 
bds[3]) w, h = bds[2] - bds[0], bds[3] - bds[1] # Check w & h calculations assert bds[0] + w == bds[2] and bds[1] + h == bds[3], "Width or height of image not correct!" center = (bds[0] + (w / 2.0), bds[1] + (h / 2.0)) m = Basemap(projection='tmerc', lon_0=center[0], lat_0=center[1], ellps = 'WGS84', width=w * 100000 + 10000, height=h * 100000 + 10000, lat_ts=0, resolution='i', suppress_ticks=True ) m.readshapefile(infile[:-4], 'chicago', color='blue', zorder=3) # m.fillcontinents() return m, bds def data_map(m): df_map = pd.DataFrame({'poly': [Polygon(xy) for xy in m.chicago], 'ward_name': [ward['ward'] for ward in m.chicago_info]}) df_map['area_m'] = df_map['poly'].map(lambda x: x.area) df_map['area_km'] = df_map['area_m'] / 100000 # draw ward patches from polygons df_map['patches'] = df_map['poly'].map(lambda x: PolygonPatch(x, fc='#555555', ec='#787878', lw=.25, alpha=.9, zorder=4)) return df_map def point_objs(m, df, df_map): # Create Point objects in map coordinates from dataframe lon and lat values map_points = pd.Series( [Point(m(mapped_x, mapped_y)) for mapped_x, mapped_y in zip(df['LONGITUDE'], df['LATITUDE'])]) permit_points = MultiPoint(list(map_points.values)) wards_polygon = prep(MultiPolygon(list(df_map['poly'].values))) return filter(wards_polygon.contains, permit_points) """ Explanation: Convienence functions End of explanation """ class MakePermitMap(luigi.Task): def requires(self): return dict(wards=DownloadWards(), data=cleanCSV()) def run(self): m, bds = make_basemap(self.input()['wards'].fn) df = pd.read_csv(self.input()['data'].open('r')) df_map = data_map(m) ldn_points = point_objs(m, df, df_map) plot(m, ldn_points, df_map, bds, sizes=5, title="Permit Locations, Chicago", label="Permit Locations", output='data/chicago_permits.png') def output(self): return luigi.LocalTarget('data/chicago_permits.png') """ Explanation: Map Permit Distribution End of explanation """ class MakeEstimatedCostMap(luigi.Task): """ Plot the permits and scale the 
size by the estimated cost (relative to range)""" def requires(self): return dict(wards=DownloadWards(), data=cleanCSV()) def run(self): m, bds = make_basemap(self.input()['wards'].fn) df = pd.read_csv(self.input()['data'].open('r')) # Get the estimated costs, normalize, and scale by 5 <-- optional costs = df['ESTIMATED_COST'] costs.fillna(costs.min() * 2, inplace=True) assert not np.any([cost is np.inf for cost in costs]), "Inf in column!" # plt.hist(costs, 3000, log=True); sizes = ((costs - costs.min()) / (costs.max() - costs.min())) * 100 #scale factor df_map = data_map(m) ldn_points = point_objs(m, df, df_map) plot(m, ldn_points, df_map, bds, sizes=sizes, title="Relative Estimated Permit Cost, Chicago", label="Relative Estimated Permit Cost", output='data/chicago_rel_est_cost.png') def output(self): return luigi.LocalTarget('data/chicago_est_cost.png') """ Explanation: Map Estimated Costs End of explanation """ class MakeMaps(luigi.WrapperTask): """ RUN ALL THE PLOTS!!! """ def requires(self): yield MakePermitMap() yield MakeEstimatedCostMap() def run(self): pass """ Explanation: Run All Tasks End of explanation """ # if __name__ == '__main__': luigi.run(['MakeMaps', '--local-scheduler']) """ Explanation: Note: to run from the commandline: Export to '.py' file Run: python -m luigi chicago_permits MakeMaps --local-scheduler End of explanation """ # For reference, cost spread is exponential # plt.hist(costs, 3000, log=True); # Additional headers available... 
""" # PIN1, # PIN2, # PIN3, # PIN4, # PIN5, # PIN6, # PIN7, # PIN8, # PIN9, # PIN10, 'CONTRACTOR_1_TYPE, 'CONTRACTOR_1_NAME, 'CONTRACTOR_1_ADDRESS, 'CONTRACTOR_1_CITY, 'CONTRACTOR_1_STATE, 'CONTRACTOR_1_ZIPCODE, 'CONTRACTOR_1_PHONE, 'CONTRACTOR_2_TYPE, 'CONTRACTOR_2_NAME, 'CONTRACTOR_2_ADDRESS, 'CONTRACTOR_2_CITY, 'CONTRACTOR_2_STATE, 'CONTRACTOR_2_ZIPCODE, 'CONTRACTOR_2_PHONE, 'CONTRACTOR_3_TYPE, 'CONTRACTOR_3_NAME, 'CONTRACTOR_3_ADDRESS, 'CONTRACTOR_3_CITY, 'CONTRACTOR_3_STATE, 'CONTRACTOR_3_ZIPCODE, 'CONTRACTOR_3_PHONE, 'CONTRACTOR_4_TYPE, 'CONTRACTOR_4_NAME, 'CONTRACTOR_4_ADDRESS, 'CONTRACTOR_4_CITY, 'CONTRACTOR_4_STATE, 'CONTRACTOR_4_ZIPCODE, 'CONTRACTOR_4_PHONE, 'CONTRACTOR_5_TYPE, 'CONTRACTOR_5_NAME, 'CONTRACTOR_5_ADDRESS, 'CONTRACTOR_5_CITY, 'CONTRACTOR_5_STATE, 'CONTRACTOR_5_ZIPCODE, 'CONTRACTOR_5_PHONE, 'CONTRACTOR_6_TYPE, 'CONTRACTOR_6_NAME, 'CONTRACTOR_6_ADDRESS, 'CONTRACTOR_6_CITY, 'CONTRACTOR_6_STATE, 'CONTRACTOR_6_ZIPCODE, 'CONTRACTOR_6_PHONE, 'CONTRACTOR_7_TYPE, 'CONTRACTOR_7_NAME, 'CONTRACTOR_7_ADDRESS, 'CONTRACTOR_7_CITY, 'CONTRACTOR_7_STATE, 'CONTRACTOR_7_ZIPCODE, 'CONTRACTOR_7_PHONE, 'CONTRACTOR_8_TYPE, 'CONTRACTOR_8_NAME, 'CONTRACTOR_8_ADDRESS, 'CONTRACTOR_8_CITY, 'CONTRACTOR_8_STATE, 'CONTRACTOR_8_ZIPCODE, 'CONTRACTOR_8_PHONE, 'CONTRACTOR_9_TYPE, 'CONTRACTOR_9_NAME, 'CONTRACTOR_9_ADDRESS, 'CONTRACTOR_9_CITY, 'CONTRACTOR_9_STATE, 'CONTRACTOR_9_ZIPCODE, 'CONTRACTOR_9_PHONE, 'CONTRACTOR_10_TYPE, 'CONTRACTOR_10_NAME, 'CONTRACTOR_10_ADDRESS, 'CONTRACTOR_10_CITY, 'CONTRACTOR_10_STATE, 'CONTRACTOR_10_ZIPCODE, 'CONTRACTOR_10_PHONE, 'CONTRACTOR_11_TYPE, 'CONTRACTOR_11_NAME, 'CONTRACTOR_11_ADDRESS, 'CONTRACTOR_11_CITY, 'CONTRACTOR_11_STATE, 'CONTRACTOR_11_ZIPCODE, 'CONTRACTOR_11_PHONE, 'CONTRACTOR_12_TYPE, 'CONTRACTOR_12_NAME, 'CONTRACTOR_12_ADDRESS, 'CONTRACTOR_12_CITY, 'CONTRACTOR_12_STATE, 'CONTRACTOR_12_ZIPCODE, 'CONTRACTOR_12_PHONE, 'CONTRACTOR_13_TYPE, 'CONTRACTOR_13_NAME, 'CONTRACTOR_13_ADDRESS, 'CONTRACTOR_13_CITY, 'CONTRACTOR_13_STATE, 
'CONTRACTOR_13_ZIPCODE, 'CONTRACTOR_13_PHONE, 'CONTRACTOR_14_TYPE, 'CONTRACTOR_14_NAME, 'CONTRACTOR_14_ADDRESS, 'CONTRACTOR_14_CITY, 'CONTRACTOR_14_STATE, 'CONTRACTOR_14_ZIPCODE, 'CONTRACTOR_14_PHONE, 'CONTRACTOR_15_TYPE, 'CONTRACTOR_15_NAME, 'CONTRACTOR_15_ADDRESS, 'CONTRACTOR_15_CITY, 'CONTRACTOR_15_STATE, 'CONTRACTOR_15_ZIPCODE, 'CONTRACTOR_15_PHONE, """ """ Explanation: Miscellaneous notes The estimated cost spread is predictably non-linear, so further direction could be to filter out the "$0" as unestimated (which they likely are)! Suggested work Map estimated costs overlaid with actual cost Map permits by # of contractors involved and cost Plot estimated cost accuracy based on contractor count Map permits by contractors Chloropleth maps by permit count, cost, etc. Include population density (Census data) or distance from major routes Etc... End of explanation """
root-mirror/training
OldSummerStudentsCourse/2016/notebooks/FillHistogram_Example_py.ipynb
gpl-2.0
import ROOT
"""
Explanation: Access TTree in Python using PyROOT and fill a histogram
<hr style="border-top-width: 4px; border-top-color: #34609b;">
First import the ROOT Python module.
End of explanation
"""
# IPython line magic: switch ROOT canvas rendering to interactive JavaScript
# (JSROOT). NOTE: magics are only valid inside IPython/Jupyter -- this
# notebook-export file is not runnable as a plain Python script because of it.
%jsroot on
"""
Explanation: Optional: activate the JavaScript visualisation to produce interactive plots.
End of explanation
"""
# TFile.Open accepts a URL, so the file is read remotely over HTTP.
# The trailing semicolon suppresses the cell's echoed output in IPython.
f = ROOT.TFile.Open("http://indico.cern.ch/event/395198/material/0/0.root");
"""
Explanation: Open a file which is located on the web. No type is to be specified for "f".
End of explanation
"""
# 1D histogram: name "TracksPt", "title;x-label;y-label" string, 128 bins on [0, 64].
h = ROOT.TH1F("TracksPt","Tracks;Pt [GeV/c];#",128,0,64)
# PyROOT exposes the "events" TTree and its branches as plain attributes
# (f.events, event.tracks) -- no SetBranchAddress boilerplate needed.
for event in f.events:
    for track in event.tracks:
        h.Fill(track.Pt())
# Draw the filled histogram on a canvas and display the canvas in the notebook.
c = ROOT.TCanvas()
h.Draw()
c.Draw()
"""
Explanation: Loop over the TTree called "events" in the file. It is accessed with the dot operator. Same holds for the access to the branches: no need to set them up - they are just accessed by name, again with the dot operator.
End of explanation
"""
opensanca/trilha-python
04-python-prat/data_science/Python and Data Science.ipynb
mit
import pandas as pd import matplotlib %matplotlib inline """ Explanation: Python and Data Science Mariana Lopes 28/07/2016 Trabalhando com o Jupyter Ferramenta que permite criação de código, visualização de resultados e documentação no mesmo documento (.ipynb) Modo de comando: esc para ativar, o cursor fica inativo Modo de edição: enter para ativar, modo de inserção Atalhos do teclado (MUITO úteis) Para usar os atalhos descritos abaixo a célula deve estar selecionada porém não pode estar no modo de edição. Para entrar do modo de comando: esc Criar nova célula abaixo: b (elow) Criar nova célula acima: a (bove) Recortar uma célula: x Copiar uma célula: c Colar uma cálula: v Executar uma célula e permanecer nela mesma: ctrl + enter Executar uma célula e mover para a próxima: shift + enter Para ver todos os atalhos, tecle h Tipos de célula Code: Para código Python Markdown: Para documentação Também existem Raw NBConverter e Heading Pandas (http://pandas.pydata.org/) Biblioteca Python para análise de dados Provê ferramentas de alta performance e fácil usabilidade para análise de dados Como instalar Anaconda (http://pandas.pydata.org/pandas-docs/stable/install.html#installing-pandas-with-anaconda) Download anaconda: https://www.continuum.io/downloads Instalar Anaconda: https://docs.continuum.io/anaconda/install Disponível para osx-64, linux-64, linux-32, win-64, win-32 e Python 2.7, Python 3.4, e Python 3.5 conda install pandas Pip pip install pandas Matplotlib (http://matplotlib.org/) Biblioteca Python para plotar gráficos 2D Como instalar Anaconda (http://pandas.pydata.org/pandas-docs/stable/install.html#installing-pandas-with-anaconda) Download anaconda: https://www.continuum.io/downloads Instalar Anaconda: https://docs.continuum.io/anaconda/install Disponível para osx-64, linux-64, linux-32, win-64, win-32 e Python 2.7, Python 3.4, e Python 3.5 conda install matplotlib Pip pip install matplotlib End of explanation """ %%time cast = 
pd.DataFrame.from_csv('data/cast.csv', index_col=None, encoding='utf-8') """ Explanation: Carregando um arquivo csv em um DataFrame do Pandas pd.DataFrame.from_csv(file_name) Se, ao usar este comando, você se deparar com um UnicodeDecodingError, adicione o parâmetro encoding='utf-8' cast.csv End of explanation """ %%time release_dates = pd.read_csv('data/release_dates.csv', index_col=None, parse_dates=['date'], infer_datetime_format=True) """ Explanation: release_dates.csv End of explanation """ cast.columns titles = cast[['title', 'year']].drop_duplicates().reset_index(drop=True) titles.head() """ Explanation: titles End of explanation """ cast.head() release_dates.head() """ Explanation: df.head(n): * Visualizar as primeiras n linhas. * Default: n = 5. End of explanation """ cast.tail() release_dates.tail() """ Explanation: df.tail(n): * Visualizar as últimas n linhas. * Default: n = 5. End of explanation """ len(cast), len(release_dates) """ Explanation: Quantos registros há no conjunto? len(df): * Tamanho do df End of explanation """ cast['type'] cast.type.head() c = 'type' cast[c].head() #cast.c.head() não vai funcionar! """ Explanation: Quais são os possíveis valores para a coluna type? df[col]: * Visualizar uma coluna do df ou df.col: * Se o nome da coluna não tiver, espaços, caracteres especiais ou for uma variável Obs: Ao selecionar uma coluna e manipulá-la fora de um DataFrame, a mesma é tratada como uma Série. End of explanation """ cast['type'].unique() """ Explanation: df[col].unique(): * Mostrar os possíveis valores de uma coluna End of explanation """ cast['type'].value_counts() """ Explanation: Quantos atores e quantas atrizes há no conjunto? 
df[col].value_counts(): * Contagem de quantos registros há para cada valor possível da coluna col (somente se col for categórica) End of explanation """ h = cast.head() h """ Explanation: Operações com colunas End of explanation """ h.year // 10 * 10 # Década h """ Explanation: Operações Aritméticas End of explanation """ h.year > 2000 """ Explanation: Comparações End of explanation """ cast[cast.character == 'Macduff Child'] """ Explanation: Filtrar Por valor específico de uma coluna End of explanation """ h[['title', 'year']] """ Explanation: Por colunas End of explanation """ h[h.n.isnull()] h[h.n.notnull()] h """ Explanation: Por valor nulo ou não nulo End of explanation """ h[[True, False, True, False, False]] h.year > 2000 h[h.year > 2000] h[(h.year > 2000) & (h.year < 2016)] # & para 'and', | para 'or' """ Explanation: Por vetor de booleanos End of explanation """ h.fillna(0) """ Explanation: Preencher valores nulos Por DataFrame End of explanation """ h.n.fillna(0) """ Explanation: Por coluna End of explanation """ cast.year.value_counts()#.head(10) cast.year.value_counts().plot() cast.year.value_counts().sort_index()#.head() cast.year.value_counts().sort_index().plot() bins = pd.np.arange(1880, 2040, 2) cast.year.hist(bins=bins) """ Explanation: Quantos atores atuaram em cada ano? End of explanation """ g = cast.groupby([cast.year // 10 * 10, 'type']).size() g u = g.unstack() u a = u['actor'] - u['actress'] a a.plot() """ Explanation: Qual foi a diferença entre o número de atores e atrizes que atuaram em cada década? End of explanation """ release_dates.head() release_dates.date.dt.year.head() release_dates.date.dt.dayofyear.head() # segunda=0, domingo=6 """ Explanation: Datas End of explanation """ len(release_dates[release_dates.date.dt.dayofweek == 4])*100/len(release_dates) """ Explanation: Quanto % dos filmes foram lançados na sexta-feira? 
End of explanation """ cast.head() release_dates.head() c = cast[cast.name == 'Ellen Page'] c = c.merge(release_dates) c.head() """ Explanation: Merge End of explanation """ titles.sort_values('year').head(1) """ Explanation: Qual o nome e ano do filme mais antigo? End of explanation """ len(titles[titles.year == 1960]) """ Explanation: Quantos filmes são de 1960? End of explanation """ for y in range(1970, 1980): print(y, (titles.year == y).sum()) titles[titles.year // 10 == 197].year.value_counts().sort_index() titles.groupby('year').size().loc[1970:1979] """ Explanation: Quantos filmes são de cada ano dos anos 70? End of explanation """ birth = 1990 len(titles[(titles.year >= birth) & (titles.year <= 2016)]) """ Explanation: Quantos filmes foram lançados desde o ano que você nasceu até hoje? End of explanation """ titles[titles.year <= 1906][['title']] titles.year.min() titles.set_index('year').sort_index().loc[1894:1906] """ Explanation: Quais são os nomes dos filmes até 1906? End of explanation """ titles.title.value_counts().head(15) """ Explanation: Quais são os 15 nomes de filmes mais comuns? End of explanation """ len(cast[cast.name == 'Judi Dench']) """ Explanation: Em quantos filmes Judi Dench atuou? End of explanation """ c = cast c = c[c.name == 'Judi Dench'] c = c[c.n == 1] c.sort_values('year') c =cast c = c[c.name == 'Judi Dench'] c """ Explanation: Liste os filmes nos quais Judi Dench atuou como o ator número 1, ordenado por ano. End of explanation """ c = cast c = c[c.title == 'Sleuth'] c = c[c.year == 1972] c.sort_values('n') """ Explanation: Liste os atores da versão de 1972 de Sleuth pela ordem do rank n. End of explanation """ cast[cast.year == 1985].name.value_counts().head(10) """ Explanation: Quais atores mais atuaram em 1985? 
End of explanation """ from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import confusion_matrix from sklearn.cross_validation import train_test_split import pickle import time time1=time.strftime('%Y-%m-%d_%H-%M-%S') """ Explanation: SciKit Learn (http://scikit-learn.org) Biblioteca Python para mineração e análise de dados Como instalar Anaconda (http://pandas.pydata.org/pandas-docs/stable/install.html#installing-pandas-with-anaconda) Download anaconda: https://www.continuum.io/downloads Instalar Anaconda: https://docs.continuum.io/anaconda/install Disponível para osx-64, linux-64, linux-32, win-64, win-32 e Python 2.7, Python 3.4, e Python 3.5 conda install scikit-learn Pip pip install -U scikit-learn End of explanation """ iris = pd.DataFrame.from_csv('iris.csv', index_col=None, encoding='utf-8') iris.columns target_data = iris['species'] features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width'] feature_data = iris[features] features_train, features_test, target_train, target_test = train_test_split(feature_data, target_data, test_size=0.33, random_state=42) """ Explanation: iris.csv End of explanation """ dt = DecisionTreeClassifier() target_data = iris['species'] features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width'] feature_data = iris[features] dt = dt.fit(features_train, target_train) """ Explanation: Treinar modelo de Árvore de Decisão End of explanation """ with open('iris-dt_'+time1, 'bw') as f: pickle.dump(dt, f) """ Explanation: Salvar modelo End of explanation """ with open('iris-dt_'+time1, 'br') as f: pickle.load(f) """ Explanation: Carregar modelo End of explanation """ predictions = dt.predict(features_test) confusion_matrix(target_test, predictions) """ Explanation: Predição para casos de teste End of explanation """
Upward-Spiral-Science/team1
code/ScrapingImageData_Jay.ipynb
apache-2.0
import matplotlib.pyplot as plt %matplotlib inline import numpy as np import urllib2 from __future__ import division plt.style.use('ggplot') np.random.seed(1) url = ('https://raw.githubusercontent.com/Upward-Spiral-Science' '/data/master/syn-density/output.csv') data = urllib2.urlopen(url) csv = np.genfromtxt(data, delimiter=",",dtype='int')[1:] # don't want first row (labels) data = csv print data[::1000] sizes = [len(np.unique(data[:, i])) for i in range(3)] ranges = [(np.max(data[:, i]), np.min(data[:,i])) for i in range(3)] ranges_diff = [np.max(data[:, i])-np.min(data[:,i]) for i in range(3)] print np.max(data[:,3]) print sizes print ranges print ranges_diff for i, ax in zip(range(3), ['x', 'y', 'z']): print ax + '-axis: ' print 'unique bins (in data): ', np.unique(data[:, i]).size print np.unique(data[:, i]) print """ Explanation: Working with image data from viz.neurodata preliminary End of explanation """ xPix = 135424 yPix = 119808 xPixPerBin = xPix/108.0 yPixPerBin = yPix/86.0 print xPixPerBin, yPixPerBin # Now since each bin is 40 data coordinates we can define the following function to convert from coordinates to pixels. def coords_to_px(xcoord, ycoord): c_vec = np.array([xcoord, ycoord], dtype='float') c_vec /= 39.0 return (c_vec[0]*xPixPerBin, c_vec[1]*yPixPerBin) # check that max coordinate values are close to max pixels print coords_to_px(4192, 3358) # how big is a bin? (just a sanity check, should obviously # be identical to xPixPerBin and yPixPerBin) print coords_to_px(39.0, 39.0) """ Explanation: Using image data Where's the origin (xy-plane)? See blue marker (y axis points down) <img style="width:300px" src="origin.png"> converting from data units (cx, cy, cz) to pixels From the data given, it seems that in the x-direction, we have 108 bins, and in the y-direction, 86. Further support for this claim: 108/85 ~= 450/350; 450x350 the stated dimensions (µm) in Bock 2011). 
End of explanation """ def get_tilenums_at_res(xcoord, ycoord, res): x, y = coords_to_px(xcoord, ycoord) x = np.floor(float(x)/(512*(2**res))) y = np.floor(float(y)/(512*(2**res))) return x,y def get_image_url1(xcoord, ycoord, res, z): x, y = get_tilenums_at_res(xcoord, ycoord, res) x = int(x) y = int(y) end = '/'+reduce(lambda x, y: str(x) +'_'+str(y), [y, x, res]) end += '.png' imgurl = 'http://openconnecto.me/ocp/catmaid/bock11/image/xy/'+str(z) +end return imgurl print get_image_url1(2000, 2000, 0, 2917) """ Explanation: Grabbing images from website Looking through the JavaScript code on viz.neurodata.io, we see that its tiling with 512x512 .png images. At resolution 0, each image has 512x512 "pixels" (correspdonding to the brain images) in it. This can be seen b/c total tiles in the x_direction is 264, and 264x512=135168, approximately our max pixels, and for y-direction, max tiles is 233, 233x512=119296, approximately the max pixels. Similarly for resolution 1, each 512x512 pmg now holds 1024x1024 "pixels", and so on as resolution goes up. Also note that at resolution 1, a single tile is very close in size to a single bin (looking at the second line of output of the previous code block, we see that bins are slightly larger). End of explanation """ print (2917-4156)*-1 """ Explanation: Now just need to figure out z-axis. 
The z values in the image data go from 2917-4156, which is a range of End of explanation """ # NOTE: you can copy just this block into your notebook to use the get_image_url() function xPix = 135424 yPix = 119808 xPixPerBin = xPix/108.0 yPixPerBin = yPix/86.0 max_tiles_x = 264 # found via inspection of viz html/JS code max_tiles_y = 233 # found via inspection of viz html/JS code def coords_to_px(xcoord, ycoord): c_vec = np.array([xcoord, ycoord], dtype='float') c_vec /= 39.0 return (c_vec[0]*xPixPerBin, c_vec[1]*yPixPerBin) def get_tilenums_at_res(xcoord, ycoord, res): x, y = coords_to_px(xcoord, ycoord) if(res == 0): x = np.round(float(x)/512) y = np.round(float(y)/512) else: x = np.round(float(x)/(512*(2**res))) y = np.round(float(y)/(512*(2**res))) return x,y def get_image_url(xcoord, ycoord, zcoord, res=1): """ params: - xcoord, ycoord, zcoord all in terms of coordinates in original data file - res = image resolution, default = 1 since 1024x1024 pixels, approx. the size of a bin--i think returns: (string) url of image """ zcoord += 2917 z = int(zcoord) x, y = get_tilenums_at_res(int(xcoord), int(ycoord), res) x = int(x) y = int(y) if(x > max_tiles_x//(2**res)): x = max_tiles_x//(2**res) if(y > max_tiles_y//(2**res)): y = max_tiles_y//(2**res) end = '/' + reduce(lambda x, y: str(x) +'_'+str(y), [y, x, res]) end += '.png' imgurl = 'http://openconnecto.me/ocp/catmaid/bock11/image/xy/' + str(z) return imgurl+end """ Explanation: So it seems that the z-values in the data correspond approximately to the z-values in the image data, other than a translation of 2917. So let's redefine our function, and put it in one code block so its easy for other people to use... 
End of explanation """ from IPython.display import Image, HTML, display disp_dim = {'width': 200, 'height': 200} # just for quickly setting image width/height m = np.max(data[:, -1]) a = np.where(data[:, -1]==m) args = list(*data[a, (0, 1, 2)])+[0] imgs = [] for r in range(3): args[-1] = r u = get_image_url(*args) print u imgs.append(Image(url=u, **disp_dim)) display(*imgs) """ Explanation: Testing the image scraper w/ some exploratory questions End of explanation """ dens_data = np.copy(data).astype(float) dens_data = dens_data[np.where(dens_data[:,3] != 0)] dens_data[:, 3] = dens_data[:, 4]/dens_data[:, 3] dens_data = dens_data[:,:-1] print np.average(dens_data[:,-1]) a = np.argsort(dens_data[:, -1]) urlsMin, urlsMax = zip(*[(get_image_url(*dens_data[a[i],:-1]), get_image_url(*dens_data[a[-1-i],:-1])) for i in range(9)]) tagsMin = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u) for u in urlsMin ]) tagsMin += '<br> <br>' tagsMax = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u) for u in urlsMax ]) display(HTML(tagsMin)) display(HTML(tagsMax)) """ Explanation: The above should be images of the bin where maximal number of synapses occured. Note changes in resolution let us zoom in and out. What do high density and low density regions look like? 
End of explanation """ # get the clean data x_bounds = (409, 3529) y_bounds = (1564, 3124) def check_in_bounds(row, x_bounds, y_bounds): if row[0] < x_bounds[0] or row[0] > x_bounds[1]: return False if row[1] < y_bounds[0] or row[1] > y_bounds[1]: return False if row[3] == 0: return False return True indices_in_bound, = np.where(np.apply_along_axis(check_in_bounds, 1, csv, x_bounds, y_bounds)) data_clean = csv[indices_in_bound] dens_data = np.copy(data_clean).astype(float) dens_data = dens_data[np.where(dens_data[:,3] != 0)] dens_data[:, 3] = dens_data[:, 4]/dens_data[:, 3] dens_data = dens_data[:,:-1] print np.average(dens_data[:,-1]) a = np.argsort(dens_data[:, -1]) urlsMin, urlsMax = zip(*[(get_image_url(*dens_data[a[i],:-1]), get_image_url(*dens_data[a[-1-i],:-1])) for i in range(9)]) tagsMin = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u) for u in urlsMin ]) tagsMin += '<br> <br>' tagsMax = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u) for u in urlsMax ]) display(HTML(tagsMin)) display(HTML(tagsMax)) """ Explanation: Black regions most likely are masked regions, thus it is actually not surprising too see large amounts of masked for both high and low density areas (since low unmasked increases density, but at the same time lowers synaptic probability). Furthermore, note that the data is binned across many z-slices, while here, we are only looking at one z-slice at a time, thus it is plausible for a high density bin to have an entire slice masked. This also indicates that it would be beneficial to write a function that computes pixel-wise average across z-slices for a bin and returns the corresponding image. We can also only look at the more 'cleaned' data, as many boundary points are likely to be picked up here. 
End of explanation """ from itertools import count avg_unmasked = np.average(data[:,3]) high_unmasked = data[np.where(data[:,3] > avg_unmasked)] low_synapses = [] for s in count(): low_synapses = high_unmasked[np.where(high_unmasked[:,-1]==s)] if low_synapses.size > 0: print s break d_low = low_synapses[0] print d_low imgs = [] imgs.append(Image(url=get_image_url(*d_low[:3]), **disp_dim)) max_s = np.max(data[:, 4]) print max_s high_synapses = [] for s in range(max_s, 0, -1): high_synapses = high_unmasked[np.where(high_unmasked[:,-1]==s)] if high_unmasked.size > 0: print s break d_high = high_synapses[0] print d_high imgs.append(Image(url=get_image_url(*d_high[:3]), **disp_dim)) display(*imgs) # zoom in a resolution display(Image(url=get_image_url(*d_low[:3], res=0), **disp_dim), Image(url=get_image_url(*d_high[:3], res=0), **disp_dim)) """ Explanation: How about regions with high unmasked, and low synapses, and high unmasked with high synapses? End of explanation """ # fairly arbitrarily, look at midpoint for x and z midx, midz = [np.median(np.unique(data[:, i])) for i in [0,2]] y = np.min(data[:, 1]) print midx, y, midz Image(url=get_image_url(midx, y, midz, 2), **disp_dim) # nothing apparently notable, lets view across entire x-axis from itertools import groupby urls = [k for k,_ in groupby( [get_image_url(x, y, midz, 3) for x in np.sort(np.unique(data[:, 0]))])] imgTags = ''.join( ["<img style='width: 20px; margin: 0px; padding-bottom: 3px; float: left;' src='%s' />" % str(u) for u in urls ]) display(HTML(imgTags)) # y value below cutoff urls = [k for k,_ in groupby( [get_image_url(x, 39*10, midz, 3) for x in np.sort(np.unique(data[:, 0]))])] imgTags = ''.join( ["<img style='width: 20px; margin: 0px; padding-bottom: 3px; float: left;' src='%s' />" % str(u) for u in urls ]) display(HTML(imgTags)) """ Explanation: A significant number of the bins were cut off below a given threshold on the y-axis before the data was given to us... What does that line look like? 
Since it appears that the coordinates in our data correspond to the center of a bin, we can see that there was no cut applied on the x-axis, since data starts at 19 and bin "length" is 40, and similarly no cut along z since data starts at 55 and bin "depth" is 110. But along the y-axis data starts at 1369; floor(1369/40)=34, meaning the first 34 bins along the y-axis was cut across all data. End of explanation """ # what bins along this y value have unmasked = 0? for row in data[np.where(data[:, 1] == y)]: if row[3] == 0: print row print np.average(data[np.where(data[:, 1] == y+39*2), 3]) # do same thing for z = 1165, since we observe this is where all the unmasked = 0 bins occur urls = [k for k,_ in groupby( [get_image_url(x, y, 1165, 3) for x in np.sort(np.unique(data[:, 0]))])] imgTags = ''.join( ["<img style='width: 20px; margin: 0px; padding-bottom: 3px; float: left;' src='%s' />" % str(u) for u in urls ]) display(HTML(imgTags)) """ Explanation: First row shows where the data was sliced, second is somewhere before it was sliced (that is, data not included in the set)... Since these black regions probably correspond to regions that are heavily masked, perhaps this is why data split here? End of explanation """ a, = np.where(data[:, 3] == 0) # first unmasked = 0 args = list(data[a[0], (0, 1, 2)]) u = get_image_url(*args) print u Image(url=u, **disp_dim) # middle one args = list(data[a[a.size//2], (0, 1, 2)]) u = get_image_url(*args) print u Image(url=u, **disp_dim) # zoom out middle args += [7] u = get_image_url(*args) print u Image(url=u, **disp_dim) """ Explanation: Let's confirm that black regions do infact correspond to low unmasked. What's unmasked = 0 look like? End of explanation """ # last args = list(data[a[-1], (0, 1, 2)]) u = get_image_url(*args) print u Image(url=u, **disp_dim) """ Explanation: Looks like regions where unmasked = 0 are corresponding to edges.. End of explanation """
nkoep/pymanopt
examples/MoG.ipynb
bsd-3-clause
import autograd.numpy as np np.set_printoptions(precision=2) import matplotlib.pyplot as plt %matplotlib inline # Number of data points N = 1000 # Dimension of each data point D = 2 # Number of clusters K = 3 pi = [0.1, 0.6, 0.3] mu = [np.array([-4, 1]), np.array([0, 0]), np.array([2, -1])] Sigma = [np.array([[3, 0],[0, 1]]), np.array([[1, 1.], [1, 3]]), 0.5 * np.eye(2)] components = np.random.choice(K, size=N, p=pi) samples = np.zeros((N, D)) # For each component, generate all needed samples for k in range(K): # indices of current component in X indices = k == components # number of those occurrences n_k = indices.sum() if n_k > 0: samples[indices, :] = np.random.multivariate_normal(mu[k], Sigma[k], n_k) colors = ['r', 'g', 'b', 'c', 'm'] for k in range(K): indices = k == components plt.scatter(samples[indices, 0], samples[indices, 1], alpha=0.4, color=colors[k%K]) plt.axis('equal') plt.show() """ Explanation: Riemannian Optimisation with Pymanopt for Inference in MoG models The Mixture of Gaussians (MoG) model assumes that datapoints $\mathbf{x}_i\in\mathbb{R}^d$ follow a distribution described by the following probability density function: $p(\mathbf{x}) = \sum_{m=1}^M \pi_m p_\mathcal{N}(\mathbf{x};\mathbf{\mu}m,\mathbf{\Sigma}_m)$ where $\pi_m$ is the probability that the data point belongs to the $m^\text{th}$ mixture component and $p\mathcal{N}(\mathbf{x};\mathbf{\mu}_m,\mathbf{\Sigma}_m)$ is the probability density function of a multivariate Gaussian distribution with mean $\mathbf{\mu}_m \in \mathbb{R}^d$ and psd covariance matrix $\mathbf{\Sigma}_m \in {\mathbf{M}\in\mathbb{R}^{d\times d}: \mathbf{M}\succeq 0}$. 
As an example consider the mixture of three Gaussians with means $\mathbf{\mu}_1 = \begin{bmatrix} -4 \ 1 \end{bmatrix}$, $\mathbf{\mu}_2 = \begin{bmatrix} 0 \ 0 \end{bmatrix}$ and $\mathbf{\mu}_3 = \begin{bmatrix} 2 \ -1 \end{bmatrix}$, covariances $\mathbf{\Sigma}_1 = \begin{bmatrix} 3 & 0 \ 0 & 1 \end{bmatrix}$, $\mathbf{\Sigma}_2 = \begin{bmatrix} 1 & 1 \ 1 & 3 \end{bmatrix}$ and $\mathbf{\Sigma}_3 = \begin{bmatrix} 0.5 & 0 \ 0 & 0.5 \end{bmatrix}$ and mixture probability vector $\boldsymbol{\pi}=\left[0.1, 0.6, 0.3\right]^\top$. Let's generate $N=1000$ samples of that MoG model and scatter plot the samples: End of explanation """ import sys sys.path.insert(0,"..") import autograd.numpy as np from autograd.scipy.special import logsumexp from pymanopt.manifolds import Product, Euclidean, SymmetricPositiveDefinite from pymanopt import Problem from pymanopt.solvers import SteepestDescent # (1) Instantiate the manifold manifold = Product([SymmetricPositiveDefinite(D+1, k=K), Euclidean(K-1)]) # (2) Define cost function # The parameters must be contained in a list theta. 
def cost(theta): # Unpack parameters nu = np.concatenate([theta[1], [0]], axis=0) S = theta[0] logdetS = np.expand_dims(np.linalg.slogdet(S)[1], 1) y = np.concatenate([samples.T, np.ones((1, N))], axis=0) # Calculate log_q y = np.expand_dims(y, 0) # 'Probability' of y belonging to each cluster log_q = -0.5 * (np.sum(y * np.linalg.solve(S, y), axis=1) + logdetS) alpha = np.exp(nu) alpha = alpha / np.sum(alpha) alpha = np.expand_dims(alpha, 1) loglikvec = logsumexp(np.log(alpha) + log_q, axis=0) return -np.sum(loglikvec) problem = Problem(manifold=manifold, cost=cost, verbosity=1) # (3) Instantiate a Pymanopt solver solver = SteepestDescent() # let Pymanopt do the rest Xopt = solver.solve(problem) """ Explanation: Given a data sample the de facto standard method to infer the parameters is the expectation maximisation (EM) algorithm that, in alternating so-called E and M steps, maximises the log-likelihood of the data. In arXiv:1506.07677 Hosseini and Sra propose Riemannian optimisation as a powerful counterpart to EM. Importantly, they introduce a reparameterisation that leaves local optima of the log-likelihood unchanged while resulting in a geodesically convex optimisation problem over a product manifold $\prod_{m=1}^M\mathcal{PD}^{(d+1)\times(d+1)}$ of manifolds of $(d+1)\times(d+1)$ symmetric positive definite matrices. The proposed method is on par with EM and shows less variability in running times. 
The reparameterised optimisation problem for augmented data points $\mathbf{y}_i=[\mathbf{x}_i^\top, 1]^\top$ can be stated as follows: $$\min_{(\mathbf{S}1, ..., \mathbf{S}_m, \boldsymbol{\nu}) \in \mathcal{D}} -\sum{n=1}^N\log\left( \sum_{m=1}^M \frac{\exp(\nu_m)}{\sum_{k=1}^M\exp(\nu_k)} q_\mathcal{N}(\mathbf{y}_n;\mathbf{S}_m) \right)$$ where $\mathcal{D} := \left(\prod_{m=1}^M \mathcal{PD}^{(d+1)\times(d+1)}\right)\times\mathbb{R}^{M-1}$ is the search space $\mathcal{PD}^{(d+1)\times(d+1)}$ is the manifold of symmetric positive definite $(d+1)\times(d+1)$ matrices $\mathcal{\nu}_m = \log\left(\frac{\alpha_m}{\alpha_M}\right), \ m=1, ..., M-1$ and $\nu_M=0$ $q_\mathcal{N}(\mathbf{y}_n;\mathbf{S}_m) = 2\pi\exp\left(\frac{1}{2}\right) |\operatorname{det}(\mathbf{S}_m)|^{-\frac{1}{2}}(2\pi)^{-\frac{d+1}{2}} \exp\left(-\frac{1}{2}\mathbf{y}_i^\top\mathbf{S}_m^{-1}\mathbf{y}_i\right)$ Optimisation problems like this can easily be solved using Pymanopt – even without the need to differentiate the cost function manually! So let's infer the parameters of our toy example by Riemannian optimisation using Pymanopt: End of explanation """ mu1hat = Xopt[0][0][0:2,2:3] Sigma1hat = Xopt[0][0][:2, :2] - mu1hat.dot(mu1hat.T) mu2hat = Xopt[0][1][0:2,2:3] Sigma2hat = Xopt[0][1][:2, :2] - mu2hat.dot(mu2hat.T) mu3hat = Xopt[0][2][0:2,2:3] Sigma3hat = Xopt[0][2][:2, :2] - mu3hat.dot(mu3hat.T) pihat = np.exp(np.concatenate([Xopt[1], [0]], axis=0)) pihat = pihat / np.sum(pihat) """ Explanation: Once Pymanopt has finished the optimisation we can obtain the inferred parameters as follows: End of explanation """ print(mu[0]) print(Sigma[0]) print(mu[1]) print(Sigma[1]) print(mu[2]) print(Sigma[2]) print(pi[0]) print(pi[1]) print(pi[2]) """ Explanation: And convince ourselves that the inferred parameters are close to the ground truth parameters. 
The ground truth parameters $\mathbf{\mu}_1, \mathbf{\Sigma}_1, \mathbf{\mu}_2, \mathbf{\Sigma}_2, \mathbf{\mu}_3, \mathbf{\Sigma}_3, \pi_1, \pi_2, \pi_3$: End of explanation """ print(mu1hat) print(Sigma1hat) print(mu2hat) print(Sigma2hat) print(mu3hat) print(Sigma3hat) print(pihat[0]) print(pihat[1]) print(pihat[2]) """ Explanation: And the inferred parameters $\hat{\mathbf{\mu}}_1, \hat{\mathbf{\Sigma}}_1, \hat{\mathbf{\mu}}_2, \hat{\mathbf{\Sigma}}_2, \hat{\mathbf{\mu}}_3, \hat{\mathbf{\Sigma}}_3, \hat{\pi}_1, \hat{\pi}_2, \hat{\pi}_3$: End of explanation """ class LineSearchMoG: """ Back-tracking line-search that checks for close to singular matrices. """ def __init__(self, contraction_factor=.5, optimism=2, suff_decr=1e-4, maxiter=25, initial_stepsize=1): self.contraction_factor = contraction_factor self.optimism = optimism self.suff_decr = suff_decr self.maxiter = maxiter self.initial_stepsize = initial_stepsize self._oldf0 = None def search(self, objective, manifold, x, d, f0, df0): """ Function to perform backtracking line-search. Arguments: - objective objective function to optimise - manifold manifold to optimise over - x starting point on the manifold - d tangent vector at x (descent direction) - df0 directional derivative at x along d Returns: - stepsize norm of the vector retracted to reach newx from x - newx next iterate suggested by the line-search """ # Compute the norm of the search direction norm_d = manifold.norm(x, d) if self._oldf0 is not None: # Pick initial step size based on where we were last time. alpha = 2 * (f0 - self._oldf0) / df0 # Look a little further alpha *= self.optimism else: alpha = self.initial_stepsize / norm_d alpha = float(alpha) # Make the chosen step and compute the cost there. 
newx, newf, reset = self._newxnewf(x, alpha * d, objective, manifold) step_count = 1 # Backtrack while the Armijo criterion is not satisfied while (newf > f0 + self.suff_decr * alpha * df0 and step_count <= self.maxiter and not reset): # Reduce the step size alpha = self.contraction_factor * alpha # and look closer down the line newx, newf, reset = self._newxnewf(x, alpha * d, objective, manifold) step_count = step_count + 1 # If we got here without obtaining a decrease, we reject the step. if newf > f0 and not reset: alpha = 0 newx = x stepsize = alpha * norm_d self._oldf0 = f0 return stepsize, newx def _newxnewf(self, x, d, objective, manifold): newx = manifold.retr(x, d) try: newf = objective(newx) except np.linalg.LinAlgError: replace = np.asarray([np.linalg.matrix_rank(newx[0][k, :, :]) != newx[0][0, :, :].shape[0] for k in range(newx[0].shape[0])]) x[0][replace, :, :] = manifold.rand()[0][replace, :, :] return x, objective(x), True return newx, newf, False """ Explanation: Et voilà – this was a brief demonstration of how to do inference for MoG models by performing Manifold optimisation using Pymanopt. When Things Go Astray A well-known problem when fitting parameters of a MoG model is that one Gaussian may collapse onto a single data point resulting in singular covariance matrices (cf. e.g. p. 434 in Bishop, C. M. "Pattern Recognition and Machine Learning." 2001). This problem can be avoided by the following heuristic: if a component's covariance matrix is close to being singular we reset its mean and covariance matrix. Using Pymanopt this can be accomplished by using an appropriate line search rule (based on LineSearchBackTracking) -- here we demonstrate this approach: End of explanation """
arogozhnikov/einops
docs/3-einmix-layer.ipynb
mit
from einops.layers.torch import EinMix as Mix """ Explanation: EinMix: universal toolkit for advanced MLP architectures Recent progress in MLP-based architectures demonstrated that very specific MLPs can compete with convnets and transformers (and even outperform them). EinMix allows writing such architectures in a more uniform and readable way. EinMix — building block of MLPs End of explanation """ # other stuff we use import torch from torch import nn from einops.layers.torch import Rearrange, Reduce """ Explanation: Logic of EinMix is very close to the one of einsum. If you're not familiar with einsum, follow these guides first: https://rockt.github.io/2018/04/30/einsum https://towardsdatascience.com/einsum-an-underestimated-function-99ca96e2942e https://theaisummer.com/einsum-attention/ Einsum uniformly describes a number of operations, however EinMix is defined slightly differently. Here is a linear layer, a common block in sequence modelling (e.g. in NLP/speech), written with einsum python weight = &lt;...create tensor...&gt; result = torch.einsum('tbc,cd-&gt;tbd', embeddings, weight) EinMix counter-part is: python mix_channels = Mix('t b c -&gt; t b c_out', weight_shape='c c_out', ...) 
result = mix_channels(embeddings) Main differences compared to plain einsum are: layer takes care of the weight initialization & management hassle weight is not in the comprehension We'll discuss other changes a bit later, now let's implement ResMLP End of explanation """ # No norm layer class Affine(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones(dim)) self.beta = nn.Parameter(torch.zeros(dim)) def forward(self, x): return self.alpha * x + self.beta class Mlp(nn.Module): def __init__(self, dim): super().__init__() self.fc1 = nn.Linear(dim, 4 * dim) self.act = nn.GELU() self.fc2 = nn.Linear(4 * dim, dim) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.fc2(x) return x class ResMLP_Blocks(nn.Module): def __init__(self, nb_patches, dim, layerscale_init): super().__init__() self.affine_1 = Affine(dim) self.affine_2 = Affine(dim) self.linear_patches = nn.Linear(nb_patches, nb_patches) #Linear layer on patches self.mlp_channels = Mlp(dim) #MLP on channels self.layerscale_1 = nn.Parameter(layerscale_init * torch.ones((dim))) # LayerScale self.layerscale_2 = nn.Parameter(layerscale_init * torch.ones((dim))) # parameters def forward(self, x): res_1 = self.linear_patches(self.affine_1(x).transpose(1,2)).transpose(1,2) x = x + self.layerscale_1 * res_1 res_2 = self.mlp_channels(self.affine_2(x)) x = x + self.layerscale_2 * res_2 return x """ Explanation: ResMLP — original implementation Building blocks of ResMLP consist only of linear/affine layers and one activation (GELU). <br /> Let's see how we can rewrite all of the components with Mix. 
We start from a reference code for ResMLP block published in the paper: End of explanation """ def Mlp(dim): return nn.Sequential( nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim), ) def init(Mix_layer, scale=1.): Mix_layer.weight.data[:] = scale if Mix_layer.bias is not None: Mix_layer.bias.data[:] = 0 return Mix_layer class ResMLP_Blocks2(nn.Module): def __init__(self, nb_patches, dim, layerscale_init): super().__init__() self.affine1 = init(Mix('b t c -> b t c', weight_shape='c', bias_shape='c', c=dim)) self.affine2 = init(Mix('b t c -> b t c', weight_shape='c', bias_shape='c', c=dim)) self.mix_patches = Mix('b t c -> b t0 c', weight_shape='t t0', bias_shape='t0', t=nb_patches, t0=nb_patches) self.mlp_channels = Mlp(dim) self.linear1 = init(Mix('b t c -> b t c', weight_shape='c', c=dim), scale=layerscale_init) self.linear2 = init(Mix('b t c -> b t c', weight_shape='c', c=dim), scale=layerscale_init) def forward(self, x): res1 = self.mix_patches(self.affine1(x)) x = x + self.linear1(res1) res2 = self.mlp_channels(self.affine2(x)) x = x + self.linear2(res2) return x """ Explanation: ResMLP &mdash; rewritten Code below is the result of first rewriting: - combination [transpose -> linear -> transpose back] got nicely packed into a single EinMix (mix_patches) <br /> Mix('b t c -&gt; b t0 c', weight_shape='t t0', bias_shape='t0', t=nb_patches, t0=nb_patches) - pattern 'b t c -&gt; b t0 c' tells that b and c are unperturbed, while tokens t-&gt;t0 were mixed - explicit parameter shapes are also quite insightful In new implementation affine layer is also handled by EinMix: <br /> Mix('b t c -&gt; b t c', weight_shape='c', bias_shape='c', c=dim) from the pattern you can see that there is no mixing at all, only multiplication and shift multiplication and shift are defined by weight and bias - and those depend only on a channel thus affine transform is per-channel Linear layer is also handled by EinMix, the only difference compared to affine layer is absence of 
bias We specified that input is 3d and order is btc, not tbc - this is not written explicitly in the original code The only step back that we had to do is change an initialization schema for EinMix for affine and linear layers End of explanation """ def init(layer: Mix, scale=1.): layer.weight.data[:] = scale if layer.bias is not None: layer.bias.data[:] = 0 return layer class ResMLP_Blocks3(nn.Module): def __init__(self, nb_patches, dim, layerscale_init): super().__init__() self.branch_patches = nn.Sequential( init(Mix('b t c -> b t c', weight_shape='c', c=dim), scale=layerscale_init), Mix('b t c -> b t0 c', weight_shape='t t0', bias_shape='t0', t=nb_patches, t0=nb_patches), init(Mix('b t c -> b t c', weight_shape='c', bias_shape='c', c=dim)), ) self.branch_channels = nn.Sequential( init(Mix('b t c -> b t c', weight_shape='c', c=dim), scale=layerscale_init), nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim), init(Mix('b t c -> b t c', weight_shape='c', bias_shape='c', c=dim)), ) def forward(self, x): x = x + self.branch_patches(x) x = x + self.branch_channels(x) return x """ Explanation: ResMLP &mdash; rewritten more Since here in einops-land we care about code being easy to follow, let's make one more transformation. We group layers from both branches, and now the order of operations matches the order as they are written in the code. Could we go further? Actually, yes - nn.Linear layers can also be replaced by EinMix, however they are very organic here since first and last operations in branch_channels show components. Brevity of nn.Linear is benefitial when the context specifies tensor shapes. Other interesing observations: - hard to notice in the original code nn.Linear is preceded by a linear layer (thus latter is redundant or can be fused in the former) - hard to notice in the original code second nn.Linear is followed by an affine layer (thus latter is again redundant) Take time to reorganize your code. This may be quite insightful. 
End of explanation """ x = torch.zeros([32, 128, 128]) for layer in [ ResMLP_Blocks(128, dim=128, layerscale_init=1.), ResMLP_Blocks2(128, dim=128, layerscale_init=1.), ResMLP_Blocks3(128, dim=128, layerscale_init=1.), # scripted versions torch.jit.script(ResMLP_Blocks(128, dim=128, layerscale_init=1.)), torch.jit.script(ResMLP_Blocks2(128, dim=128, layerscale_init=1.)), torch.jit.script(ResMLP_Blocks3(128, dim=128, layerscale_init=1.)), ]: %timeit -n 10 y = layer(x) """ Explanation: ResMLP &mdash; performance There is some fear of using einsum because historically it lagged in performance. Below we run a test and verify that performace didn't change after transition to EinMix End of explanation """ from torch.nn import functional as F class MLP(nn.Module): def __init__(self, num_features, expansion_factor, dropout): super().__init__() num_hidden = num_features * expansion_factor self.fc1 = nn.Linear(num_features, num_hidden) self.dropout1 = nn.Dropout(dropout) self.fc2 = nn.Linear(num_hidden, num_features) self.dropout2 = nn.Dropout(dropout) def forward(self, x): x = self.dropout1(F.gelu(self.fc1(x))) x = self.dropout2(self.fc2(x)) return x class TokenMixer(nn.Module): def __init__(self, num_features, num_patches, expansion_factor, dropout): super().__init__() self.norm = nn.LayerNorm(num_features) self.mlp = MLP(num_patches, expansion_factor, dropout) def forward(self, x): # x.shape == (batch_size, num_patches, num_features) residual = x x = self.norm(x) x = x.transpose(1, 2) # x.shape == (batch_size, num_features, num_patches) x = self.mlp(x) x = x.transpose(1, 2) # x.shape == (batch_size, num_patches, num_features) out = x + residual return out """ Explanation: TokenMixer from MLPMixer — original code Let's now delve into MLPMixer. We start from pytorch implementation by Jake Tae. We'll focus on two components of MLPMixer that don't exist in convnets. 
First component is TokenMixer: End of explanation """ def TokenMixer(num_features: int, n_patches: int, expansion_factor: int, dropout: float): n_hidden = n_patches * expansion_factor return nn.Sequential( nn.LayerNorm(num_features), Mix('b hw c -> b hid c', weight_shape='hw hid', bias_shape='hid', hw=n_patches, hidden=n_hidden), nn.GELU(), nn.Dropout(dropout), Mix('b hid c -> b hw c', weight_shape='hid hw', bias_shape='hw', hw=n_patches, hidden=n_hidden), nn.Dropout(dropout), ) """ Explanation: TokenMixer from MLPMixer — reimplemented We can significantly reduce amount of code by using EinMix. Main caveat addressed by original code is that nn.Linear mixes only last axis. EinMix can mix any axis. Sequential structure is always preferred as it is easier to follow Intentionally there is no residual connection in TokenMixer, because honestly it's not work of Mixer and should be done by caller End of explanation """ def check_sizes(image_size, patch_size): sqrt_num_patches, remainder = divmod(image_size, patch_size) assert remainder == 0, "`image_size` must be divisibe by `patch_size`" num_patches = sqrt_num_patches ** 2 return num_patches class Patcher(nn.Module): def __init__( self, image_size=256, patch_size=16, in_channels=3, num_features=128, ): num_patches = check_sizes(image_size, patch_size) super().__init__() # per-patch fully-connected is equivalent to strided conv2d self.patcher = nn.Conv2d( in_channels, num_features, kernel_size=patch_size, stride=patch_size ) def forward(self, x): patches = self.patcher(x) batch_size, num_features, _, _ = patches.shape patches = patches.permute(0, 2, 3, 1) patches = patches.view(batch_size, -1, num_features) return patches """ Explanation: You may also like independent implementation of MLPMixer from Phil Wang. <br /> Phil solves the issue by repurposing nn.Conv1d to mix on the second dimension. 
Hacky, but does the job MLPMixer's patch embeddings — original Second interesting part of MLPMixer is derived from vision transformers. In the very beginning an image is split into patches, and each patch is linearly projected into embedding. I've taken the part of Jake's code responsible for embedding patches: End of explanation """ def patcher(patch_size=16, in_channels=3, num_features=128): return Mix('b c_in (h hp) (w wp) -> b (h w) c', weight_shape='c_in hp wp c', bias_shape='c', c=num_features, hp=patch_size, wp=patch_size, c_in=in_channels) """ Explanation: MLPMixer's patch embeddings — reimplemented EinMix does this in a single operation. This may require some training at first to understand. Let's go step-by-step: b c_in (h hp) (w wp) -&gt; - 4-dimensional input tensor (BCHW-ordered) is split into patches of shape hp x wp weight_shape='c_in hp wp c'. Axes c_in, hp and wp are all absent in the output: three dimensional patch tensor was mixed to produce a vector of length c -&gt; b (h w) c - output is 3-dimensional. 
All patches were reorganized from h x w grid to one-dimensional sequence of vectors We don't need to provide image_size beforehead, new implementation handles images of different dimensions as long as they can be divided into patches End of explanation """ class WeightedPermuteMLP(nn.Module): def __init__(self, H, W, C, S): super().__init__() self.proj_h = nn.Linear(H * S, H * S) self.proj_w = nn.Linear(W * S, W * S) self.proj_c = nn.Linear(C, C) self.proj = nn.Linear(C, C) self.S = S def forward(self, x): B, H, W, C = x.shape S = self.S N = C // S x_h = x.reshape(B, H, W, N, S).permute(0, 3, 2, 1, 4).reshape(B, N, W, H*S) x_h = self.proj_h(x_h).reshape(B, N, W, H, S).permute(0, 3, 2, 1, 4).reshape(B, H, W, C) x_w = x.reshape(B, H, W, N, S).permute(0, 1, 3, 2, 4).reshape(B, H, N, W*S) x_w = self.proj_w(x_w).reshape(B, H, N, W, S).permute(0, 1, 3, 2, 4).reshape(B, H, W, C) x_c = self.proj_c(x) x = x_h + x_w + x_c x = self.proj(x) return x """ Explanation: Vision Permutator As a third example we consider pytorch-like code from ViP paper. Vision permutator is only slightly more nuanced than previous models, because 1. it operates on spatial dimensions separately, while MLPMixer and its friends just pack all spatial info into one axis. 2. it splits channels into groups called 'segments' Paper provides pseudo-code, so I reworked that to complete module with minimal changes. 
Enjoy: End of explanation """ class WeightedPermuteMLP_new(nn.Module): def __init__(self, H, W, C, seg_len): super().__init__() assert C % seg_len == 0, f"can't divide {C} into segments of length {seg_len}" self.mlp_c = Mix('b h w c -> b h w c0', weight_shape='c c0', bias_shape='c0', c=C, c0=C) self.mlp_h = Mix('b h w (n c) -> b h0 w (n c0)', weight_shape='h c h0 c0', bias_shape='h0 c0', h=H, h0=H, c=seg_len, c0=seg_len) self.mlp_w = Mix('b h w (n c) -> b h w0 (n c0)', weight_shape='w c w0 c0', bias_shape='w0 c0', w=W, w0=W, c=seg_len, c0=seg_len) self.proj = nn.Linear(C, C) def forward(self, x): x = self.mlp_c(x) + self.mlp_h(x) + self.mlp_w(x) return self.proj(x) """ Explanation: That didn't look readable, right? This code is also very inflexible: code in the paper did not support batch dimension, and multiple changes were necessary to allow batch processing. <br /> This process is fragile and easily can result in virtually uncatchable bugs. Now good news: each of these long method chains can be replaced with a single EinMix layer: End of explanation """ x = torch.zeros([32, 32, 32, 128]) for layer in [ WeightedPermuteMLP(H=32, W=32, C=128, S=4), WeightedPermuteMLP_new(H=32, W=32, C=128, seg_len=4), # scripted versions torch.jit.script(WeightedPermuteMLP(H=32, W=32, C=128, S=4)), torch.jit.script(WeightedPermuteMLP_new(H=32, W=32, C=128, seg_len=4)), ]: %timeit -n 10 y = layer(x) """ Explanation: Great, now let's confirm that performance did not deteriorate. End of explanation """
peendebak/SPI-rack
examples/D4.ipynb
mit
# Import SPI Rack, D5a module and D4 module from spirack import SPI_rack, D4_module, D5a_module from time import sleep import numpy as np %matplotlib notebook import matplotlib.pyplot as plt """ Explanation: D4 example notebook Example notebook of the D4 2 channel, 24-bit ADC module. To use this notebook, we need a D4 module, D5a module and a controller module. The controller can either be the C1b/C2 combination or the C1. To use the D4 module, we also need to import the SPI rack class. All communication with the SPI Rack will go through this object. Only one SPI_rack object can be active at a time on the PC. So before running another script, the connection in this one should be closed. We will also import some other libraries for processing and plotting. Matplotlib is used as it is the most common plotting library. End of explanation """ COM_speed = 1e6 # Baud rate, doesn't matter much timeout = 1 # In seconds spi_rack = SPI_rack('COM4', COM_speed, timeout) spi_rack.unlock() # Unlock the controller to be able to send data """ Explanation: Initialisation Open the SPI rack connection and unlock the controller. This is necessary after bootup of the controller module. If not unlocked, no communication with the modules can take place. The virtual COM port baud rate is irrelevant as it doesn't change the actual speed. Timeout can be changed, but 1 second is a good value. End of explanation """ D5a = D5a_module(spi_rack, module=2, reset_voltages=True) """ Explanation: Create a new D5a module object at the correct (set) module address using the SPI object. Here we reset the voltages to zero Volt. For information on the D5a module, see the corresponding webpage and the D5a example notebook. We keep the D5a at the default span of +-4 Volt. End of explanation """ D4 = D4_module(spi_rack, 6) """ Explanation: We now create a new D4 module object in the same way. Make sure that the module number corresponds to the address set in the hardware. 
End of explanation """ setting = 16 D4.set_filter(adc=0, filter_type='sinc3', filter_setting=setting) D4.set_filter(adc=1, filter_type='sinc3', filter_setting=setting) """ Explanation: Next we set the filters inside the ADCs. These filters determine the analog bandwidth, the 50 Hz rejection, the resolution and the data rate. There are two filter types: sinc3 and sinc5. They both have ups and down, as a rule of thumb: for low frequency/high resolution use sinc3, for high frequency use sinc5. The filter setting sets the other variables, it ranges from 0 to 20. How this value relates to the bandwidth etc. can be seen from the filter table on the D4 module webpage. End of explanation """ # First the offset, put 50 Ohm termination on the inputs D4.offset_calibration(0) D4.offset_calibration(1) """ Explanation: Calibration For optimal performance we can calibrate the D4 module for both offset and gain errors. To calibrate the offset error, the user has to put a short or 50 ohm short on the input of the ADC channel that needs to be calibrated. To calibrate the gain error, the user needs to put a 4 Volt signal (from the D5a) on the ADC channel that needs to be calibrated. Everytime the module/rack is powered down this procedure needs to be performed again. It is recommended to let the system 'warm up' before running the calibration routines. End of explanation """ # Next the gain error, apply 4V from the D5a module D5a.set_voltage(0, 4) D5a.set_voltage(1, 4) sleep(1) D4.gain_calibration(0) D4.gain_calibration(1) """ Explanation: For the gain calibration we connect DAC output 1 to ADC input 1, and DAC output 2 to ADC input 2. 
End of explanation """ no_points = 20 input_voltage = np.linspace(-4, 4, no_points) data_ADC1 = np.zeros(no_points) data_ADC2 = np.zeros(no_points) for i, value in enumerate(input_voltage): D5a.set_voltage(0, value) D5a.set_voltage(1, -value) D4.start_conversion(0) D4.start_conversion(1) data_ADC1[i] = D4.get_result(0) data_ADC2[i] = D4.get_result(1) """ Explanation: Measurements We can now perform measurements. For both measurements we connect D5a output 1 and 2 to D4 input 1 and 2 respectively. For the first measurement we will just sweep the two DACs in opposite directions and measure with the D4. Here we use the start_conversion and get_results functions of the D4. The start conversion function starts a new ADC conversion but does not wait for the result to be ready. This allows us to trigger both ADC channel at (roughly) the same time. After they are both trigger we wait for the results. The get_results function will wait until a result comes in, it will block the rest of the code. After triggering and converting, the ADC will keep running. This means that if the user waits a long time, the sample that will be read out will not be from the first conversion! End of explanation """ plt.figure() plt.plot(input_voltage, data_ADC1, '.-', label='ADC1') plt.plot(input_voltage, data_ADC2, '.-', label='ADC2') plt.xlabel('D5a Voltage (V)') plt.ylabel('D4 Voltage (V)') plt.legend() plt.show() """ Explanation: Now we just plot the results to see the expected lines. End of explanation """ no_points = 100 D5a.set_voltage(0, 1) data_ADC1 = np.zeros(no_points) for i in range(no_points): data_ADC1[i] = D4.single_conversion(0) """ Explanation: For the second measurement we only look at channel 1. We put the DAC channel at a fixed voltage and take a number of samples. This allows us to take a look at the noise performance. The ADC filter is in setting 16 out of 20 (higher values give better performance with longer conversion times). 
Setting 16 should give us a bandwith of 13 Hz, 100 dB 50 hz suppression, 23.5 bit resolution and a data rate of 16.67 samples per second. To read out the ADC, we will use the single_conversion function. This function will both trigger and readout the ADC. It will wait for a result to be returned and will block anything else from running. End of explanation """ plt.figure() plt.plot(np.arange(no_points), data_ADC1/1e-6, '.-', label='ADC1') plt.xlabel('Sample') plt.ylabel('ADC1 Voltage (uV)') plt.show() """ Explanation: Now we plot the results on uV scale. End of explanation """ spi_rack.close() """ Explanation: When done with the measurement, it is recommended to close the SPI Rack connection. This will allow other measurement scripts to acces the device. End of explanation """
idies/pyJHTDB
examples/isotropic_spectra_1D.ipynb
apache-2.0
import numpy as np import pyJHTDB from pyJHTDB.dbinfo import mhd1024, isotropic1024coarse from pyJHTDB import libJHTDB import time as tt #import mkl_fft """ Explanation: import numpy and pyJHTDB stuff End of explanation """ %matplotlib inline import matplotlib.pyplot as plt """ Explanation: now import matplotlib and require plots to be shown inline End of explanation """ info = isotropic1024coarse nlines = 4 ntimes = 16 spacing = info['dx'] x = np.zeros((nlines, info['nx'], 3), dtype = np.float32) x0 = np.linspace(0, info['nx']*spacing, num = info['nx'], endpoint=False) e = np.random.randint(3, size=nlines) #line direction. e=0, x-direction; e=1 y-direction; e=2 z-direction time = np.random.choice(info['time'], size = ntimes) """ Explanation: Generate points adequate for the isotropic Navier Stokes data set. End of explanation """ lJHTDB = libJHTDB() lJHTDB.initialize() #Add token auth_token = "edu.jhu.pha.turbulence.testing-201311" #Replace with your own token here lJHTDB.add_token(auth_token) u = [] for t in range(time.shape[0]): print (t) d = 0 for i in e: x[d, :, e] = x0; if i == 0: x[d, :, [1, 2]] = np.random.random(size=(2,1))*info['nx']*spacing elif i == 1: x[d, :, [0, 2]] = np.random.random(size=(2,1))*info['nx']*spacing elif i == 2: x[d, :, [0, 1]] = np.random.random(size=(2,1))*info['nx']*spacing d = d + 1 u.append(lJHTDB.getData(time[t], x, data_set = info['name'], getFunction = 'getVelocity')) lJHTDB.finalize() u = np.array(u).reshape(nlines*ntimes, x.shape[1], -1) """ Explanation: Get the velocity field at the above points, i.e. on lines. Since the flow is quasistationary, whether we're averaging over space or time shouldn't really matter, therefore we're reshaping the array on the last line to make things easier. 
End of explanation """ uk = np.fft.rfft(u, axis = 1) / u.shape[1] k0 = 2*np.pi / (spacing * x.shape[1]) k = k0*np.linspace(1, uk.shape[1]+1, num = uk.shape[1]) ek = .5*np.average(np.sum(np.abs(uk)**2, axis = 2), axis = 0) fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(111) etaK = ((info['nu']**3)/info['diss'])**.25 ax.plot(k*etaK, ek / ((info['diss']**(2./3)) * (etaK**(5./3))), label = '$E(k)\\varepsilon^{-2/3}\\eta_K^{-5/3}$') ax.plot(k*etaK, 2*(k*etaK)**(-5./3) / 3, label = '$\\frac{2}{3}(k \\eta_K)^{-5/3}$') ax.set_xscale('log') ax.set_yscale('log') ax.legend(loc = 'best') """ Explanation: Perform the inverse Fourier transform, and construct an array for the corresponding wavenumbers. While not technically essential, this gives a starting point for the proper treatment of anisotropic grids (such as the channel flow grid). End of explanation """
mbohlool/client-python
examples/notebooks/intro_notebook.ipynb
apache-2.0
from kubernetes import client, config """ Explanation: Managing kubernetes objects using common resource operations with the python client Some of these operations include; create_xxxx : create a resource object. Ex create_namespaced_pod and create_namespaced_deployment, for creation of pods and deployments respectively. This performs operations similar to kubectl create. read_xxxx : read the specified resource object. Ex read_namespaced_pod and read_namespaced_deployment, to read pods and deployments respectively. This performs operations similar to kubectl describe. list_xxxx : retrieve all resource objects of a specific type. Ex list_namespaced_pod and list_namespaced_deployment, to list pods and deployments respectively. This performs operations similar to kubectl get. patch_xxxx : apply a change to a specific field. Ex patch_namespaced_pod and patch_namespaced_deployment, to update pods and deployments respectively. This performs operations similar to kubectl patch, kubectl label, kubectl annotate etc. replace_xxxx : replacing a resource object will update the resource by replacing the existing spec with the provided one. Ex replace_namespaced_pod and replace_namespaced_deployment, to update pods and deployments respectively, by creating new replacements of the entire object. This performs operations similar to kubectl rolling-update, kubectl apply and kubectl replace. delete_xxxx : delete a resource. This performs operations similar to kubectl delete. For Futher information see the Documentation for API Endpoints section in https://github.com/kubernetes-incubator/client-python/blob/master/kubernetes/README.md End of explanation """ config.load_kube_config() """ Explanation: Load config from default location. 
End of explanation """ api_instance = client.ExtensionsV1beta1Api() dep = client.ExtensionsV1beta1Deployment() spec = client.ExtensionsV1beta1DeploymentSpec() """ Explanation: Create API endpoint instance as well as API resource instances (body and specification). End of explanation """ name = "my-busybox" dep.metadata = client.V1ObjectMeta(name=name) spec.template = client.V1PodTemplateSpec() spec.template.metadata = client.V1ObjectMeta(name="busybox") spec.template.metadata.labels = {"app":"busybox"} spec.template.spec = client.V1PodSpec() dep.spec = spec container = client.V1Container() container.image = "busybox:1.26.1" container.args = ["sleep", "3600"] container.name = name spec.template.spec.containers = [container] """ Explanation: Fill required object fields (apiVersion, kind, metadata and spec). End of explanation """ api_instance.create_namespaced_deployment(namespace="default",body=dep) """ Explanation: Create Deployment using create_xxxx command for Deployments. End of explanation """ deps = api_instance.list_namespaced_deployment(namespace="default") for item in deps.items: print("%s %s" % (item.metadata.namespace, item.metadata.name)) """ Explanation: Use list_xxxx command for Deployment, to list Deployments. End of explanation """ api_instance.read_namespaced_deployment(namespace="default",name=name) """ Explanation: Use read_xxxx command for Deployment, to display the detailed state of the created Deployment resource. End of explanation """ dep.metadata.labels = {"key": "value"} api_instance.patch_namespaced_deployment(name=name, namespace="default", body=dep) """ Explanation: Use patch_xxxx command for Deployment, to make specific update to the Deployment. End of explanation """ dep.spec.template.spec.containers[0].image = "busybox:1.26.2" api_instance.replace_namespaced_deployment(name=name, namespace="default", body=dep) """ Explanation: Use replace_xxxx command for Deployment, to update Deployment with a completely new version of the object. 
End of explanation """ api_instance.delete_namespaced_deployment(name=name, namespace="default", body=client.V1DeleteOptions(propagation_policy="Foreground", grace_period_seconds=5)) """ Explanation: Use delete_xxxx command for Deployment, to delete created Deployment. End of explanation """
fcollonval/coursera_data_visualization
BasicLinearRegression.ipynb
mit
# Magic command to insert the graph directly in the notebook %matplotlib inline # Load a useful Python libraries for handling data import pandas as pd import numpy as np import statsmodels.formula.api as smf import scipy.stats as stats import seaborn as sns import matplotlib.pyplot as plt from IPython.display import Markdown, display # Read the data data_filename = r'gapminder.csv' data = pd.read_csv(data_filename, low_memory=False) data = data.set_index('country') """ Explanation: Regression Modeling in Practice Assignment: Test a Basic Linear Regression Model Following is the Python program I wrote to fulfill the second assignment of the Regression Modeling in Practice online course. I decided to use Jupyter Notebook as it is a pretty way to write code and present results. Assignment research question Using the Gapminder database, I would like to see if there is a linear relationship between the income per person (explanatory variable) and the residential consumption of electricity (response variable). Data management For the question I'm interested in, the countries for which data are missing will be discarded. As missing data in Gapminder database are replace directly by NaN no special data treatment is needed. End of explanation """ display(Markdown("Number of countries: {}".format(len(data)))) display(Markdown("Number of variables: {}".format(len(data.columns)))) """ Explanation: General information on the Gapminder data End of explanation """ subdata2 = (data[['incomeperperson', 'relectricperperson']] .assign(income=lambda x: pd.to_numeric(data['incomeperperson'], errors='coerce'), electricity=lambda x: pd.to_numeric(data['relectricperperson'], errors='coerce')) .dropna()) """ Explanation: Variables distribution Before computing the linear regression between the icome per person and the residential electricity consumption, let's have a look at the distributions of the variables. 
End of explanation """ sns.distplot(subdata2['income'], kde=False) plt.xlabel('Income per person (2000 US$)'); """ Explanation: Income per person End of explanation """ subdata2.loc[subdata2['income'] > 45000] subdata2['income'].describe() """ Explanation: From the distribution graph, we can see that the distribution is skewed-right and bimodal. There is also a singular case with high income per capita. That country is : End of explanation """ sns.distplot(subdata2['electricity'], kde=False) plt.xlabel('Residential electricity consumption (kWh)'); subdata2['electricity'].describe() """ Explanation: For this assignment the explanatory variable income per person will have to be centered as its mean is 8784.5. Residential electricity consumption End of explanation """ subdata2.loc[subdata2['electricity'] > 6000] """ Explanation: The residential electricity consumption is also skewed-right. And there are also a couple of countries presenting unusual higher values. Those countries are : End of explanation """ sns.regplot(x='income', y='electricity', data=subdata2) plt.xlabel('Income per person (2000 US$)') plt.ylabel('Residential electricity consumption (kWh)') plt.title('Scatterplot for the association between the income and the residential electricity consumption'); """ Explanation: Bivariate distribution If we look now at the distribution of the two variables in a scatter plot. We can see that our singular countries for the two variables of study result in outlier points. Nothing special was done about them as there are no managment error on them and no reason to discard them. Moreover, the distribution of the data along the regression line shows that the homoscedasticity hypothesis is not well met as the spread of the data along the line is bigger for higher values of income. 
End of explanation """ subdata3 = subdata2.assign(income_centered=lambda x: x['income']-subdata2['income'].mean()) display(Markdown("Income mean after centereing : {:3g}".format(subdata3['income_centered'].mean()))) sns.regplot(x='income_centered', y='electricity', data=subdata3) plt.xlabel('Centered income per person (2000 US$)') plt.ylabel('Residential electricity consumption (kWh)') plt.title('Scatterplot for the association between the income and the residential electricity consumption'); """ Explanation: Centering the explanatory variable For the assignment, the explanatory variable (here the income per person) has to be centered by subracting its mean. End of explanation """ reg1 = smf.ols('electricity ~ income_centered', data=subdata3).fit() reg1.summary() """ Explanation: OLS regression model End of explanation """
pablormier/yabox
notebooks/yabox-de-animations.ipynb
apache-2.0
%matplotlib inline # Load local version of yabox import sys sys.path.insert(0, '../') from yabox import DE, PDE import numpy as np # Imports required for 3d animations import matplotlib import matplotlib.pyplot as plt from matplotlib import cm from mpl_toolkits.mplot3d import Axes3D from matplotlib import animation, rcParams from IPython.display import HTML """ Explanation: <div style="clear: both; width: 100%; overflow: auto"><img src="img/yabox.png" style="width: 250px; float: left"/></div> Yabox: Yet another black-box optimization library for Python - https://github.com/pablormier/yabox This notebook shows how to generate 3d animations of Differential Evolution exploring two dimensional problems Author: Pablo Rodríguez-Mier End of explanation """ # There is a bug in matplotlib that prevents the use of ffmpeg and avconv! # https://github.com/matplotlib/matplotlib/pull/8743 avail_writers = matplotlib.animation.writers.list() # Functions to generate 3d animations def display_animation(anim): plt.close(anim._fig) return HTML(anim.to_html5_video()) def generate_video(problem, algorithm, figsize=(12, 8), frames=100, interval=100): # Try to use tqdm to show progress use_tqdm = True try: from tqdm.auto import tqdm except: use_tqdm = False figure = plt.figure(figsize=figsize) ax = Axes3D(figure) problem.plot3d(ax3d=ax) minz = min(ax.get_zlim()) it = algorithm.geniterator() if use_tqdm: it = iter(tqdm(it, total=frames)) def animate(i): ax.clear() ax.autoscale(enable=False) problem.plot3d(ax3d=ax) status = next(it) population = status.population P = algorithm.denormalize(population) fitness = status.fitness idx = status.best_idx PT = P.T # Individuals ax.scatter(PT[0], PT[1], fitness, s=30, c='#930039', marker='o', depthshade=False, zorder=999) # Shadow projections ax.scatter(PT[0], PT[1], np.full_like(PT[0], minz), alpha=0.5, s=50, c='black', marker='o', edgecolors='none', depthshade=False, zorder=999) anim = animation.FuncAnimation(figure, animate, frames=frames, 
interval=interval, blit=False) return anim """ Explanation: Main functions for plotting and generating the animations End of explanation """ from yabox.problems import Ackley, Levy problem = Ackley() algorithm = DE(problem, problem.bounds) anim = generate_video(problem, algorithm, figsize=(12, 8), frames=10, interval=100) display_animation(anim) """ Explanation: Usage example End of explanation """
jhconning/Dev-II
notebooks/Stata_in_jupyter.ipynb
bsd-3-clause
%matplotlib inline import seaborn as sns import pandas as pd import statsmodels.formula.api as smf import ipystata """ Explanation: Stata and R in a jupyter notebook The jupyter notebook project is now designed to be a 'language agnostic' web-application front-end for any one of many possible software language kernels. We've been mostly using python but there are in fact several dozen other language kernels that can be made to work with it including Julia, R, Matlab, C, Go, Fortran and Stata. The ecosystem of libraries and packages for scientific computing with python is huge and constantly growing but there are still many statistics and econometrics applications that are available as built-in or user-written modules in Stata that have not yet been ported to python or are just simply easier to use in Stata. On the other hand there are some libraries such as python pandas and different visualization libraries such as seaborn or matplotlib that give features that are not available in Stata. Fortunately you don't have to choose between using Stata or python, you can use them both together, to get the best of both worlds. Jupyter running an R kernel R is a powerful open source software environment for statistical computing. R has R markdown which allows you to create R-markdown notebooks similar in concept to jupyter notebooks. But you can also run R inside a jupyter notebook (indeed the name 'Jupyter' is from Julia, iPython and R). See my notebook with notes on Research Discontinuity Design for an example of a jupyter notebook running R. To install an R kernel see the IRkernel project. Jupyter with a Stata Kernel Kyle Barron has created a stata_kernel that offers several useful features including code-autocompletion, inline graphics, and generally fast responses. For this to work you must have a working licensed copy of Stata version 14 or greater on your machine. 
Python and Stata combined in the same notebook Sometimes it may be useful to combine python and Stata in the same notebook. Ties de Kok has written a nice python library called ipystata that allows one to execute Stata code in codeblocks inside an ipython notebook when preceded by a %%stata magic command. This workflow allows you to pass data between python and Stata sessions and to display Stata plots inline. Compared to the stata_kernel option the response times are not quite as fast. The remainder of this notebook illustrates the use of ipystata. A sample ipystata session For more details see the example notebook and documentation on the ipystata repository. End of explanation """ %%stata -o life_df sysuse lifeexp.dta summarize """ Explanation: The following opens a Stata session where we load a dataset and summarize the data. The -o flag following the `%%Stata``` magic instructs it to output or return the dataset in Stata memory as a pandas dataframe in python. End of explanation """ life_df.head(3) """ Explanation: Let's confirm the data was returned as a pandas dataframe: End of explanation """ %%stata -o life_df gen lngnppc = ln(gnppc) regress lexp lngnppc """ Explanation: A simple generate variable command and ols regression in Stata: End of explanation """ model = smf.ols(formula = 'lexp ~ lngnppc', data = life_df) results = model.fit() print(results.summary()) """ Explanation: And the same regression using statsmodels and pandas: End of explanation """ life_df.popgrowth = life_df.popgrowth * 100 life_df.popgrowth.mean() """ Explanation: Back to ipython Let's change one of the variables in the dataframe in python: End of explanation """ %%stata -d life_df summarize """ Explanation: And now let's push the modified dataframe into the Stata dataset with the -d flag: End of explanation """ %%stata -d life_df --graph graph twoway (scatter lexp lngnppc) (lfit lexp lngnppc) """ Explanation: A Stata plot: End of explanation """ sns.set_style("whitegrid") 
g=sns.lmplot(y='lexp', x='lngnppc', col='region', data=life_df,col_wrap=2) """ Explanation: Now on the python side use lmplot from the seaborn library to graph a similar scatter and fitted line but by region. End of explanation """
carian2996/big_data
capstone_project/clustering/scripts/Week 3 pySpark MLlib Clustering.ipynb
gpl-2.0
import pandas as pd from pyspark.mllib.clustering import KMeans, KMeansModel from numpy import array """ Explanation: <br><br><br><br><br><h1 style="font-size:4em;color:#2467C0">Welcome to Week 3</h1><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">This document provides a running example of completing the Week 3 assignment : </p> <ul class="simple"> <li style="line-height:31px;">A shorter version with fewer comments is available as script: sparkMLlibClustering.py</li> <li style="line-height:31px;">To run these commands in Cloudera VM: first run the setup script: setupWeek3.sh</li> <li style="line-height:31px;">You can then copy paste these commands in pySpark. </li> <li style="line-height:31px;">To open pySpark, refer to : <a class="reference external" href="https://www.coursera.org/learn/machinelearningwithbigdata/supplement/GTFQ0/slides-module-2-lesson-3">Week 2</a> and <a class="reference external" href="https://www.coursera.org/learn/machinelearningwithbigdata/supplement/RH1zz/download-lesson-2-slides-spark-mllib-clustering">Week 4</a> of the Machine Learning course</li> <li style="line-height:31px;">Note that your dataset may be different from what is used here, so your results may not match with those shown here</li> </ul></div> End of explanation """ adclicksDF = pd.read_csv('./ad-clicks.csv') adclicksDF = adclicksDF.rename(columns=lambda x: x.strip()) #remove whitespaces from headers """ Explanation: <br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Step 1: Attribute Selection</h1> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Import Data</h1><br><br> <p style="line-height:31px;">First let us read the contents of the file ad-clicks.csv. The following commands read in the CSV file in a table format and removes any extra whitespaces. 
So, if the CSV contained ' userid ' it becomes 'userid'. <br><br> Note that you must change the path to ad-clicks.csv to the location on your machine, if you want to run this command on your machine. </p> </div> <br><br><br><br> End of explanation """ adclicksDF.head(n=5) """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Let us display the first 5 lines of adclicksDF:</p> </div> <br><br><br><br> End of explanation """ adclicksDF['adCount'] = 1 """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Next, We are going to add an extra column to the ad-clicks table and make it equal to 1. We do so to record the fact that each ROW is 1 ad-click. You will see how this will become useful when we sum up this column to find how many ads did a user click.</p> </div> <br><br><br><br> End of explanation """ adclicksDF.head(n=5) """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Let us display the first 5 lines of adclicksDF and see if a new column has been added:</p> </div> <br><br><br><br> End of explanation """ buyclicksDF = pd.read_csv('./buy-clicks.csv') buyclicksDF = buyclicksDF.rename(columns=lambda x: x.strip()) #removes whitespaces from headers """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Next, let us read the contents of the file buy-clicks.csv. As before, the following commands read in the CSV file in a table format and removes any extra whitespaces. So, if the CSV contained ' userid ' it becomes 'userid'. <br><br> Note that you must change the path to buy-clicks.csv to the location on your machine, if you want to run this command on your machine. 
</p> </div> <br><br><br><br> End of explanation """ buyclicksDF.head(n=5) """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Let us display the first 5 lines of buyclicksDF:</p> </div> <br><br><br><br> End of explanation """ userPurchases = buyclicksDF[['userId','price']] #select only userid and price userPurchases.head(n=5) """ Explanation: <br><br> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Feature Selection</h1><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">For this exercise, we can choose from buyclicksDF, the 'price' of each app that a user purchases as an attribute that captures user's purchasing behavior. The following command selects 'userid' and 'price' and drops all other columns that we do not want to use at this stage.</p> </div> <br><br><br><br> End of explanation """ useradClicks = adclicksDF[['userId','adCount']] useradClicks.head(n=5) #as we saw before, this line displays first five lines """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Similarly, from the adclicksDF, we will use the 'adCount' as an attribute that captures user's inclination to click on ads. 
The following command selects 'userid' and 'adCount' and drops all other columns that we do not want to use at this stage.</p> </div> <br><br><br><br> End of explanation """ adsPerUser = useradClicks.groupby('userId').sum() adsPerUser = adsPerUser.reset_index() adsPerUser.columns = ['userId', 'totalAdClicks'] #rename the columns """ Explanation: <br><br> <h1 style="font-family: Arial; font-size:1.5em;color:#2462C0; font-style:bold">Step 2: Training Data Set Creation</h1> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Create the first aggregate feature for clustering</h1><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">From each of these single ad-clicks per row, we can now generate total ad clicks per user. Let's pick a user with userid = 3. To find out how many ads this user has clicked overall, we have to find each row that contains userid = 3, and report the total number of such rows. The following commands sum the total number of ads per user and rename the columns to be called 'userid' and 'totalAdClicks'. <b> Note that you may not need to aggregate (e.g. sum over many rows) if you choose a different feature and your data set already provides the necessary information. </b> In the end, we want to get one row per user, if we are performing clustering over users. 
</div> <br><br><br><br> End of explanation """ adsPerUser.head(n=5) """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Let us display the first 5 lines of 'adsPerUser' to see if there is a column named 'totalAdClicks' containing total adclicks per user.</p> </div> <br><br><br><br> End of explanation """ revenuePerUser = userPurchases.groupby('userId').sum() revenuePerUser = revenuePerUser.reset_index() revenuePerUser.columns = ['userId', 'revenue'] #rename the columns revenuePerUser.head(n=5) """ Explanation: <br><br> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Create the second aggregate feature for clustering</h1><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Similar to what we did for adclicks, here we find out how much money in total did each user spend on buying in-app purchases. As an example, let's pick a user with userid = 9. To find out the total money spent by this user, we have to find each row that contains userid = 9, and report the sum of the column'price' of each product they purchased. The following commands sum the total money spent by each user and rename the columns to be called 'userid' and 'revenue'. <br><br> <p style="line-height:31px;"> <b> Note: </b> that you can also use other aggregates, such as sum of money spent on a specific ad category by a user or on a set of ad categories by each user, game clicks per hour by each user etc. You are free to use any mathematical operations on the fields provided in the CSV files when creating features. 
</p> </div> <br><br><br><br> End of explanation """ combinedDF = adsPerUser.merge(revenuePerUser, on='userId') #userId, adCount, price """ Explanation: <br><br> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Merge the two tables</h1><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Lets see what we have so far. We have a table called revenuePerUser, where each row contains total money a user (with that 'userid') has spent. We also have another table called adsPerUser where each row contains total number of ads a user has clicked. We will use revenuePerUser and adsPerUser as features / attributes to capture our users' behavior.<br><br> Let us combine these two attributes (features) so that each row contains both attributes per user. Let's merge these two tables to get one single table we can use for K-Means clustering. </div> <br><br><br><br> End of explanation """ combinedDF.head(n=5) #display how the merged table looks """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Let us display the first 5 lines of the merged table. <b> Note: Depending on what attributes you choose, you may not need to merge tables. You may get all your attributes from a single table. </b></p> </div> <br><br><br><br> End of explanation """ trainingDF = combinedDF[['totalAdClicks','revenue']] trainingDF.head(n=5) """ Explanation: <br><br> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Create the final training dataset</h1><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Our training data set is almost ready. At this stage we can remove the 'userid' from each row, since 'userid' is a computer generated random number assigned to each user. It does not capture any behavioral aspect of a user. 
One way to drop the 'userid', is to select the other two columns. </p> </div> <br><br><br><br> End of explanation """ trainingDF.shape """ Explanation: <br><br> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Display the dimensions of the training dataset</h1><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Display the dimension of the training data set. To display the dimensions of the trainingDF, simply add .shape as a suffix and hit enter.</p> </div> <br><br><br><br> End of explanation """ sqlContext = SQLContext(sc) pDF = sqlContext.createDataFrame(trainingDF) parsedData = pDF.rdd.map(lambda line: array([line[0], line[1]])) #totalAdClicks, revenue """ Explanation: <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">The following two commands convert the tables we created into a format that can be understood by the KMeans.train function. <br><br> line[0] refers to the first column. line[1] refers to the second column. 
If you have more than 2 columns in your training table, modify this command by adding line[2], line[3], line[4] ...</p> </div> <br><br><br><br> End of explanation """ my_kmmodel = KMeans.train(parsedData, 2, maxIterations=10, runs=10, initializationMode="random") """ Explanation: <br> <h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Step 3: Train to Create Cluster Centers</h1> <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Train KMeans model</h1><br><br> <br><br><br><br> <div style="color:black;font-family: Arial; font-size:1.1em;line-height:65%"> <p style="line-height:31px;">Here we are creating two clusters as denoted in the second argument.</p> </div> <br><br><br><br> End of explanation """ print(my_kmmodel.centers) """ Explanation: <br><br><h1 style="font-family: Arial; font-size:1.5em;color:#2462C0">Display the centers of two clusters formed</h1><br><br> End of explanation """
statsmodels/statsmodels.github.io
v0.12.1/examples/notebooks/generated/robust_models_1.ipynb
bsd-3-clause
%matplotlib inline from statsmodels.compat import lmap import numpy as np from scipy import stats import matplotlib.pyplot as plt import statsmodels.api as sm """ Explanation: M-Estimators for Robust Linear Modeling End of explanation """ norms = sm.robust.norms def plot_weights(support, weights_func, xlabels, xticks): fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(support, weights_func(support)) ax.set_xticks(xticks) ax.set_xticklabels(xlabels, fontsize=16) ax.set_ylim(-.1, 1.1) return ax """ Explanation: An M-estimator minimizes the function $$Q(e_i, \rho) = \sum_i~\rho \left (\frac{e_i}{s}\right )$$ where $\rho$ is a symmetric function of the residuals The effect of $\rho$ is to reduce the influence of outliers $s$ is an estimate of scale. The robust estimates $\hat{\beta}$ are computed by the iteratively re-weighted least squares algorithm We have several choices available for the weighting functions to be used End of explanation """ help(norms.AndrewWave.weights) a = 1.339 support = np.linspace(-np.pi*a, np.pi*a, 100) andrew = norms.AndrewWave(a=a) plot_weights(support, andrew.weights, ['$-\pi*a$', '0', '$\pi*a$'], [-np.pi*a, 0, np.pi*a]); """ Explanation: Andrew's Wave End of explanation """ help(norms.Hampel.weights) c = 8 support = np.linspace(-3*c, 3*c, 1000) hampel = norms.Hampel(a=2., b=4., c=c) plot_weights(support, hampel.weights, ['3*c', '0', '3*c'], [-3*c, 0, 3*c]); """ Explanation: Hampel's 17A End of explanation """ help(norms.HuberT.weights) t = 1.345 support = np.linspace(-3*t, 3*t, 1000) huber = norms.HuberT(t=t) plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]); """ Explanation: Huber's t End of explanation """ help(norms.LeastSquares.weights) support = np.linspace(-3, 3, 1000) lst_sq = norms.LeastSquares() plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]); """ Explanation: Least Squares End of explanation """ help(norms.RamsayE.weights) a = .3 support = np.linspace(-3*a, 3*a, 1000) 
ramsay = norms.RamsayE(a=a) plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]); """ Explanation: Ramsay's Ea End of explanation """ help(norms.TrimmedMean.weights) c = 2 support = np.linspace(-3*c, 3*c, 1000) trimmed = norms.TrimmedMean(c=c) plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]); """ Explanation: Trimmed Mean End of explanation """ help(norms.TukeyBiweight.weights) c = 4.685 support = np.linspace(-3*c, 3*c, 1000) tukey = norms.TukeyBiweight(c=c) plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]); """ Explanation: Tukey's Biweight End of explanation """ x = np.array([1, 2, 3, 4, 500]) """ Explanation: Scale Estimators Robust estimates of the location End of explanation """ x.mean() """ Explanation: The mean is not a robust estimator of location End of explanation """ np.median(x) """ Explanation: The median, on the other hand, is a robust estimator with a breakdown point of 50% End of explanation """ x.std() """ Explanation: Analogously for the scale The standard deviation is not robust End of explanation """ stats.norm.ppf(.75) print(x) sm.robust.scale.mad(x) np.array([1,2,3,4,5.]).std() """ Explanation: Median Absolute Deviation $$ median_i |X_i - median_j(X_j)|) $$ Standardized Median Absolute Deviation is a consistent estimator for $\hat{\sigma}$ $$\hat{\sigma}=K \cdot MAD$$ where $K$ depends on the distribution. For the normal distribution for example, $$K = \Phi^{-1}(.75)$$ End of explanation """ sm.robust.scale.iqr(x) """ Explanation: Another robust estimator of scale is the Interquartile Range (IQR) $$\left(\hat{X}{0.75} - \hat{X}{0.25}\right),$$ where $\hat{X}_{p}$ is the sample p-th quantile and $K$ depends on the distribution. The standardized IQR, given by $K \cdot \text{IQR}$ for $$K = \frac{1}{\Phi^{-1}(.75) - \Phi^{-1}(.25)} \approx 0.74,$$ is a consistent estimator of the standard deviation for normal data. 
End of explanation """ np.random.seed(12345) fat_tails = stats.t(6).rvs(40) kde = sm.nonparametric.KDEUnivariate(fat_tails) kde.fit() fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(kde.support, kde.density); print(fat_tails.mean(), fat_tails.std()) print(stats.norm.fit(fat_tails)) print(stats.t.fit(fat_tails, f0=6)) huber = sm.robust.scale.Huber() loc, scale = huber(fat_tails) print(loc, scale) sm.robust.mad(fat_tails) sm.robust.mad(fat_tails, c=stats.t(6).ppf(.75)) sm.robust.scale.mad(fat_tails) """ Explanation: The IQR is less robust than the MAD in the sense that it has a lower breakdown point: it can withstand 25\% outlying observations before being completely ruined, whereas the MAD can withstand 50\% outlying observations. However, the IQR is better suited for asymmetric distributions. See Rousseeuw & Croux (1993), 'Alternatives to the Median Absolute Deviation' for more details. The default for Robust Linear Models is MAD another popular choice is Huber's proposal 2 End of explanation """ from statsmodels.graphics.api import abline_plot from statsmodels.formula.api import ols, rlm prestige = sm.datasets.get_rdataset("Duncan", "carData", cache=True).data print(prestige.head(10)) fig = plt.figure(figsize=(12,12)) ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige') ax1.scatter(prestige.income, prestige.prestige) xy_outlier = prestige.loc['minister', ['income','prestige']] ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16) ax2 = fig.add_subplot(212, xlabel='Education', ylabel='Prestige') ax2.scatter(prestige.education, prestige.prestige); ols_model = ols('prestige ~ income + education', prestige).fit() print(ols_model.summary()) infl = ols_model.get_influence() student = infl.summary_frame()['student_resid'] print(student) print(student.loc[np.abs(student) > 2]) print(infl.summary_frame().loc['minister']) sidak = ols_model.outlier_test('sidak') sidak.sort_values('unadj_p', inplace=True) print(sidak) fdr = 
ols_model.outlier_test('fdr_bh') fdr.sort_values('unadj_p', inplace=True) print(fdr) rlm_model = rlm('prestige ~ income + education', prestige).fit() print(rlm_model.summary()) print(rlm_model.weights) """ Explanation: Duncan's Occupational Prestige data - M-estimation for outliers End of explanation """ dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data from matplotlib.patches import Ellipse fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1') ax.scatter(*dta.values.T) # highlight outliers e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r') ax.add_patch(e); ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6), arrowprops=dict(facecolor='black', shrink=0.05, width=2), horizontalalignment='left', verticalalignment='bottom', clip_on=True, # clip to the axes bounding box fontsize=16, ) # annotate these with their index for i,row in dta.loc[dta['log.Te'] < 3.8].iterrows(): ax.annotate(i, row, row + .01, fontsize=14) xlim, ylim = ax.get_xlim(), ax.get_ylim() from IPython.display import Image Image(filename='star_diagram.png') y = dta['log.light'] X = sm.add_constant(dta['log.Te'], prepend=True) ols_model = sm.OLS(y, X).fit() abline_plot(model_results=ols_model, ax=ax) rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit() abline_plot(model_results=rlm_mod, ax=ax, color='red') """ Explanation: Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points Data is on the luminosity and temperature of 47 stars in the direction of Cygnus. End of explanation """ infl = ols_model.get_influence() h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs hat_diag = infl.summary_frame()['hat_diag'] hat_diag.loc[hat_diag > h_bar] sidak2 = ols_model.outlier_test('sidak') sidak2.sort_values('unadj_p', inplace=True) print(sidak2) fdr2 = ols_model.outlier_test('fdr_bh') fdr2.sort_values('unadj_p', inplace=True) print(fdr2) """ Explanation: Why? 
Because M-estimators are not robust to leverage points. End of explanation """ l = ax.lines[-1] l.remove() del l weights = np.ones(len(X)) weights[X[X['log.Te'] < 3.8].index.values - 1] = 0 wls_model = sm.WLS(y, X, weights=weights).fit() abline_plot(model_results=wls_model, ax=ax, color='green') """ Explanation: Let's delete that line End of explanation """ yy = y.values[:,None] xx = X['log.Te'].values[:,None] """ Explanation: MM estimators are good for this type of problem, unfortunately, we do not yet have these yet. It's being worked on, but it gives a good excuse to look at the R cell magics in the notebook. End of explanation """ params = [-4.969387980288108, 2.2531613477892365] # Computed using R print(params[0], params[1]) abline_plot(intercept=params[0], slope=params[1], ax=ax, color='red') """ Explanation: Note: The R code and the results in this notebook has been converted to markdown so that R is not required to build the documents. The R results in the notebook were computed using R 3.5.1 and robustbase 0.93. 
```ipython %load_ext rpy2.ipython %R library(robustbase) %Rpush yy xx %R mod <- lmrob(yy ~ xx); %R params <- mod$coefficients; %Rpull params ``` ipython %R print(mod) Call: lmrob(formula = yy ~ xx) \--&gt; method = "MM" Coefficients: (Intercept) xx -4.969 2.253 End of explanation """ np.random.seed(12345) nobs = 200 beta_true = np.array([3, 1, 2.5, 3, -4]) X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1)) # stack a constant in front X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X] mc_iter = 500 contaminate = .25 # percentage of response variables to contaminate all_betas = [] for i in range(mc_iter): y = np.dot(X, beta_true) + np.random.normal(size=200) random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs)) y[random_idx] = np.random.uniform(-750, 750) beta_hat = sm.RLM(y, X).fit().params all_betas.append(beta_hat) all_betas = np.asarray(all_betas) se_loss = lambda x : np.linalg.norm(x, ord=2)**2 se_beta = lmap(se_loss, all_betas - beta_true) """ Explanation: Exercise: Breakdown points of M-estimator End of explanation """ np.array(se_beta).mean() all_betas.mean(0) beta_true se_loss(all_betas.mean(0) - beta_true) """ Explanation: Squared error loss End of explanation """
bjshaw/phys202-project
galaxy_project/Ib) Base Question Visualization.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np from scipy.integrate import odeint from IPython.html.widgets import interact, interactive, fixed from IPython.display import YouTubeVideo from plotting_function import plotter,static_plot,com_plot,static_plot_com """ Explanation: Base Question Visualization End of explanation """ f = open('base_question_data.npz','r') r = np.load('base_question_data.npz') sol_base = r['arr_0'] ic_base = r['arr_1'] f.close() """ Explanation: Reading data back from npz file End of explanation """ interact(plotter,ic=fixed(ic_base),sol=fixed(sol_base),n=(0,len(np.linspace(0,1.2,100))-1,1)); """ Explanation: I use interact on my plotter function to plot the positions of the stars and galaxies in my system at every time value, with a slider to choose which time value to view End of explanation """ YouTubeVideo('C1RoQTVU-ao',width=600,height=600) """ Explanation: As can be seen, the stars behave similarly to the stars from Toomre and Toomre's paper For an easier visual experience, I also created an animation, featured in the Animator notebook. 
The animation was uploaded to Youtube and is shown below: End of explanation """ specific_t = [0,25,30,35,40,45,50,55,60,70,80,90,100] plt.figure(figsize=(20,30)) i = 1 for n in specific_t: if i > 13: break else: plt.subplot(5,3,i) static_plot(ic_base,sol_base,n) i += 1 plt.tight_layout() """ Explanation: Static plots at certain times: End of explanation """ interact(com_plot,ic=fixed(ic_base),sol=fixed(sol_base),M=fixed(1e11),S=fixed(1e11),n=(0,len(np.linspace(0,1.2,100))-1,1)); """ Explanation: Interactive plot around center of mass between the two galaxies: End of explanation """ YouTubeVideo('O1_HkrwtvPw',width=600,height=600) """ Explanation: Animation around center of mass: End of explanation """ specific_t = [0,25,30,35,40,45,50,55,60,70,80,90,100] plt.figure(figsize=(20,30)) i = 1 for n in specific_t: if i > 13: break else: plt.subplot(5,3,i) static_plot_com(ic_base,sol_base,1e11,1e11,n) i += 1 plt.tight_layout() """ Explanation: Static plots around center of mass: End of explanation """
NREL/bifacial_radiance
docs/tutorials/8 - Advanced topics - Calculating Power Output and Electrical Mismatch.ipynb
bsd-3-clause
import bifacial_radiance import os from pathlib import Path testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP'/ 'Tutorial_08') if not os.path.exists(testfolder): os.makedirs(testfolder) simulationName = 'tutorial_8' moduletype = "test-module" albedo = 0.25 lat = 37.5 lon = -77.6 # Scene variables nMods = 20 nRows = 7 hub_height = 1.5 # meters gcr = 0.33 # Traking parameters cumulativesky = False limit_angle = 60 angledelta = 0.01 backtrack = True #makeModule parameters x = 1 y = 2 xgap = 0.01 zgap = 0.05 ygap = 0.0 # numpanels=1 anyways so it doesnt matter anyway numpanels = 1 axisofrotationTorqueTube = True diameter = 0.1 tubetype = 'Oct' material = 'black' tubeParams = {'diameter':diameter, 'tubetype':tubetype, 'material':material, 'axisofrotation':axisofrotationTorqueTube, 'visible':True} # Analysis parmaeters startdate = '11_06_08' # Options: mm_dd, mm_dd_HH, mm_dd_HHMM, YYYY-mm-dd_HHMM enddate = '11_06_10' sensorsy = 12 demo = bifacial_radiance.RadianceObj(simulationName, path=testfolder) demo.setGround(albedo) epwfile = demo.getEPW(lat,lon) metdata = demo.readWeatherFile(epwfile, starttime=startdate, endtime=enddate) mymodule = demo.makeModule(name=moduletype, x=x, y=y, xgap=xgap, ygap = ygap, zgap=zgap, numpanels=numpanels, tubeParams=tubeParams) pitch = mymodule.sceney/gcr sceneDict = {'pitch':pitch,'hub_height':hub_height, 'nMods': nMods, 'nRows': nRows} demo.set1axis(limit_angle = limit_angle, backtrack = backtrack, gcr = gcr, cumulativesky = cumulativesky) demo.gendaylit1axis() demo.makeScene1axis(module=mymodule, sceneDict=sceneDict) demo.makeOct1axis() demo.analysis1axis(sensorsy = sensorsy); """ Explanation: 8 - Advanced topics - Calculating Power Output and Electrical Mismatch Nonuniform rear-irradiance on bifacial PV systems can cause additional mismatch loss, which may not be appropriately captured in PV energy production estimates and software. 
<img src="../images_wiki/AdvancedJournals/Mismatch_Definition_Example.PNG" width="600"> The analysis.py module in bifacial_radiance comes with functions to calculate power output, electrical mismatch, and some other irradiance calculations. This is the procedure used for this proceedings and submitted journals, which have much more detail on the procedure. • Deline, C., Ayala Pelaez, S., MacAlpine, S., Olalla, C. Estimating and Parameterizing Mismatch Power Loss in Bifacial Photovoltaic Systems. (submitted Progress in PV on Sept. 30, 2019) • Deline C, Ayala Pelaez S, MacAlpine S, Olalla C. Bifacial PV System Mismatch Loss Estimation &amp; Parameterization. Presented in: 36th EU PVSEC, Marseille Fr. Slides: https://www.nrel.gov/docs/fy19osti/74885.pdf. Proceedings: https://www.nrel.gov/docs/fy20osti/73541.pdf • Ayala Pelaez S, Deline C, MacAlpine S, Olalla C. Bifacial PV system mismatch loss estimation. Poster presented at the 6th BifiPV Workshop, Amsterdam 2019. https://www.nrel.gov/docs/fy19osti/74831.pdf and http://bifipv-workshop.com/index.php?id=amsterdam-2019-program Ideally mismatch losses M should be calculated for the whole year, and then the mismatch loss factor to apply to Grear "Lrear" required by due diligence softwares can be calculated: <img src="../images_wiki/AdvancedJournals/Lrear_solving.PNG" width="400"> In this journal we will explore calculating mismatch loss M for a reduced set of hours. A procedure similar to that in Tutorial 3 will be used to generate various hourly irradiance measurements in the results folder, which the mismatch.py module will load and analyze. Analysis is done with PVMismatch, so this must be installed. STEPS: 1. Run an hourly simulation 2. Do mismatch analysis on the results. <a id='step1'></a> 1. Run an hourly simulation This will generate the results over which we will perform the mismatch analysis. Here we are doing only 1 day to make this faster. 
End of explanation """ resultfolder = os.path.join(testfolder, 'results') writefiletitle = "Mismatch_Results.csv" portraitorlandscape='portrait' # Options are 'portrait' or 'landscape' bififactor=0.9 # Bifaciality factor DOES matter now, as the rear irradiance values will be multiplied by this factor. numcells= 72# Options are 72 or 96 at the moment. downsamplingmethod = 'byCenter' # Options are 'byCenter' or 'byAverage'. bifacial_radiance.mismatch.analysisIrradianceandPowerMismatch(testfolder=resultfolder, writefiletitle=writefiletitle, portraitorlandscape=portraitorlandscape, bififactor=bififactor, numcells=numcells) print ("Your hourly mismatch values are now saved in the file above! :D") """ Explanation: <a id='step2'></a> 2. Do mismatch analysis on the results There are various things that we need to know about the module at this stage. <ul> <li> Orientation: If it was simulated in portrait or landscape orientation. </li> <li> Number of cells in the module: options right now are 72 or 96 </li> <li> Bifaciality factor: this is how well the rear of the module performs compared to the front of the module, and is a spec usually found in the datasheet. </li> </ul> Also, if the number of sampling points (sensorsy) from the result files does not match the number of cells along the panel orientation, downsampling or upsamplinb will be peformed. For this example, the module is in portrait mode (y > x), so there will be 12 cells along the collector width (numcellsy), and that's why we set sensorsy = 12 during the analysis above. These are the re-sampling options. To downsample, we suggest sensorsy >> numcellsy (for example, we've tested sensorsy = 100,120 and 200) - Downsamping by Center - Find the center points of all the sensors passed - Downsampling by Average - averages irradiances that fall on what would consist on the cell - Upsample End of explanation """
rvuduc/cse6040-ipynbs
26--logreg-mle-numopt.ipynb
bsd-3-clause
import pandas as pd import seaborn as sns import numpy as np from IPython.display import display %matplotlib inline import plotly.plotly as py from plotly.graph_objs import * # @YOUSE: Fill in your credentials (user ID, API key) for Plotly here py.sign_in ('USERNAME', 'APIKEY') %reload_ext autoreload %autoreload 2 import cse6040utils """ Explanation: CSE 6040, Fall 2015 [26]: Logistic regression, Part 2 Maximum likelihood estimation and numerical optimization 101 In Lab 25, we looked at geometric solutions to the binary classification problem. In particular, the data is a set of labeled points in some vector space; our goal is to model how points are labeled by dividing (or partitioning) the space. Recall that you did a quick experiment in which you partitioned the space for a set of synthetic data points manually; in this lab, you will see a technique to determine this partitioning automatically. Some of today's code snippets use Plotly, so you may need to refer back to how to make plots in Plotly. The most important one is how to log-in to the service, for which you'll need to look up your Plotly API key. Anyway, you may need to refer to the references below. Our Jupyter notebook where we did stuff using plotly: ipynb Plotly Python reference on line and scatter plots: https://plot.ly/python/line-and-scatter/ Also, this lab builds on the iterative numerical optimization idea from Lab 24, which is known as gradient ascent or gradient descent (also, steepest ascent/descent), depending on whether one is maximizing or minimizing some quantity. Lastly, several of the routines we used last time to manipulate points and visualize them have been moved into the cse6040utils module; so please update that: cse6040utils.py Preliminaries End of explanation """ from cse6040utils import lin_discr """ Explanation: Notation Recall the basic notation used in Lab 25. Definition 1. There are $m$ labeled data points in a $d$-dimensional vector space. 
The $i$-th observation is represented by its label, $l_i$, and its augmented coordinates ("point"), $x_i \equiv (1.0, x_{i,1}, x_{i,2}, \ldots, x_{i,d})^T$; that is, the point $x_i$ is a column vector of length $d+1$ with a dummy value of 1.0 in the very first (0-th) entry. The matrix of data coordinates, $X$, stacks these points rowwise into a $m \times (d+1)$ matrix, i.e., $$ \begin{array}{rcl} X \equiv \left(\begin{array}{c} x_0^T \ x_1^T \ \vdots \ x_{m-1}^T \end{array}\right) & = & \left(\begin{array}{ccccc} 1 & x_{0,1} & x_{0,2} & \cdots & x_{0,d} \ 1 & x_{1,1} & x_{1,2} & \cdots & x_{1,d} \ & & & \vdots & \ 1 & x_{m-1,1} & x_{m-1,2} & \cdots & x_{m-1,d} \ \end{array}\right). \end{array} $$ We will take the labels to be a binary column vector, $l \equiv \left(l_0, l_1, \ldots, l_{m-1}\right)^T$. Definition 2. A linear discriminant is a function that assigns a "score" to a given point $x$ by linearly transforming its coordinates. It may be specified by a $d+1$-dimensional column vector of coefficients, $\theta \equiv (\theta_0, \theta_1, \ldots, \theta_d)^T$; the score is then taken to be $\theta^T x$. In the cse6040utils module, use lin_discr (X, theta) to evaluate a linear discriminant given by theta on an augmented coordinate matrix X. End of explanation """ from cse6040utils import heaviside """ Explanation: Definition 3. The heaviside function maps strictly positive values to the value 1 and non-positive values to 0: $$ \begin{array}{rcl} H(y) & \equiv & \left{\begin{array}{ll} 1 & \mathrm{if}\ y > 0 \ 0 & \mathrm{if}\ y \leq 0 \end{array}\right.. \end{array} $$ In the cse6040utils module, use heaviside (Y) to apply $H(y)$ elementwise to any Numpy multidimensional array object. End of explanation """ from cse6040utils import logistic """ Explanation: Definition 4. The logistic function maps a real number continuously into the interval (0, 1): $$ \begin{array}{rcl} G(y) & \equiv & \frac{1}{1 + e^{-y}}. 
\end{array} $$ Recall that $G(y) \rightarrow 0$ as $y \rightarrow -\infty$, and $G(y) \rightarrow 1$ as $y \rightarrow +\infty$. In the cse6040utils module, use logistic (Y) to apply $G(y)$ elementwise to any Numpy multidimensional array object. End of explanation """ df = pd.read_csv ('http://vuduc.org/cse6040/logreg_points_train.csv') points = np.insert (df.as_matrix (['x_1', 'x_2']), 0, 1.0, axis=1) labels = df.as_matrix (['label']) from cse6040utils import make_2d_scatter_traces print "Number of points:", len (points) traces = make_2d_scatter_traces (points, labels) py.iplot (traces) """ Explanation: Properties of the logistic function. The following is a handy list of properties of $G(y)$. $$ \begin{array}{rcll} G(y) & = & \frac{e^y}{e^y + 1} & \mathrm{(P1)} \ G(-y) & = & 1 - G(y) & \mathrm{(P2)} \ \dfrac{G(y)}{G(-y)} = \dfrac{G(y)}{1 - G(y)} & = & e^y & \mathrm{(P3)} \ \dfrac{dG}{dy} & = & G(y) G(-y) & \mathrm{(P4)} \ {\dfrac{d}{dy}} {\left[ \ln G(y) \right]} & = & G(-y) & \mathrm{(P5)} \ {\dfrac{d}{dy}} {\ln \left[ 1 - G(y) \right]} & = & -G(y) & \mathrm{(P6)} \ \end{array} $$ The sample data We'll use the same synthetic data set of labeled points as in Lab 25. The following code cells loads them in, creating a matrix points corresponding to $X$ and a column vector labels corresponding to $l$. 
End of explanation """ from cse6040utils import check_labels from cse6040utils import np_col_vec from cse6040utils import gen_lin_discr_trace #theta = np_col_vec ([0., -1., 3.]) #theta = np_col_vec ([-0.55, -2., -0.5]) theta = np_col_vec ([-1.35, -6.5, -1.]) # Generate 0/1 labels for your discriminant: is_correct = check_labels (points, labels, fun=lambda X: heaviside (lin_discr (X, theta))) print "Number of misclassified points:", (len (points) - sum (is_correct))[0] print "\n(Run the code cell below to visualize the results.)" # Visually inspect the above results traces = make_2d_scatter_traces (points, is_correct) traces.append (gen_lin_discr_trace (points, theta)) # Plot it! layout = Layout (xaxis=dict (range=[-1.25, 2.25]), yaxis=dict (range=[-3.25, 2.25])) fig = Figure (data=traces, layout=layout) py.iplot (fig) """ Explanation: Demo: Manual classification Recall that in the last class you generated label predictions using $H(\theta^T x)$, that is, by using the heaviside function with a linear discriminant. You determined the discriminant coefficients, $\theta$, manually. End of explanation """ # Use Numpy's handy meshgrid() to create a regularly-spaced grid of values. # http://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html x1 = np.linspace (-2., +2., 100) x2 = np.linspace (-2., +2., 100) x1_grid, x2_grid = np.meshgrid (x1, x2) h_grid = heaviside (theta[0] + theta[1]*x1_grid + theta[2]*x2_grid) trace_grid = Contour (x=x1, y=x2, z=h_grid) py.iplot ([trace_grid]) x_logit_1d = np.linspace (-6.0, +6.0, 101) y_logit_1d = logistic (x_logit_1d) trace_logit_1d = Scatter (x=x_logit_1d, y=y_logit_1d) py.iplot ([trace_logit_1d]) g_grid = logistic (theta[0] + theta[1]*x1_grid + theta[2]*x2_grid) trace_logit_grid = Contour (x=x1, y=x2, z=g_grid) py.iplot ([trace_logit_grid]) """ Explanation: Heaviside vs. 
logistic functions Where we ended in the last class was comparing the use of the heaviside function against use of the logistic function, both assuming a linear discriminant scoring function. Here is that comparison visually. End of explanation """ def log_likelihood (theta, l, X): # @YOUSE: Complete this function to evaluate the log-likelihood pass """ Explanation: Determining $\theta$ via Maximum Likelihood Estimation Previously, you determined $\theta$ for our synthetic dataset experimentally. Can you compute a good $\theta$ automatically? One of the standard techniques in statistics is to perform a maximum likelihood estimation (MLE) of a model's parameters, $\theta$. Indeed, MLE is basis for the "statistical" way to derive the normal equations in the case of linear regression, though that is of course not how we encountered it in this class. "Likelihood" as an objective function MLE derives from the following idea. Consider the joint probability of observing all of the labels, given the points and the parameters, $\theta$: $$ \mathrm{Pr}[l\,|\,X, \theta]. $$ Suppose these observations are independent and identically distributed (i.i.d.). Then the joint probability can be factored as the product of individual probabilities, $$ \begin{array}{rcl} \mathrm{Pr}[l\,|\,X,\theta] = \mathrm{Pr}[l_0, \ldots, l_{m-1}\,|\,x_0, \ldots, x_{m-1}, \theta] & = & \mathrm{Pr}[l_0\,|\,x_0, \theta] \cdots \mathrm{Pr}[l_{m-1}\,|\,x_{m-1}, \theta] \ & = & \displaystyle \prod_{i=0}^{m-1} \mathrm{Pr}[l_i\,|\,x_i,\theta]. \end{array} $$ The maximum likelihood principle says that you should try to choose a parameter $\theta$ that maximizes the chances ("likelihood") of seeing these particular observations. Thus, we can simply reinterpret the preceding probability as an objective function to optimize. 
Mathematically, it is equivalent and convenient to consider the logarithm of the likelihood, or log-likelihood, as the objective function, defining it by, $$ \begin{array}{rcl} \mathcal{L}(\theta; l, X) & \equiv & \log \left{ \displaystyle \prod_{i=0}^{m-1} \mathrm{Pr}[l_i\,|\,x_i,\theta] \right} \ & = & \displaystyle \sum_{i=0}^{m-1} \log \mathrm{Pr}[l_i\,|\,x_i,\theta]. \end{array} $$ We are using the symbol $\log$, which could be taken in any convenient base, such as the natural logarithm ($\ln y$) or the information theoretic base-two logarithm ($\log_2 y$). The MLE procedure then consists of two steps: For the problem at hand, determine a suitable choice for $\mathrm{Pr}[l_i\,|\,x_i,\theta]$. Run any optimization procedure to find the $\theta$ that maximizes $\mathcal{L}(\theta; l, X)$. Example: Logistic regression Let's say you have decided that the logistic function, $G(\theta^T x_i) = G(x_i^T \theta)$, is a good model of the probability of producing a label $l_i$ given the point $x_i$. Under the i.i.d. assumption, we can interpret the label $l_i$ as being the result of a Bernoulli trial (e.g., a biased coin flip), where the probability of success ($l_i=1$) is defined as $g_i = g_i(\theta) \equiv G(x_i^T \theta)$. Thus, $$ \begin{array}{rcl} \mathrm{Pr}[l_i \, | \, x_i, \theta] & \equiv & g_i^{l_i} \cdot \left(1 - g_i\right)^{1 - l_i}. \end{array} $$ The log-likelihood in turn becomes, $$ \begin{array}{rcl} \mathcal{L}(\theta; l, X) & = & \displaystyle \sum_{i=0}^{m-1} l_i \ln g_i + (1-l_i) \ln (1-g_i) \ & = & \displaystyle \sum_{i=0}^{m-1} l_i \ln \dfrac{g_i}{1-g_i} + \ln (1-g_i) \ & = & \sum_{i=0}^{m-1} l_i \theta^T x_i + \ln (1-g_i). \end{array} $$ You can write the log-likelihood more compactly in a matrix-vector notation, with the following notational conventions. Convention 1. Let $u \equiv (1, \ldots, 1)^T$ be a column vector of all ones, with its length inferred from context. 
Then, the sum of the coordinate vectors (${x_i}$) could be written as $$\sum_{i=0}^{m-1} x_i = \left(x_0\ x_1\ \cdots\ x_{m-1}\right) \cdot \left(\begin{array}{c} 1 \ 1 \ \vdots \ 1 \end{array}\right) = X^T u. $$ Convention 2. Let $A = \left(a_{ij}\right)$ be any matrix and let $f(y)$ be any function that we have defined by default to accept a scalar argument $y$ and produce a scalar result. For instance, $f(y) = \ln y$ or $f(y) = G(y)$. Then, assume that $B = f(A)$ applies $f(\cdot)$ elementwise to $A$, returning a matrix $B$ whose elements $b_{ij} = f(a_{ij})$. With these notational conventions, here are two different ways to write the log-likelihood. Exercise. Show the following. $$ \begin{array}{rrcl} (\mathrm{V1}) & \mathcal{L}(\theta; l, X) & = & l^T \ln G(X \theta) + (1-l)^T \ln [1 - G(X \theta)] \ (\mathrm{V2}) & \mathcal{L}(\theta; l, X) & = & l^T X \theta + u^T \ln G(-X \theta) \end{array} $$ Exercise. Implement the log-likelihood function in Python. End of explanation """ def gradient_log_likelihood (theta, l, X): """Returns the gradient of the log-likelihood.""" # @YOUSE: Implement the gradient for the logistic regression # model's log-likelihood pass MAX_STEP = 500 ALPHA = 0.5 # Get the data coordinate matrix, X, and labels vector, l X = points l = labels.astype (dtype=float) # Store *all* guesses, for subsequent analysis thetas = np.zeros ((3, MAX_STEP+1)) for t in range (MAX_STEP): theta_t = thetas[:, t:t+1] # @YOUSE: Fill in the code to compute thetas[:, t+1:t+2] pass thetas[:, t+1:t+2] = theta_t + ALPHA*delta_t theta_gd = thetas[:, MAX_STEP:] print "Your (hand) solution:", theta.T.flatten () print "Computed solution:", theta_gd print "\n=== Comparisons ===" print "\n\\theta_0/\\theta_2:", \ "manual =", theta[0]/theta[2], \ ", vs. MLE (via gradient ascent) =", theta_gd[0]/theta_gd[2] print "\n\\theta_1/\\theta_2:", \ "manual =", theta[1]/theta[2], \ ", vs. 
MLE (via gradient ascent) =", theta_gd[1]/theta_gd[2] # Generate 0/1 labels for computed discriminant using the logistic function def gen_label_logreg (X, theta): return heaviside (logistic (lin_discr (X, theta)) - 0.5) def check_correct_logreg (l, X, theta): return check_labels (X, l, fun=lambda X: gen_label_logreg (X, theta)) def count_correct_logreg (l, X, thetas): num_steps = thetas.shape[1] num_correct = np.zeros (num_steps, dtype=int) for t in range (num_steps): theta_t = thetas[:, t:t+1] is_correct = check_correct_logreg (l, X, theta_t) num_correct[t] = sum (is_correct)[0] return num_correct num_correct_gd = count_correct_logreg (l, X, thetas) is_correct_gd = check_correct_logreg (l, X, thetas[:, -1:]) print "Number of misclassified points using MLE via gradient ascent:", \ (len (points) - num_correct_gd[-1]) print "\n(Run the code cell below to visualize the results.)" # Visually inspect the above results traces_gd = make_2d_scatter_traces (points, is_correct_gd) traces_gd.append (gen_lin_discr_trace (points, theta_gd)) # Plot it! layout_gd = Layout (xaxis=dict (range=[-1.25, 2.25]), yaxis=dict (range=[-3.25, 2.25])) fig_gd = Figure (data=traces_gd, layout=layout_gd) py.iplot (fig_gd) """ Explanation: To optimize the log-likelihood with respect to the parameters, $\theta$, you'd like to do the moral equivalent of taking its derivative, setting it to zero, and then solving for $\theta$. For example, recall that in the case of linear regression via least squares minimization, carrying out this process produced an analytic solution for the parameters, which was to solve the normal equations. Unfortunately, for logistic regression---or for most log-likelihoods you are likely to ever write down---you cannot usually derive an analytic solution. Therefore, you will need to resort to numerical optimization procedures. 
Numerical optimization via gradient (or steepest) ascent/descent The simplest procedure to maximize a function is gradient ascent (or steepest ascent). If instead you are minimizing the function, then the equivalent procedure is gradient (or steepest) descent. The basic idea, in 1-D Suppose we wish to find the maximum of a scalar function $f(x)$ in one dimension, i.e., $x$ is also a scalar. At the maximum, $\dfrac{df(x)}{dx} = 0$. Suppose instead that $\dfrac{df}{dx} \neq 0$ and consider the value of $f$ at a nearby point, $x + p$, as given approximately by a truncated Taylor series: $$ \begin{array}{rcl} f(x + p) & \approx & f(x) + p \dfrac{df(x)}{dx} + \frac{1}{2} p^2 \dfrac{d^2 f(x)}{dx^2}. \end{array} $$ To make progress toward maximizing $f(x)$, you'd like to choose $p$ so that $f(x+p) > f(x)$. One way is to choose $p=\alpha \cdot \mathrm{sign} \left(\dfrac{df}{dx}\right)$, where $0 < \alpha \ll 1$ is "small:" $$ \begin{array}{rcl} f \left(x + \alpha \cdot \mathrm{sign} \dfrac{df}{dx} \right) & \approx & f(x) - \alpha \left|\dfrac{df}{dx}\right| + \frac{1}{2} \alpha^2 \cdot \mathrm{sign} \left(\dfrac{df}{dx}\right) \dfrac{d^2 f}{dx^2}. \end{array} $$ If $\alpha$ is small enough, then you can neglect the $\mathcal{O}(\alpha^2)$ term and $f(x + p)$ will be larger than $f(x)$, thus making progress toward a maximum. This scheme is the basic idea: starting from some initial guess $x$, refine the guess by taking a small step $p$ in the direction of the derivative, i.e., $\mathrm{sign} \dfrac{df}{dx}$. The basic idea in higher dimensions Now consider $f(x)$ where $x$ is instead a vector, rather than a scalar. Then the value of $f$ at a nearby point $f(x + p)$, where $p$ is a vector, becomes $$ \begin{array}{rcl} f(x + p) \approx f(x) + p^T \nabla_x f(x), \end{array} $$ where $\nabla_x f(x)$ is the vector derivative, or gradient, of $f$. Just as in the 1-D case, you want a step $p$ such that $f(x + p) > f(x)$. 
To make as much progress as possible, it would seem reasonable to choose $p$ to be parallel to $\nabla_x\,f(x)$, that is, proportional to the gradient. This intuition motivates the following choice of $p$: $$ \begin{array}{rcl} p \equiv \alpha \frac{\nabla_x\,f(x)}{\|\nabla_x\,f(x)\|}. \end{array} $$ Again, $\alpha$ is a kind of fudge factor. You need to choose it to be small enough that the high-order terms of the Taylor approximation become negligible, yet large enough that you can make reasonable progress. Applying the gradient ascent algorithm to MLE The procedure applied to maximizing the log-likelihood is as follows. Start with some initial guess, $\theta(0)$. At each iteration $t \geq 0$ of the procedure, let $\theta(t)$ be the current guess. Compute the direction of steepest ascent by evaluating the gradient, $\Delta_t \equiv \nabla_{\theta(t)} \left{\mathcal{L}(\theta(t); l, X)\right}$. Take a step in the direction of the gradient, $\theta(t+1) \leftarrow \theta(t) + \alpha \dfrac{\Delta_t}{\|\Delta_t\|}$, where $\alpha$ is a suitably chosen fudge factor. This procedure should smell eerily like the one in Lab 24! And just as in Lab 24, the tricky bit is how to choose $\alpha$. One additional and slight distinction between this procedure and the Lab 24 procedure is that here we are optimizing using the full dataset, rather than processing data points one at a time. (That is, the step iteration variable $t$ used above is not used in exactly the same way as the step iteration $k$ was used in Lab 24.) Another question is, how do we know this procedure will converge to the global maximum, rather than, say, a local maximum? For that you need a deeper analysis of a specific $\mathcal{L}(\theta; l, X)$, to show, for instance, that it is convex in $\theta$. Example: A gradient ascent algorithm for logistic regression Let's apply the gradient ascent procedure to the logistic regression problem, in order to determine a good $\theta$. Exercise. 
Show the following: $$ \begin{array}{rcl} \nabla_\theta \left{\mathcal{L}(\theta; l, X)\right} & = & X^T \left[ l - G(X \cdot \theta)\right]. \end{array} $$ Exercise. Implement the gradient ascent procedure to determine $\theta$, and try it out on the sample data. In your solution, we'd like you to store all guesses in the matrix thetas, so that you can later see how the $\theta(t)$ values evolve. To extract a particular column t, use the notation, theta[:, t:t+1]. This notation is necessary to preserve the "shape" of the column as a column vector. End of explanation """ n1_ll = 100 x1_ll = np.linspace (-20., 0., n1_ll) n2_ll = 100 x2_ll = np.linspace (-20., 0., n2_ll) x1_ll_grid, x2_ll_grid = np.meshgrid (x1_ll, x2_ll) ll_grid = np.zeros ((n1_ll, n2_ll)) for i1 in range (n1_ll): for i2 in range (n2_ll): theta_i1_i2 = np.array ([[thetas[0, MAX_STEP]], [x1_ll_grid[i1][i2]], [x2_ll_grid[i1][i2]] ]) ll_grid[i1][i2] = log_likelihood (theta_i1_i2, l, X) trace_ll_grid = Contour (x=x1_ll, y=x2_ll, z=ll_grid) trace_thetas = Scatter (x=thetas[1, :], y=thetas[2, :], mode='markers+lines') py.iplot ([trace_ll_grid, trace_thetas]) """ Explanation: Exercise. Make a contour plot of the log-likelihood and draw the trajectory taken by the $\theta(t)$ values laid on top of it. In particular, your function, log_likelihood (theta, l, X), should return the value of the log-likelihood given a column vector of discriminant coefficients, theta; a column vector of labels, l; and a coordinate matrix, X. The gradient ascent trajectory Let's take a look at how the gradient ascent algorithm's progress looks. Try changing the $\phi$ parameter and see how it affects the results. End of explanation """ A = np.array ([[1, 2, 3], [4, 5, 6]]) B = np.array ([[-1, 2, -3], [4, -5, 6]]) print np.multiply (A, B) # elementwise product print np.multiply (A, B[:, 0:1]) # "auto-extend" version """ Explanation: Numerical optimization via Newton's method The fudge factor in gradient ascent might be troubling. 
Can you choose the step size or direction in a better or more principled way? One idea is Newton's method, summarized below. The basic idea, in 1-D Suppose you start at a point $x$ and, assuming you are not yet at the optimum, you wish to take a step $x + p$ so that $f(x + p)$ is the maximum. However, instead of trying to maximize $f(x + p)$ directly, let's replace $f(x + p)$ with some approximation $q(p)$, and then choose a $p$ to maximize $q(p)$. A simple choice for $q(p)$ is a quadratic function in $p$. This choice is motivated by two factors: (a) since it's quadratic, it should have some sort of extreme point (and hopefully an actual maximum), and (b) it is a higher-order approximation than a linear one, and so hopefully more accurate than a linear one as well. $$ \begin{array}{rcl} f(x + p) & \approx & f(x) + p \dfrac{df}{dx} + \frac{1}{2} p^2 \dfrac{d^2 f}{dx^2} & \equiv & q(p). \end{array} $$ To maximize $q(p)$, take its derivative and then solve for the $p_$ such that $q(p_) = 0$: $$ \begin{array}{rcl} \left.\dfrac{dq}{dp}\right|{p=p} & = & \dfrac{df}{dx} + p_ \dfrac{d^2 f}{dx^2} = 0 \ \implies p_* & = & -\left(\dfrac{d^2 f}{dx^2}\right)^{-1} \dfrac{df}{dx}. \end{array} $$ Generalizing to higher dimensions To see how this procedure works in higher dimensions, you will need not only the gradient of $f(x)$, but also its Hessian, which is the moral equivalent of a second derivative. Definition: the Hessian. Let $f(v)$ be a function that takes a vector $v$ of length $n$ as input and returns a scalar. The Hessian of $f(v)$ is an $n \times n$ matrix, $H_v(f)$, whose entries are all $n^2$ possible second derivatives with respect to the components of $v$. That is, the $(i, j)$ element of $H_v(f)$ is given by $h_{ij}$ such that $$ \begin{array}{rcl} h_{ij} & \equiv & \dfrac{\partial^2}{\partial v_i \partial v_j} f(v). \end{array} $$ Armed with a Hessian, the Newton step is defined as follows, by direct analogy to the 1-D case. 
First, the Taylor series approximation of $f(x + p)$ for multidimensional variables becomes, $$ \begin{array}{rcl} f(x + p) & \approx & f(x) + {p^T \, \nabla_x \, f} + {\frac{1}{2}\,p^T H_x(f) \, p} & \equiv & q(p). \end{array} $$ As in the 1-D case, we want to find an extreme point of $q(p)$. Taking its "derivative" (gradient), $\nabla_p q$, and setting it to 0 yields, $$ \begin{array}{rcl} \nabla_p \, q(p) & = & \nabla_x \, f(x) + H_x(f) \, p = 0 \ \implies H_x(f) \cdot p & = & -\, \nabla_x \, f(x). \end{array} $$ In other words, to choose the next step $p$, Newton's method suggests you must solve a system of linear equations, where the matrix is the Hessian of $f$ and the right-hand side is the negative gradient of $f$. Summary: Newton's method Summarizing the main ideas from above, Newton's method to maximize the scalar objective function $f(x)$ where $x$ is a vector, consists of the following steps: Start with some initial guess $x(0)$. At step $t$, compute the search direction $p(t)$ by solving $H_{x(t)}(f) \cdot p(t) = -\, \nabla_x \, f(x(t))$. Compute a new (and hopefully improved) guess by the update, $x(t+1) \leftarrow x(t) + p(t)$. Example: Newton's method for logistic regression To perform MLE for the logistic regression model using Newton's method, you need both the gradient of the log-likelihood as well as the Hessian. You already know how to compute the gradient from the preceding exercises; so what about the Hessian? Notationally, that calculation will be a little bit easier to write down and program with the following definition. Definition: Elementwise product. Let $A \equiv (a_{ij})$ and $B \equiv (b_{ij})$ be $m \times n$ matrices. Denote the elementwise product of $A$ and $B$ by $A \odot B$. That is, if $C = A \odot B$, then element $c_{ij} = a_{ij} \cdot b_{ij}$. If $A$ is $m \times n$ but $B$ is instead just $m \times 1$, then we will "auto-extend" $B$. 
Put differently, if $B$ has the same number of rows as $A$ but only 1 column, then we will take $C = A \odot B$ to have elements $c_{ij} = a_{ij} \cdot b_{i}$. In Python, you can use np.multiply() for elementwise multiplication of Numpy arrays. End of explanation """ def hessian_log_likelihood (theta, l, X): """Returns the Hessian of the log-likelihood.""" # @YOUSE: Implement the Hessian pass """ Explanation: Exercise. Show that the Hessian of the log-likelihood for logistic regression is, $$ \begin{array}{rcl} H_{\theta} \left( \mathcal{L}(\theta; l, X) \right) & = & \left( X \odot G(X \theta) \right)^T \left( X \odot G(-X \theta) \right). \end{array} $$ Exercise. Implement a function to compute the Hessian of the log-likelihood. End of explanation """ MAX_STEP = 10 # Get the data coordinate matrix, X, and labels vector, l X = points l = labels.astype (dtype=float) # Store *all* guesses, for subsequent analysis thetas_newt = np.zeros ((3, MAX_STEP+1)) for t in range (MAX_STEP): theta_t = thetas_newt[:, t:t+1] # @YOUSE: Fill in this code pass thetas_newt[:, t+1:t+2] = theta_t + delta_t theta_newt = thetas_newt[:, MAX_STEP:] print "Your (hand) solution:", theta.T.flatten () print "Computed solution:", theta_newt num_correct_newt = count_correct_logreg (l, X, thetas_newt) is_correct_newt = check_correct_logreg (l, X, thetas_newt[:, -1:]) print "\nNumber of misclassified points using MLE:", (len (points) - num_correct_newt[-1]) print "\n(Run the code cell below to visualize the results.)" print "\n=== Comparisons ===" print "\n\\theta_0/\\theta_2:", \ "manual =", theta[0]/theta[2], \ ", vs. Newton =", theta_newt[0]/theta_newt[2] print "\n\\theta_1/\\theta_2:", \ "manual =", theta[1]/theta[2], \ ", vs. Newton =", theta_newt[1]/theta_newt[2] # Visually inspect the above results traces_newt = make_2d_scatter_traces (points, is_correct_newt) traces_newt.append (gen_lin_discr_trace (points, theta_newt)) # Plot it! 
layout_newt = Layout (xaxis=dict (range=[-1.25, 2.25]), yaxis=dict (range=[-3.25, 2.25])) fig_newt = Figure (data=traces_newt, layout=layout_newt) py.iplot (fig_newt) trace_thetas_newt = Scatter (x=thetas_newt[1, :], y=thetas_newt[2, :], mode='markers+lines') py.iplot ([trace_ll_grid, trace_thetas_newt]) I_gd = range (len (num_correct_gd)) trace_gd_mistakes = Scatter (x=I_gd, y=len (points) - num_correct_gd, mode='markers+lines', name='Gradient descent' ) I_newt = range (len (num_correct_newt)) trace_newt_mistakes = Scatter (x=I_newt, y=len (points) - num_correct_newt, mode='markers+lines', name='Newton' ) layout_mistakes = Layout (xaxis=dict (type='log'), yaxis=dict (type='log')) fig_mistakes = Figure (data=[trace_gd_mistakes, trace_newt_mistakes], layout=layout_mistakes) py.iplot (fig_mistakes) """ Explanation: Exercise. Complete the code below, which implements Newton's method. End of explanation """
bowenliu16/deepchem
examples/broken/protein_ligand_complex_notebook.ipynb
gpl-3.0
%load_ext autoreload %autoreload 2 %pdb off # set DISPLAY = True when running tutorial DISPLAY = False # set PARALLELIZE to true if you want to use ipyparallel PARALLELIZE = False import warnings warnings.filterwarnings('ignore') dataset_file= "../datasets/pdbbind_core_df.pkl.gz" from deepchem.utils.save import load_from_disk dataset = load_from_disk(dataset_file) """ Explanation: deepchem: Machine Learning models for Drug Discovery Tutorial 1: Basic Protein-Ligand Complex Featurized Models Written by Evan Feinberg and Bharath Ramsundar Copyright 2016, Stanford University Welcome to the deepchem tutorial. In this iPython Notebook, one can follow along with the code below to learn how to fit machine learning models with rich predictive power on chemical datasets. Overview: In this tutorial, you will trace an arc from loading a raw dataset to fitting a cutting edge ML technique for predicting binding affinities. This will be accomplished by writing simple commands to access the deepchem Python API, encompassing the following broad steps: Loading a chemical dataset, consisting of a series of protein-ligand complexes. Featurizing each protein-ligand complexes with various featurization schemes. Fitting a series of models with these featurized protein-ligand complexes. Visualizing the results. First, let's point to a "dataset" file. This can come in the format of a CSV file or Pandas DataFrame. Regardless of file format, it must be columnar data, where each row is a molecular system, and each column represents a different piece of information about that system. For instance, in this example, every row reflects a protein-ligand complex, and the following columns are present: a unique complex identifier; the SMILES string of the ligand; the binding affinity (Ki) of the ligand to the protein in the complex; a Python list of all lines in a PDB file for the protein alone; and a Python list of all lines in a ligand file for the ligand alone. 
This should become clearer with the example. (Make sure to set DISPLAY = True) End of explanation """ print("Type of dataset is: %s" % str(type(dataset))) print(dataset[:5]) print("Shape of dataset is: %s" % str(dataset.shape)) """ Explanation: Let's see what dataset looks like: End of explanation """ import nglview import tempfile import os import mdtraj as md import numpy as np import deepchem.utils.visualization from deepchem.utils.visualization import combine_mdtraj, visualize_complex, convert_lines_to_mdtraj first_protein, first_ligand = dataset.iloc[0]["protein_pdb"], dataset.iloc[0]["ligand_pdb"] protein_mdtraj = convert_lines_to_mdtraj(first_protein) ligand_mdtraj = convert_lines_to_mdtraj(first_ligand) complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj) if DISPLAY: ngltraj = visualize_complex(complex_mdtraj) ngltraj """ Explanation: One of the missions of deepchem is to form a synapse between the chemical and the algorithmic worlds: to be able to leverage the powerful and diverse array of tools available in Python to analyze molecules. This ethos applies to visual as much as quantitative examination: End of explanation """ from deepchem.featurizers.fingerprints import CircularFingerprint from deepchem.featurizers.basic import RDKitDescriptors from deepchem.featurizers.nnscore import NNScoreComplexFeaturizer from deepchem.featurizers.grid_featurizer import GridFeaturizer grid_featurizer = GridFeaturizer(voxel_width=16.0, feature_types="voxel_combined", voxel_feature_types=["ecfp", "splif", "hbond", "pi_stack", "cation_pi", "salt_bridge"], ecfp_power=5, splif_power=5, parallel=True, flatten=True) compound_featurizers = [CircularFingerprint(size=128)] # TODO(rbharath, enf): The grid featurizer breaks. Need to debug before code release complex_featurizers = [] #complex_featurizers = [grid_featurizer] """ Explanation: Now that we're oriented, let's use ML to do some chemistry. So, step (2) will entail featurizing the dataset. 
The available featurizations that come standard with deepchem are ECFP4 fingerprints, RDKit descriptors, NNScore-style bdescriptors, and hybrid binding pocket descriptors. Details can be found on deepchem.io. End of explanation """ #Make a directory in which to store the featurized complexes. import tempfile, shutil base_dir = "./tutorial_output" if not os.path.exists(base_dir): os.makedirs(base_dir) data_dir = os.path.join(base_dir, "data") if not os.path.exists(data_dir): os.makedirs(data_dir) featurized_samples_file = os.path.join(data_dir, "featurized_samples.joblib") feature_dir = os.path.join(base_dir, "features") if not os.path.exists(feature_dir): os.makedirs(feature_dir) samples_dir = os.path.join(base_dir, "samples") if not os.path.exists(samples_dir): os.makedirs(samples_dir) train_dir = os.path.join(base_dir, "train") if not os.path.exists(train_dir): os.makedirs(train_dir) valid_dir = os.path.join(base_dir, "valid") if not os.path.exists(valid_dir): os.makedirs(valid_dir) test_dir = os.path.join(base_dir, "test") if not os.path.exists(test_dir): os.makedirs(test_dir) model_dir = os.path.join(base_dir, "model") if not os.path.exists(model_dir): os.makedirs(model_dir) import deepchem.featurizers.featurize from deepchem.featurizers.featurize import DataFeaturizer featurizers = compound_featurizers + complex_featurizers featurizer = DataFeaturizer(tasks=["label"], smiles_field="smiles", protein_pdb_field="protein_pdb", ligand_pdb_field="ligand_pdb", compound_featurizers=compound_featurizers, complex_featurizers=complex_featurizers, id_field="complex_id", verbose=False) if PARALLELIZE: from ipyparallel import Client c = Client() dview = c[:] else: dview = None featurized_samples = featurizer.featurize(dataset_file, feature_dir, samples_dir, worker_pool=dview, shard_size=32) from deepchem.utils.save import save_to_disk, load_from_disk save_to_disk(featurized_samples, featurized_samples_file) featurized_samples = load_from_disk(featurized_samples_file) """ 
Explanation: Note how we separate our featurizers into those that featurize individual chemical compounds, compound_featurizers, and those that featurize molecular complexes, complex_featurizers. Now, let's perform the actual featurization. Calling featurizer.featurize() will return an instance of class FeaturizedSamples. Internally, featurizer.featurize() (a) computes the user-specified features on the data, (b) transforms the inputs into X and y NumPy arrays suitable for ML algorithms, and (c) constructs a FeaturizedSamples() instance that has useful methods, such as an iterator, over the featurized data. End of explanation """ splittype = "random" train_samples, test_samples = featurized_samples.train_test_split( splittype, train_dir, test_dir, seed=2016) """ Explanation: Now, we conduct a train-test split. If you'd like, you can choose splittype="scaffold" instead to perform a train-test split based on Bemis-Murcko scaffolds. End of explanation """ from deepchem.utils.dataset import Dataset train_dataset = Dataset(data_dir=train_dir, samples=train_samples, featurizers=compound_featurizers, tasks=["label"]) test_dataset = Dataset(data_dir=test_dir, samples=test_samples, featurizers=compound_featurizers, tasks=["label"]) """ Explanation: We generate separate instances of the Dataset() object to hermetically seal the train dataset from the test dataset. This style lends itself easily to validation-set type hyperparameter searches, which we will illustate in a separate section of this tutorial. 
End of explanation """ from deepchem.transformers import NormalizationTransformer from deepchem.transformers import ClippingTransformer input_transformers = [NormalizationTransformer(transform_X=True, dataset=train_dataset), ClippingTransformer(transform_X=True, dataset=train_dataset)] output_transformers = [NormalizationTransformer(transform_y=True, dataset=train_dataset)] transformers = input_transformers + output_transformers for transformer in transformers: transformer.transform(train_dataset) for transformer in transformers: transformer.transform(test_dataset) """ Explanation: The performance of many ML algorithms hinges greatly on careful data preprocessing. Deepchem comes standard with a few options for such preprocessing. End of explanation """ from sklearn.ensemble import RandomForestRegressor from deepchem.models.standard import SklearnModel task_types = {"label": "regression"} model_params = {"data_shape": train_dataset.get_data_shape()} model = SklearnModel(task_types, model_params, model_instance=RandomForestRegressor()) model.fit(train_dataset) model_dir = tempfile.mkdtemp() model.save(model_dir) from deepchem.utils.evaluate import Evaluator import pandas as pd evaluator = Evaluator(model, train_dataset, output_transformers, verbose=True) with tempfile.NamedTemporaryFile() as train_csv_out: with tempfile.NamedTemporaryFile() as train_stats_out: _, train_r2score = evaluator.compute_model_performance( train_csv_out, train_stats_out) evaluator = Evaluator(model, test_dataset, output_transformers, verbose=True) test_csv_out = tempfile.NamedTemporaryFile() with tempfile.NamedTemporaryFile() as test_stats_out: _, test_r2score = evaluator.compute_model_performance( test_csv_out, test_stats_out) print test_csv_out.name train_test_performance = pd.concat([train_r2score, test_r2score]) train_test_performance["split"] = ["train", "test"] train_test_performance """ Explanation: Now, we're ready to do some learning! 
To set up a model, we will need: (a) a dictionary task_types that maps a task, in this case label, i.e. the Ki, to the type of the task, in this case regression. For the multitask use case, one will have a series of keys, each of which is a different task (Ki, solubility, renal half-life, etc.) that maps to a different task type (regression or classification). To fit a deepchem model, first we instantiate one of the provided (or user-written) model classes. In this case, we have a created a convenience class to wrap around any ML model available in Sci-Kit Learn that can in turn be used to interoperate with deepchem. To instantiate an SklearnModel, you will need (a) task_types, (b) model_params, another dict as illustrated below, and (c) a model_instance defining the type of model you would like to fit, in this case a RandomForestRegressor. End of explanation """ predictions = pd.read_csv(test_csv_out.name) predictions = predictions.sort(['label'], ascending=[0]) from deepchem.utils.visualization import visualize_ligand top_ligand = predictions.iloc[0]['ids'] ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==top_ligand]['ligand_pdb'].values[0]) if DISPLAY: ngltraj = visualize_ligand(ligand1) ngltraj worst_ligand = predictions.iloc[predictions.shape[0]-2]['ids'] ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==worst_ligand]['ligand_pdb'].values[0]) if DISPLAY: ngltraj = visualize_ligand(ligand1) ngltraj """ Explanation: In this simple example, in few yet intuitive lines of code, we traced the machine learning arc from featurizing a raw dataset to fitting and evaluating a model. Here, we featurized only the ligand. The signal we observed in R^2 reflects the ability of circular fingerprints and random forests to learn general features that make ligands "drug-like." 
End of explanation """ import deepchem.models.standard from deepchem.models.standard import SklearnModel from deepchem.utils.dataset import Dataset from deepchem.utils.evaluate import Evaluator from deepchem.hyperparameters import HyperparamOpt train_dir, validation_dir, test_dir = tempfile.mkdtemp(), tempfile.mkdtemp(), tempfile.mkdtemp() splittype="random" train_samples, validation_samples, test_samples = featurized_samples.train_valid_test_split( splittype, train_dir, validation_dir, test_dir, seed=2016) task_types = {"label": "regression"} performance = pd.DataFrame() def model_builder(task_types, params_dict, verbosity): n_estimators = params_dict["n_estimators"] return SklearnModel( task_types, params_dict, model_instance=RandomForestRegressor(n_estimators=n_estimators)) params_dict = { "n_estimators": [10, 20, 40, 80, 160], "data_shape": [train_dataset.get_data_shape()], } optimizer = HyperparamOpt(model_builder, {"pIC50": "regression"}) for feature_type in (complex_featurizers + compound_featurizers): train_dataset = Dataset(data_dir=train_dir, samples=train_samples, featurizers=[feature_type], tasks=["label"]) validation_dataset = Dataset(data_dir=validation_dir, samples=validation_samples, featurizers=[feature_type], tasks=["label"]) for transformer in transformers: transformer.transform(train_dataset) for transformer in transformers: transformer.transform(test_dataset) best_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search( params_dict, train_dataset, test_dataset, output_transformers, metric="r2_score") %matplotlib inline import matplotlib import numpy as np import matplotlib.pyplot as plt # TODO(rbharath, enf): Need to fix this to work with new hyperparam-opt framework. 
#df = pd.DataFrame(performance[['r2_score','split','featurizer']].values, index=performance['n_trees'].values, columns=['r2_score', 'split', 'featurizer']) #df = df.loc[df['split']=="validation"] #df = df.drop('split', 1) #fingerprint_df = df[df['featurizer'].str.contains('fingerprint')].drop('featurizer', 1) #print fingerprint_df #fingerprint_df.columns = ['ligand fingerprints'] #grid_df = df[df['featurizer'].str.contains('grid')].drop('featurizer', 1) #grid_df.columns = ['complex features'] #df = pd.concat([fingerprint_df, grid_df], axis=1) #print(df) #plt.clf() #df.plot() #plt.ylabel("$R^2$") #plt.xlabel("Number of trees") train_dir, validation_dir, test_dir = tempfile.mkdtemp(), tempfile.mkdtemp(), tempfile.mkdtemp() splittype="random" train_samples, validation_samples, test_samples = featurized_samples.train_valid_test_split( splittype, train_dir, validation_dir, test_dir, seed=2016) feature_type = complex_featurizers train_dataset = Dataset(data_dir=train_dir, samples=train_samples, featurizers=feature_type, tasks=["label"]) validation_dataset = Dataset(data_dir=validation_dir, samples=validation_samples, featurizers=feature_type, tasks=["label"]) test_dataset = Dataset(data_dir=test_dir, samples=test_samples, featurizers=feature_type, tasks=["label"]) for transformer in transformers: transformer.transform(train_dataset) for transformer in transformers: transformer.transform(valid_dataset) for transformer in transformers: transformer.transform(test_dataset) model_params = {"data_shape": train_dataset.get_data_shape()} rf_model = SklearnModel(task_types, model_params, model_instance=RandomForestRegressor(n_estimators=20)) rf_model.fit(train_dataset) model_dir = tempfile.mkdtemp() rf_model.save(model_dir) evaluator = Evaluator(rf_model, train_dataset, output_transformers, verbose=True) with tempfile.NamedTemporaryFile() as train_csv_out: with tempfile.NamedTemporaryFile() as train_stats_out: _, train_r2score = evaluator.compute_model_performance( train_csv_out, 
train_stats_out) evaluator = Evaluator(rf_model, test_dataset, output_transformers, verbose=True) test_csv_out = tempfile.NamedTemporaryFile() with tempfile.NamedTemporaryFile() as test_stats_out: predictions, test_r2score = evaluator.compute_model_performance( test_csv_out, test_stats_out) train_test_performance = pd.concat([train_r2score, test_r2score]) train_test_performance["split"] = ["train", "test"] train_test_performance["featurizer"] = [str(feature_type.__class__), str(feature_type.__class__)] train_test_performance["n_trees"] = [n_trees, n_trees] print(train_test_performance) import deepchem.models.deep from deepchem.models.deep import SingleTaskDNN import numpy.random from operator import mul import itertools params_dict = {"activation": ["relu"], "momentum": [.9], "batch_size": [50], "init": ["glorot_uniform"], "data_shape": [train_dataset.get_data_shape()], "learning_rate": np.power(10., np.random.uniform(-5, -2, size=5)), "decay": np.power(10., np.random.uniform(-6, -4, size=5)), "nb_hidden": [1000], "nb_epoch": [40], "nesterov": [False], "dropout": [.5], "nb_layers": [1], "batchnorm": [False], } optimizer = HyperparamOpt(SingleTaskDNN, task_types) best_dnn, best_hyperparams, all_results = optimizer.hyperparam_search( params_dict, train_dataset, valid_dataset, output_transformers, metric="r2_score", verbosity=None) dnn_test_csv_out = tempfile.NamedTemporaryFile() dnn_test_stats_out = tempfile.NamedTemporaryFile() dnn_test_evaluator = Evaluator(best_dnn, test_dataset) dnn_test_df, dnn_test_r2score = dnn_test_evaluator.compute_model_performance( dnn_test_csv_out, dnn_test_stats_out) dnn_test_r2_score = dnn_test_r2score.iloc[0]["r2_score"] print("DNN Test set R^2 %f" % (dnn_test_r2_score)) task = "label" dnn_predicted_test = np.array(dnn_test_df[task + "_pred"]) dnn_true_test = np.array(dnn_test_df[task]) plt.clf() plt.scatter(dnn_true_test, dnn_predicted_test) plt.xlabel('Predicted Ki') plt.ylabel('True Ki') plt.title(r'DNN predicted vs. 
true Ki') plt.xlim([-2, 2]) plt.ylim([-2, 2]) plt.plot([-3, 3], [-3, 3], marker=".", color='k') rf_test_csv_out = tempfile.NamedTemporaryFile() rf_test_stats_out = tempfile.NamedTemporaryFile() rf_test_evaluator = Evaluator(rf_model, test_dataset) rf_test_df, rf_test_r2score = rf_test_evaluator.compute_model_performance( rf_test_csv_out, rf_test_stats_out) rf_test_r2_score = rf_test_r2score.iloc[0]["r2_score"] print("RF Test set R^2 %f" % (rf_test_r2_score)) plt.show() task = "label" rf_predicted_test = np.array(rf_test_df[task + "_pred"]) rf_true_test = np.array(rf_test_df[task]) plt.scatter(rf_true_test, rf_predicted_test) plt.xlabel('Predicted Ki') plt.ylabel('True Ki') plt.title(r'RF predicted vs. true Ki') plt.xlim([-2, 2]) plt.ylim([-2, 2]) plt.plot([-3, 3], [-3, 3], marker=".", color='k') plt.show() predictions = dnn_test_df.sort(['label'], ascending=[0]) top_complex = predictions.iloc[0]['ids'] best_complex = dataset.loc[dataset['complex_id']==top_complex] protein_mdtraj = convert_lines_to_mdtraj(best_complex["protein_pdb"].values[0]) ligand_mdtraj = convert_lines_to_mdtraj(best_complex["ligand_pdb"].values[0]) complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj) if DISPLAY: ngltraj = visualize_complex(complex_mdtraj) ngltraj top_complex = predictions.iloc[1]['ids'] best_complex = dataset.loc[dataset['complex_id']==top_complex] protein_mdtraj = convert_lines_to_mdtraj(best_complex["protein_pdb"].values[0]) ligand_mdtraj = convert_lines_to_mdtraj(best_complex["ligand_pdb"].values[0]) complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj) if DISPLAY: ngltraj = visualize_complex(complex_mdtraj) ngltraj top_complex = predictions.iloc[predictions.shape[0]-1]['ids'] best_complex = dataset.loc[dataset['complex_id']==top_complex] protein_mdtraj = convert_lines_to_mdtraj(best_complex["protein_pdb"].values[0]) ligand_mdtraj = convert_lines_to_mdtraj(best_complex["ligand_pdb"].values[0]) complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj) 
if DISPLAY: ngltraj = visualize_complex(complex_mdtraj) ngltraj """ Explanation: The protein-ligand complex view. The preceding simple example, in few yet intuitive lines of code, traces the machine learning arc from featurizing a raw dataset to fitting and evaluating a model. In this next section, we illustrate deepchem's modularity, and thereby the ease with which one can explore different featurization schemes, different models, and combinations thereof, to achieve the best performance on a given dataset. We will demonstrate this by examining protein-ligand interactions. In the previous section, we featurized only the ligand. The signal we observed in R^2 reflects the ability of circular fingerprints and random forests to learn general features that make ligands "drug-like." However, the affinity of a drug for a target is determined not only by the drug itself, of course, but the way in which it interacts with a protein. End of explanation """
Applied-Groundwater-Modeling-2nd-Ed/Chapter_4_problems-1
P4.4_Flopy_Hubbertville_areal_model_with_pumping.ipynb
gpl-2.0
%matplotlib inline import sys import os import shutil import numpy as np from subprocess import check_output # Import flopy import flopy """ Explanation: <img src="AW&H2015.tiff" style="float: left"> <img src="flopylogo.png" style="float: center"> Problem P4.4 Adding Pumping to Hubbertville Areal Model In Problem P4.3 in Anderson, Woessner and Hunt (2015), we are asked to construct an areal 2D model to assess impacts of pumping. The town of Hubbertville is planning to expand its water supply by constructing a pumping well in an unconfined gravel aquifer (Fig. P4.3). The well is designed to pump constantly at a rate of 20,000 m3/day. Well construction was halted by the State Fish and Game Service who manage the Green Swamp Conservation area. The agency claimed that pumping would “significantly reduce” groundwater discharge to the swamp and damage waterfowl habitat. The town claimed the fully penetrating river boundary to the north and the groundwater divide located near the center of the valley would prevent any change in flow to the swamp. In Problem P4.4 on page 174-175, we return to the model you designed for Problem P4.3. Place a well at the location indicated in Fig. P4.3 and pump the well at a constant rate of 20,000 m3/day. Run the model under steady-state pumping conditions three times under each of the following three representations of model boundaries: (1) physical boundaries shown in Fig. P4.3; (2) an internal no-flow boundary at the groundwater divide between the river and the swamp; the location of the divide is determined from the solution of Problem P4.3; (3) an internal specified head boundary at the groundwater divide between the river and the swamp; the location of the divide is determined from the solution of Problem P4.3. Part a. Discuss the effects of the internal boundary conditions imposed on the groundwater divide on the resulting head distributions. 
Compare northesouth watertable profiles drawn through the node representing the pumping well for the three pumping simulations. Compute the discharge to the Green Swamp under each set of boundary conditions. In this notebook, we will work through the problem using MODFLOW and the Python tool set Flopy. Notice how much code is reused from P4.1 because the variable names remained the same. <img src="P4.3_figure.tiff" style="float: center"> Below is an iPython Notebook that builds a Python MODFLOW model for this problem and plots results. See the Github wiki associated with this Chapter for information on one suggested installation and setup configuration for Python and iPython Notebook. [Acknowledgements: This tutorial was created by Randy Hunt and all failings are mine. The exercise here has benefited greatly from the online Flopy tutorial and example notebooks developed by Chris Langevin and Joe Hughes for the USGS Spring 2015 Python Training course GW1774] Creating the Model - GW divide simulated (not specified as BC) This first part of the problem is identical to P4.3, and we will again create a simple groundwater flow model by following the tutorial included on the Flopy website. We will make a few small changes so that the tutorial works with our file structure. Visit the tutorial website here. Setup the Notebook Environment and Import Flopy Load a few standard libraries, and then load flopy. End of explanation """ # Set the name of the path to the model working directory dirname = "P4-4_Hubbertville" datapath = os.getcwd() modelpath = os.path.join(datapath, dirname) print 'Name of model path: ', modelpath # Now let's check if this directory exists. If not, then we will create it. if os.path.exists(modelpath): print 'Model working directory already exists.' else: print 'Creating model working directory.' 
os.mkdir(modelpath) """ Explanation: Setup a New Directory and Change Paths For this tutorial, we will work in a new subdirectory underneath the directory where the notebook is located. We can use some fancy Python tools to help us manage the directory creation. Note that if you encounter path problems with this workbook, you can stop and then restart the kernel and the paths will be reset. End of explanation """ # model domain and grid definition # for clarity, user entered variables are all caps; python syntax are lower case or mixed case # In a contrast to P4.1 and P4.2, this is an areal 2D model LX = 4500. LY = 11000. # note that there is an added 500m on the top and bottom to represent the boundary conditions,that leaves an aqufier lenght of 10000 m ZTOP = 1030. # the system is unconfined so set the top above land surface so that the water table never > layer top ZBOT = 980. NLAY = 1 NROW = 22 NCOL = 9 DELR = LX / NCOL # recall that MODFLOW convention is DELR is along a row, thus has items = NCOL; see page XXX in AW&H (2015) DELC = LY / NROW # recall that MODFLOW convention is DELC is along a column, thus has items = NROW; see page XXX in AW&H (2015) DELV = (ZTOP - ZBOT) / NLAY BOTM = np.linspace(ZTOP, ZBOT, NLAY + 1) HK = 50. VKA = 1. RCH = 0.001 #In P4.4 the proposed well is pumping (Q was set to zero in P4.3) WELLQ = -20000. #recall MODFLOW convention, negative means pumped out of the model domain (=aquifer) print "DELR =", DELR, " DELC =", DELC, ' DELV =', DELV print "BOTM =", BOTM print "Recharge =", RCH print "Pumping well rate =", WELLQ """ Explanation: Define the Model Extent, Grid Resolution, and Characteristics It is normally good practice to group things that you might want to change into a single code block. This makes it easier to make changes and rerun the code. 
End of explanation """ # Assign name and create modflow model object modelname = 'P4-4' #exe_name = os.path.join(datapath, 'mfnwt.exe') # for Windows OS exe_name = os.path.join(datapath, 'mfnwt') # for Mac OS print 'Model executable: ', exe_name MF = flopy.modflow.Modflow(modelname, exe_name=exe_name, model_ws=modelpath) """ Explanation: Create the MODFLOW-NWT Model Object Create a flopy MODFLOW object: flopy.modflow.Modflow. Note that in P4.3 we used the older PCG solver with no issues. Simply adding the new pumping to P4.3 will cause a dry node at the pumping well in some cases below. Therefore, we are using the MODFLOW-NWT executable instead, which handles dry node problems much better. Compare these steps below to P4.3: note how similar the input is for MODFLOW-2005 and MODFLOW-NWT. End of explanation """ # Create the discretization object TOP = ZTOP * np.ones((NROW, NCOL),dtype=np.float) DIS_PACKAGE = flopy.modflow.ModflowDis(MF, NLAY, NROW, NCOL, delr=DELR, delc=DELC, top=TOP, botm=BOTM[1:], laycbd=0) # print DIS_PACKAGE #uncomment this on far left to see information about the flopy object """ Explanation: Discretization Package Create a flopy discretization package object: flopy.modflow.ModflowDis. End of explanation """ # Variables for the BAS package IBOUND = np.ones((NLAY, NROW, NCOL), dtype=np.int32) # all nodes are active (IBOUND = 1) # make the top of the profile specified head by setting the IBOUND = -1 IBOUND[:, 0, :] = -1 #don't forget arrays are zero-based! IBOUND[:, -1, :] = -1 #-1 is Python for last in array print IBOUND STRT = 1010 * np.ones((NLAY, NROW, NCOL), dtype=np.float32) # set starting head to 1010 m through out model domain STRT[:, 0, :] = 1000. # river stage for setting constant head STRT[:, -1, :] = 1000. 
# wetland stage for setting constant head print STRT BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT) # print BAS_PACKAGE # uncomment this at far left to see the information about the flopy BAS object """ Explanation: Basic Package Create a flopy basic package object: flopy.modflow.ModflowBas. End of explanation """ LPF_PACKAGE = flopy.modflow.ModflowUpw(MF, laytyp=1, hk=HK, vka=VKA) # we defined the K and anisotropy at top of file # print LPF_PACKAGE # uncomment this at far left to see the information about the flopy LPF object """ Explanation: Upstream Weighting Package (like Layer Property Flow Package) Create a flopy layer property flow package object: flopy.modflow.ModflowUpw. End of explanation """ WEL_PACKAGE = flopy.modflow.ModflowWel(MF, stress_period_data=[0,4,4,WELLQ]) # remember python 0 index, layer 0 = layer 1 in MF #print WEL_PACKAGE # uncomment this at far left to see the information about the flopy WEL object """ Explanation: Well Package Create a flopy output control object: flopy.modflow.ModflowWel. End of explanation """ OC_PACKAGE = flopy.modflow.ModflowOc(MF) # we'll use the defaults for the model output # print OC_PACKAGE # uncomment this at far left to see the information about the flopy OC object """ Explanation: Output Control Create a flopy output control object: flopy.modflow.ModflowOc. End of explanation """ RCH_PACKAGE = flopy.modflow.ModflowRch(MF, rech=RCH) # print RCH_PACKAGE # uncomment this at far left to see the information about the flopy RCH object """ Explanation: Recharge Package Create a flopy pcg package object: flopy.modflow.ModflowRch. End of explanation """ NWT_PACKAGE = flopy.modflow.ModflowNwt(MF, headtol=0.001, fluxtol=5000, maxiterout=900) # print NWT_PACKAGE # uncomment this at far left to see the information about the flopy PCG object """ Explanation: Newton-Raphson Solver Create a flopy pcg package object: flopy.modflow.ModflowNwt. 
End of explanation """ #Before writing input, destroy all files in folder to prevent reusing old files #Here's the working directory print modelpath #Here's what's currently in the working directory modelfiles = os.listdir(modelpath) print modelfiles #delete these files to prevent us from reading old results modelfiles = os.listdir(modelpath) for filename in modelfiles: f = os.path.join(modelpath, filename) if modelname in f: try: os.remove(f) print 'Deleted: ', filename except: print 'Unable to delete: ', filename #Now write the model input files MF.write_input() """ Explanation: Writing the MODFLOW Input Files Before we create the model input datasets, we can do some directory cleanup to make sure that we don't accidently use old files. End of explanation """ silent = False #Print model output to screen? pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook report = True #Store the output from the model in buff success, buff = MF.run_model(silent=silent, pause=pause, report=report) """ Explanation: Running the Model Flopy has several methods attached to the model object that can be used to run the model. They are run_model, run_model2, and run_model3. Here we use run_model3, which will write output to the notebook. End of explanation """ #imports for plotting and reading the MODFLOW binary output file import matplotlib.pyplot as plt import flopy.utils.binaryfile as bf #Create the headfile object and grab the results for last time. headfile = os.path.join(modelpath, modelname + '.hds') headfileobj = bf.HeadFile(headfile) #Get a list of times that are contained in the model times = headfileobj.get_times() print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times #Get a numpy array of heads for totim = 1.0 #The get_data method will extract head data from the binary file. 
HEAD = headfileobj.get_data(totim=1.0) #Print statistics on the head print 'Head statistics' print ' min: ', HEAD.min() print ' max: ', HEAD.max() print ' std: ', HEAD.std() #Create a contour plot of heads FIG = plt.figure(figsize=(15,13)) #setup contour levels and plot extent LEVELS = np.arange(995., 1008.5, 0.25) EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.) print 'Contour Levels: ', LEVELS print 'Extent of domain: ', EXTENT #Make a contour plot on the first axis AX1 = FIG.add_subplot(1, 2, 1, aspect='equal') AX1.set_xlabel("x") AX1.set_ylabel("y") YTICKS = np.arange(0, 11000, 500) AX1.set_yticks(YTICKS) AX1.set_title("Hubbertville contour map") AX1.text(2500, 8100, r"proposed well", fontsize=15, color="black") AX1.text(2000, 10500, r"River", fontsize=10, color="blue") AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green") AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT) #Make a color flood on the second axis AX2 = FIG.add_subplot(1, 2, 2, aspect='equal') AX2.set_xlabel("x") AX2.set_ylabel("y") AX2.set_yticks(YTICKS) AX2.set_title("Hubbertville color flood") AX2.text(2500, 8100, r"proposed well", fontsize=15, color="black") AX2.text(2000, 10500, r"River", fontsize=10, color="black") AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black") cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest') cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45) """ Explanation: Post Processing the Results To read heads from the MODFLOW binary output file, we can use the flopy.utils.binaryfile module. Specifically, we can use the HeadFile object from that module to extract head data arrays. 
End of explanation """ #look at the head in column = 4 from headobj, and then plot it #print HEAD along a column; COL is a variable that allows us to change this easily COL = 4 print HEAD[0,:,COL] # we see this is what we want, but is flipped because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it Y = np.flipud(HEAD[0,:,COL]) print Y #for our cross section create X-coordinates to match with heads XCOORD = np.arange(0, 11000, 500) + 250 print XCOORD fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(1, 1, 1) TITLE = 'cross section of head along Column = ({0})'.format(COL) ax.set_title(TITLE) ax.set_xlabel('y') ax.set_ylabel('head') ax.set_xlim(0, 11000.) ax.set_ylim(980.,1010.) ax.text(7500, 992, r"proposed well", fontsize=15, color="black") ax.text(10480, 998, r"River", fontsize=10, color="blue",rotation='vertical') ax.text(300, 998, r"Green Swamp", fontsize=10, color="green",rotation='vertical') ax.text(5300,1004.5, r"Old Groundwater Divide", fontsize=10, color="red",rotation='vertical') ax.text(4400,1005., r"New Groundwater Divide", fontsize=10, color="black",rotation='vertical') ax.plot(XCOORD, Y) #calculate the flux to Green Swamp HEAD_ADJACENT_CELLS = HEAD[0,-2,:] print "heads in cells next to Green Swamp =", HEAD_ADJACENT_CELLS FLUX_TO_SWAMP = 0 THICK = (HEAD[0,-2,5]+1000.)/2 - ZBOT #the thickness is approximated using the average saturated thickness for NODEHEAD in HEAD_ADJACENT_CELLS: NODEFLUX = (HK * (NODEHEAD-1000.)/(DELC) * DELR * THICK) # Q = KIA FLUX_TO_SWAMP += NODEFLUX print 'gradient =', (NODEHEAD-1000)/(DELC), ' Kh =', HK, ' thickness=', THICK, ' Grid spacing =', DELC, ' Node flux =', NODEFLUX print "Total Flux to Swamp =", FLUX_TO_SWAMP, "cubic meters per day" #calculate the flux to River HEAD_ADJACENT_CELLS = HEAD[0,1,:] print "heads in cells next to River =", HEAD_ADJACENT_CELLS FLUX_TO_RIVER = 0 THICK = (HEAD[0,-2,5]+1000.)/2 - ZBOT #the thickness is approximated using the average saturated thickness for 
NODEHEAD in HEAD_ADJACENT_CELLS: NODEFLUX = (HK * (NODEHEAD-1000.)/(DELC) * DELR * THICK) # Q = KIA FLUX_TO_RIVER += NODEFLUX print 'gradient =', (NODEHEAD-1000)/(DELC), ' Kh =', HK, ' thickness=', THICK, ' Grid spacing =', DELC, ' Node flux =', NODEFLUX print "Total Flux to River =", FLUX_TO_RIVER, "cubic meters per day" print 'Flux to Green Swamp =', FLUX_TO_SWAMP, 'Flux to River =', FLUX_TO_RIVER BCFLUX = FLUX_TO_SWAMP + FLUX_TO_RIVER Q = WELLQ * -1 print 'Flux to perimeter boundaries =', BCFLUX, ', Well pumping =', Q, ', Total Vol Out =', BCFLUX+Q, 'cubic meters per day' """ Explanation: Look at the bottom of the MODFLOW output file (ending with a *.list) and note the water balance reported. End of explanation """ # recall from P4.3 that the GW divide was located at Rows 10 and 11; # where each head value in the column was 1005.50 #like P4.3, lets set Rows 10 and 11 to no flow #(set that row to 0 in the MODFLOW IBOUND array) IBOUND[:, 10, :] = 0 IBOUND[:, 11, :] = 0 print IBOUND #we have to update the MODFLOW's BAS Package with the new IBOUND array BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT) #Before writing input, destroy all files in folder to prevent reusing old files #Here's the working directory print modelpath #Here's what's currently in the working directory modelfiles = os.listdir(modelpath) print modelfiles #delete these files to prevent us from reading old results modelfiles = os.listdir(modelpath) for filename in modelfiles: f = os.path.join(modelpath, filename) if modelname in f: try: os.remove(f) print 'Deleted: ', filename except: print 'Unable to delete: ', filename #Now write the model input files MF.write_input() print "New MODFLOW input files = ", modelfiles print "You can check the newly created files in", modelpath #rerun MODFLOW-2005 silent = False #Print model output to screen? pause = False #Require user to hit enter? 
Doesn't mean much in Ipython notebook report = True #Store the output from the model in buff success, buff = MF.run_model(silent=silent, pause=pause, report=report) #As before, let's look at the results and compare to P4-3 Part a. #imports for plotting and reading the MODFLOW binary output file import matplotlib.pyplot as plt import flopy.utils.binaryfile as bf #Create the headfile object and grab the results for last time. headfile = os.path.join(modelpath, modelname + '.hds') headfileobj = bf.HeadFile(headfile) #Get a list of times that are contained in the model times = headfileobj.get_times() print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times #Get a numpy array of heads for totim = 1.0 #The get_data method will extract head data from the binary file. HEAD = headfileobj.get_data(totim=1.0) #Print statistics on the head print 'Head statistics' print ' min: ', HEAD.min() print ' max: ', HEAD.max() print ' std: ', HEAD.std() #-999.99 is the Inactive node flag so we'll use our previous contour settings #Create a contour plot of heads FIG = plt.figure(figsize=(15,13)) #setup contour levels and plot extent LEVELS = np.arange(987., 1009., 0.5) EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.) 
# Report the contour settings, then draw a two-panel figure of the simulated
# heads (line contours + color flood) and set up a north-south cross section.
print 'Contour Levels: ', LEVELS
print 'Extent of domain: ', EXTENT

#Make a contour plot on the first axis
AX1 = FIG.add_subplot(1, 2, 1, aspect='equal')
AX1.set_xlabel("x")
AX1.set_ylabel("y")
YTICKS = np.arange(0, 11000, 500)
AX1.set_yticks(YTICKS)
AX1.set_title("Hubbertville contour map")
# annotate map features (well, river, swamp) at fixed model coordinates
AX1.text(2500, 8100, r"proposed well", fontsize=15, color="black")
AX1.text(2000, 10500, r"River", fontsize=10, color="blue")
AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green")
# flipud: MODFLOW rows are numbered top-down, while the plot y-axis runs bottom-up
AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT)

#Make a color flood on the second axis
AX2 = FIG.add_subplot(1, 2, 2, aspect='equal')
AX2.set_xlabel("x")
AX2.set_ylabel("y")
AX2.set_yticks(YTICKS)
AX2.set_title("Hubbertville color flood")
AX2.text(2500, 8100, r"proposed well", fontsize=15, color="black")
AX2.text(2000, 10500, r"River", fontsize=10, color="black")
AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black")
# no flipud needed here: imshow's default origin is the upper-left corner
cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest', vmin=987.)
cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45)

# pick one model column for a north-south cross section of heads
COL = 4
# recall we need to flip because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it
Y = np.flipud(HEAD[0,:,COL])
print Y

#for our cross section create X-coordinates to match with heads
# cell centers: 500-m spacing offset by half a cell (250 m) -- TODO confirm vs DELC
XCOORD = np.arange(0, 11000, 500) + 250
print XCOORD

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
TITLE = 'cross section of head along Column = ({0})'.format(COL)
ax.set_title(TITLE)
ax.set_xlabel('y')
ax.set_ylabel('head')
ax.set_xlim(0, 11000.)
ax.set_ylim(980.,1010.)
ax.text(7500, 989, r"proposed well", fontsize=15, color="black") ax.text(10480, 998, r"River", fontsize=10, color="blue",rotation='vertical') ax.text(300, 998, r"Green Swamp", fontsize=10, color="green",rotation='vertical') ax.text(5400,1002., r"Original GW Divide / Inactive cells", fontsize=10, color="black",rotation='vertical') ax.plot(XCOORD, Y) #calculate the flux to Green Swamp HEAD_ADJACENT_CELLS = HEAD[0,-2,:] print "heads in cells next to Green Swamp =", HEAD_ADJACENT_CELLS FLUX_TO_SWAMP_NO_FLOW = 0 THICK = (HEAD[0,-2,5]+1000.)/2 - ZBOT #the thickness is approximated using the average saturated thickness for NODEHEAD in HEAD_ADJACENT_CELLS: NODEFLUX = (HK * (NODEHEAD-1000.)/(DELC) * DELR * THICK) # Q = KIA FLUX_TO_SWAMP_NO_FLOW += NODEFLUX print 'gradient =', (NODEHEAD-1000)/(DELC), ' Kh =', HK, ' thickness=', THICK, ' Grid spacing =', DELC, ' Node flux =', NODEFLUX print "Total Flux to Swamp (No Flow) =", FLUX_TO_SWAMP_NO_FLOW, "cubic meters per day" """ Explanation: Testing your Skills Is the total volumetric flux out near that reported in the MODFLOW list file? Experiment with horizontal grid resolution, well location, recharge, pumping rate, and aquifer characteristics. Rerun the model and post process to evaluate the effects. P4.3 Part a. GW divide as No-flow and Specified Head BCs Using the steady-state heads derived in P4.3, run the model using first a no-flow boundary and then a specified head boundary at the location of the groundwater divide. Compare results with those in part (a). Compute the discharge to Green Swamp under each representation. What is the effect of assigning an internal boundary on the results? 
GW divide as No-flow BC End of explanation """ # Rows 10 and 11 had highest heads (1005.5); reset Rows 10 and 11 to a specified head boundary (set that row to -1 in the MODFLOW IBOUND array) IBOUND[:, 10, :] = -1 IBOUND[:, 11, :] = -1 print IBOUND #MODFLOW uses the starting heads to set the specified head boundary elevations #we need to reset the starting heads in Rows 10 and 11 to what they were originally # in P4.3, which is 1010.83233643 STRT[:, 10, :] = 1010.83233643 STRT[:, 11, :] = 1010.83233643 print STRT #we have to update the MODFLOW's BAS Package with the new STRT heads BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT) #delete old files to prevent us from reading old results modelfiles = os.listdir(modelpath) for filename in modelfiles: f = os.path.join(modelpath, filename) if modelname in f: try: os.remove(f) print 'Deleted: ', filename except: print 'Unable to delete: ', filename #Now write the model input files MF.write_input() print "New MODFLOW input files = ", modelfiles print "You can check the newly created files in", modelpath #rerun MODFLOW-2005 silent = False #Print model output to screen? pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook report = True #Store the output from the model in buff success, buff = MF.run_model(silent=silent, pause=pause, report=report) #As before, let's look at the results and compare to P4-3 Part a. #imports for plotting and reading the MODFLOW binary output file import matplotlib.pyplot as plt import flopy.utils.binaryfile as bf #Create the headfile object and grab the results for last time. headfile = os.path.join(modelpath, modelname + '.hds') headfileobj = bf.HeadFile(headfile) #Get a list of times that are contained in the model times = headfileobj.get_times() print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times #Get a numpy array of heads for totim = 1.0 #The get_data method will extract head data from the binary file. 
HEAD = headfileobj.get_data(totim=1.0) #Print statistics on the head print 'Head statistics' print ' min: ', HEAD.min() print ' max: ', HEAD.max() print ' std: ', HEAD.std() #Create a contour plot of heads FIG = plt.figure(figsize=(15,13)) #setup contour levels and plot extent LEVELS = np.arange(995., 1011., 0.5) EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.) #Make a contour plot on the first axis AX1 = FIG.add_subplot(1, 2, 1, aspect='equal') AX1.set_xlabel("x") AX1.set_ylabel("y") YTICKS = np.arange(0, 11000, 500) AX1.set_yticks(YTICKS) AX1.set_title("Hubbertville contour map") AX1.text(2500, 8100, r"proposed well", fontsize=15, color="black") AX1.text(2000, 10500, r"River", fontsize=10, color="blue") AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green") AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT) #Make a color flood on the second axis AX2 = FIG.add_subplot(1, 2, 2, aspect='equal') AX2.set_xlabel("x") AX2.set_ylabel("y") AX2.set_yticks(YTICKS) AX2.set_title("Hubbertville color flood") AX2.text(2500, 8100, r"proposed well", fontsize=15, color="black") AX2.text(2000, 10500, r"River", fontsize=10, color="black") AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black") cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest', vmin=995.) cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45) #as before let's plot a north-south cross section COL = 4 # recall we need to flip because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it Y = np.flipud(HEAD[0,:,COL]) #for our cross section create X-coordinates to match with heads XCOORD = np.arange(0, 11000, 500) + 250 fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(1, 1, 1) TITLE = 'cross section of head along Column = ({0})'.format(COL) ax.set_title(TITLE) ax.set_xlabel('y') ax.set_ylabel('head') ax.set_xlim(0, 11000.) ax.set_ylim(980.,1020.) 
ax.text(7500, 995, r"proposed well", fontsize=15, color="black") ax.text(10480, 998, r"River", fontsize=10, color="blue",rotation='vertical') ax.text(300, 998, r"Green Swamp", fontsize=10, color="green",rotation='vertical') ax.text(5400,1006., r"Constant Heads/Original GW Divide", fontsize=10, color="black",rotation='vertical') ax.plot(XCOORD, Y) #calculate the flux to Green Swamp HEAD_ADJACENT_CELLS = HEAD[0,-2,:] print "heads in cells next to Green Swamp =", HEAD_ADJACENT_CELLS FLUX_TO_SWAMP_SPEC_HEAD = 0 THICK = (HEAD[0,-2,5]+1000.)/2 - ZBOT #the thickness is approximated using the average saturated thickness for NODEHEAD in HEAD_ADJACENT_CELLS: NODEFLUX = (HK * (NODEHEAD-1000.)/(DELC) * DELR * THICK) # Q = KIA FLUX_TO_SWAMP_SPEC_HEAD += NODEFLUX print 'gradient =', (NODEHEAD-1000)/(DELC), ' Kh =', HK, ' thickness=', THICK, ' Grid spacing =', DELC, ' Node flux =', NODEFLUX print "Total Flux to Swamp (Specified Head) =", FLUX_TO_SWAMP_SPEC_HEAD, "cubic meters per day" """ Explanation: GW divide as Specified Head BC End of explanation """ #let's compare the three formulations: #1) gw divide simulated; 2) gw divide as no flow BC; and 3) gw divide as specified head BC print "P4.3 Flux to Swamp (no pumping/simulated/specified head) =", 22570 print "Correct New Flux to Swamp (pumping/simulated divide) =", FLUX_TO_SWAMP print "Correct percent flux reduction =", 100 * (FLUX_TO_SWAMP-22570)/22570 print "" print "Incorrect New Flux to Swamp (pumping/spec head divide) = ", FLUX_TO_SWAMP_SPEC_HEAD print "Incorrect percent flux reduction =", 100 * (FLUX_TO_SWAMP_SPEC_HEAD-22570)/22570 print "" print "P4.3 Flux to Swamp (no pumping/no flow divide) =", 20302 print "Incorrect New Flux to Swamp (pumping/no flow divide) = ", FLUX_TO_SWAMP_NO_FLOW print "Incorrect percent flux reduction =", 100 * (FLUX_TO_SWAMP_NO_FLOW-20302)/20302 """ Explanation: P4.4 Part b Compare groundwater discharge to the swamp under the prepumping scenario in Problem P4.3 with the results under the 
pumping scenarios. In light of the modeling results, consider what might be meant by “significantly reduced” as used by the state agency (see discussion in Problem P4.3). Make a list of physical, geochemical and ecological conditions that potentially could be affected by a change in groundwater flow to the Green Swamp. End of explanation """
charlesll/RamPy
examples/Resample_and_flip_spectra.ipynb
gpl-2.0
%matplotlib inline import sys sys.path.append("../") import numpy as np import scipy from matplotlib import pyplot as plt import rampy as rp from sklearn import preprocessing """ Explanation: Use of resample and flipsp functions Spectral data are often delivered with decreasing and non-regularly sampled frequencies. This notebook shows how rampy can help you to solve this problem. End of explanation """ nb_points =500 x = np.sort(np.random.uniform(50,500,nb_points))[::-1] # gaussian peaks p1 = 20.0 * np.exp(-np.log(2) * ((x-150.0)/15.0)**2) p2 = 100.0 * np.exp(-np.log(2) * ((x-250.0)/5.0)**2) p3 = 50.0 * np.exp(-np.log(2) * ((x-450.0)/1.0)**2) p4 = 20.0 * np.exp(-np.log(2) * ((x-350.0)/30.0)**2) p5 = 30.0 * np.exp(-np.log(2) * ((x-460.0)/5.0)**2) # background: a large gaussian + linear bkg = 60.0 * np.exp(-np.log(2) * ((x-250.0)/200.0)**2) + 0.1*x #noise noise = 2.0 * np.random.normal(size=nb_points) #observation y = p1 + p2 + p3 + p4 + p5 + noise +bkg # spectrum, recorded array spectrum = np.vstack((x,y)).T plt.plot(spectrum[:,0],spectrum[:,1],"r-") plt.ylabel("Y") plt.xlabel("X") plt.show() """ Explanation: Creating a fake signal, with decreasing frequencies and irregularly sampled End of explanation """ print(spectrum[0:10,0]) print("interval 1:"+str(spectrum[1,0]-spectrum[0,0])) print("interval 2:"+str(spectrum[2,0]-spectrum[1,0])) """ Explanation: OK, makes no difference for pyplot but actually x is reversely sorted, and no regularly sampled End of explanation """ spectrum_increasing = rp.flipsp(spectrum) print(spectrum_increasing[0:10,0]) """ Explanation: We can solve the first problem by using rp.resample(). Note that we could also use numpy.interp(). We will compare both for the sack of example. We first flip the array, then resample it. 
End of explanation """ x_new = np.arange(round(spectrum_increasing[0,0])+1,round(spectrum_increasing[-1,0])-1,0.8) y_new_rp = rp.resample(spectrum_increasing[:,0],spectrum_increasing[:,1],x_new) y_new_np = np.interp(x_new,spectrum_increasing[:,0],spectrum_increasing[:,1]) plt.subplot(1,2,1) plt.plot(spectrum[:,0],spectrum[:,1],"k.") plt.plot(x_new,y_new_rp,"r-",label="rampy") plt.plot(x_new,y_new_np,"b-",label="np.interp") plt.ylabel("Y") plt.xlabel("X") plt.legend() plt.subplot(1,2,2) plt.plot(spectrum[:,0],spectrum[:,1],"k.") plt.plot(x_new,y_new_rp,"r-",label="rampy") plt.plot(x_new,y_new_np,"b-",label="np.interp") plt.ylabel("Y") plt.xlabel("X") plt.xlim(200,230) plt.ylim(70,90) plt.legend() plt.tight_layout() """ Explanation: OK, now the frequencies are in increasing order. This seems not important maybe, but remember than many spline algorithm (including gcvspline or the Dierckx version in scipy) required increasing x values... Now, we resample on a linearly spaced x axis. When creating x_new, remember that the boundaries should be inside those of the existing frequencies. End of explanation """ y_new_rp = rp.resample(spectrum_increasing[:,0],spectrum_increasing[:,1],x_new,kind="nearest") plt.subplot(1,2,1) plt.plot(spectrum[:,0],spectrum[:,1],"k.") plt.plot(x_new,y_new_rp,"r-",label="rampy") plt.plot(x_new,y_new_np,"b-",label="np.interp") plt.ylabel("Y") plt.xlabel("X") plt.legend() plt.subplot(1,2,2) plt.plot(spectrum[:,0],spectrum[:,1],"k.") plt.plot(x_new,y_new_rp,"r-",label="rampy") plt.plot(x_new,y_new_np,"b-",label="np.interp") plt.ylabel("Y") plt.xlabel("X") plt.xlim(200,230) plt.ylim(70,90) plt.legend() plt.tight_layout() """ Explanation: As seen below, rampy.resample return the same values as numpy.interp with the default values. However, we see that the fit is actually not really perfect. 
This is where rampy.resample offers you more: you can choose the type of interpolation done, and other options, as it uses scipy.interpolate.interp1d at the low level. See the documentation here. We can try to use a different algorithm and see the result: End of explanation """
samzhang111/frontpages
analysis/data_exploration.ipynb
gpl-3.0
# <help> # <api> from collections import defaultdict import datetime import pandas as pd import numpy as np def load_data(clean=True, us=True): df = pd.read_sql_table('frontpage_texts', 'postgres:///frontpages') df_newspapers = pd.read_sql_table('newspapers', 'postgres:///frontpages') if clean: df['text'] = df['text'].str.strip() df = df[df['text'].str.len() > 1] # This is the date that the Newseum had a "Day without News": # http://www.newseum.org/withoutnews/ df = df[df.date != datetime.datetime(2017, 6, 5)] df = dedupe_text(df) if us: df_newspapers = df_newspapers[df_newspapers.country == 'USA'] df = df[df.slug.isin(set(df_newspapers.slug))] df['page_height_round'] = df['page_height'].apply(int) df['page_width_round'] = df['page_width'].apply(int) df['page_width_round_10'] = df['page_width'].apply(lambda w: int(w/10)*10) df['page_height_round_10'] = df['page_height'].apply(lambda w: int(w/10)*10) df['aspect_ratio'] = np.round(df['page_width_round_10'] / df['page_height_round_10'], decimals=1) return df, df_newspapers def dedupe_text(df): text_counts = df.groupby(['slug']).text.value_counts() duplicate_text = text_counts[text_counts > 1].reset_index(name='count').drop('count', axis=1) duplicate_text_dict = defaultdict(set) duplicate_text.apply(lambda row: duplicate_text_dict[row.slug].add(row.text), axis=1) return df[df.apply(lambda row: row.text not in duplicate_text_dict[row.slug], axis=1)] df, df_newspapers = load_data() df_clean = dedupe_text(df) df_newspapers.head() us_newspapers_df = df_newspapers[df_newspapers.country == 'USA'] print('''We have metadata for {} newspapers. There are {} total countries represented. The top 5 are: {}. Within the US, there is representation from {} states. 
The states with the most newspapers are: {} And the least: {} '''.format( df_newspapers.shape[0], df_newspapers.country.nunique(), df_newspapers.country.value_counts()[:5], us_newspapers_df.state.nunique(), us_newspapers_df.state.value_counts()[:5], us_newspapers_df.state.value_counts()[-5:], )) df_us = df[df.slug.isin(set(us_newspapers_df.slug))] newspapers_in_df = df_newspapers[df_newspapers.slug.isin(set(df_us.slug))] print('''Currently, there are: {} rows of text {} days of scrapes (earliest: {} latest : {}) {} total newspapers (not all the pdfs were extractable). Filtering down to the US, there are now: {} newspapers {} rows of text For those newspapers that are available in the US, there are: {} states states with most newspapers: {} with least: {} with none: {} '''.format( df.shape[0], df.date.nunique(), df.date.min(), df.date.max(), df.slug.nunique(), df_us.slug.nunique(), df_us.shape[0], newspapers_in_df.state.nunique(), newspapers_in_df.state.value_counts()[:5], newspapers_in_df.state.value_counts()[-5:], set(df_newspapers.state) - set(newspapers_in_df.state) )) """ Explanation: Front pages of newspapers -- Initial discovery We have two datasets: * frontpage_texts, the text boxes extracted from pdfs of the front pages of newspapers, downloaded from the Newseum * newspapers, the metadata of the newspapers, also from the Newseum site. The text boxes contain interesting metadata for a given chunk of text, such as its bounding box, font, and size. This notebook will document some of the early exploratory attempts to understand the variety of the data, and to move toward performing an analysis of media coverage/bias. End of explanation """ print('''Fonts are often written in a format like this: {}. Out of {} rows... 
{} of the fonts have non-empty text {} of the fonts have a '+' {} of the fonts have a '-' '''.format( df.fontface.iloc[0], df.shape[0], (df.fontface.str.len() > 0).sum(), df.fontface.str.contains('\+').sum(), df.fontface.str.contains('-').sum() )) print('''This seems to mean that we can break apart the font into: [optional-leading-thing]+[font-family]-[font-weight] ''') font_partition = df.fontface.str.rpartition('+') df['font_family_weight'] = font_partition[2] font_family_partition = df['font_family_weight'].str.partition('-') df['font_leading_thing'] = font_partition[0] df['font_family'] = font_family_partition[0] df['font_weight'] = font_family_partition[2] print('''After doing that, There are... {} unique font families {} unique font weights {} unique optional-leading-things'''.format( df.font_family.nunique(), df.font_weight.nunique(), df.font_leading_thing.nunique() )) df_us = df[df.slug.isin(set(us_newspapers_df.slug))] """ Explanation: Fonts End of explanation """ # Let's do something with a Denver paper df_newspapers[df_newspapers.city == 'Denver'] import numpy as np df_denver_post = df_us[df_us.slug == 'CO_DP'] font_stats = df_denver_post.groupby(['font_family_weight']).fontsize.agg({'count': len, 'min': np.min, 'max': np.max, 'avg': np.mean}) print('''We have {} days of scraped Denver Post front pages. We have {} unique font-weight combos. Here is a mapping of each font family to their min, average, and max font size. {} '''.format( df_denver_post.date.nunique(), df_denver_post.groupby(['font_family_weight']).first().shape[0], font_stats )) font_days = df_denver_post.groupby(['font_family_weight']).date.nunique().sort_values(ascending=False) print('''Fonts by number of days on which they appear {} '''.format( font_days )) %matplotlib inline import matplotlib.pyplot as plt font_stats['days_present'] = font_days plt.suptitle('Number of days a font appears, vs. 
total font appearances') plt.scatter(font_stats.days_present, font_stats['count']) df_denver_post.sort_values(['date', 'avg_character_area'], ascending=False).groupby('date').head(5).head(10) """ Explanation: Denver Post End of explanation """ # <api> import pprint import string from nltk import word_tokenize chars = set(string.ascii_letters) def include_word(word): return sum([c in chars for c in word]) >= 3 def preprocess_text(text): lowered = text.strip().lower() lowered = ''.join(lowered.split('-\n')) lowered = lowered.replace('\n', ' ') words = word_tokenize(lowered) filtered_words = [word for word in words if include_word(word)] return filtered_words def bag_of_words(text): '''Literally, this returns a set of the bag of words for fast single-token searches''' return set(preprocess_text(text)) def preprocess_all(texts): for text in texts: yield text, preprocess_text(text) print('''For text preprocessing, we consider a few cases: * Newlines should be stripped * Everything should be lower-cased * We should return a tokenized list * Tokens without a certain number of ascii characters (US-English analysis for now) will be rejected The extraction from PDFs still contains word-continuations across line breaks. For now, we'll consider all lines that end with "-" as continuations, and link the text from before and after. Newlines without continuations will be replaced with spaces. Examples: {} '''.format( pprint.pformat(list(preprocess_all([ 'Hel-\nlo, bye\nnow\n', *df_denver_post.text.sample(3) ]))) )) df_us['bow'] = df_us.text.apply(bag_of_words) df_denver_post_latest = df_us[(df_us.slug == 'CO_DP') & (df_us.date == df_us.date.max())] def percent_of_page(unigram, one_paper_df): unigram = unigram.lower().strip() lines_with_unigram = one_paper_df[one_paper_df.bow.apply(lambda bag: unigram in bag)] return lines_with_unigram.percent_of_page.sum() print('''Now we write a method to get the percent of page that a unigram occupies, for a particular front page. 
Syria, Denver Post, latest day: {} garbage input, should be 0: {}'''.format( percent_of_page('Syria', df_denver_post_latest), percent_of_page('asdflkjasdflasdfkjasdf', df_denver_post_latest) )) """ Explanation: Unigram "percent of page" analysis Given an unigram like "Syria", how much of a given front page does it occupy? Notes We will consider the entire text block that contains the unigram to be related to that unigram. For example, the entire headline of "US BOMBS SYRIA" will be counted as space devoted toward "Syria". Likewise, a lengthy front-page article that mentions "Syria" in it will (naively, perhaps) be considered 100% about Syria. We're assuming that search queries will be proper nouns, so we're not going to perform any stemming or lemmatizing. Followup approaches Some newspapers contain more and smaller text, like the NYT, compared to tabloids where words are written extremely largely across the surface. This may still be of interest -- we do want to acknowledge the space devoted to "Syria" if it is splashed across the front of the tabloid -- but we may also want to develop a measure of relative importance so that a top-of-banner headline is weighted equally across all newspapers. This approach does not touch on probabilistic topic modeling yet -- these are only direct matches. We will also want to develop a method to link a headline with an article, so that a headline like "BOOTS ON THE GROUND" could possibly be linked to the followup article on Syria. This would also allow us to do some tangential but interesting accounts of which Associated Press articles get republished the most. 
End of explanation """ # filter down to newspapers with entries with more than 3 days days_of_newspapers = df_us.groupby('slug').date.nunique() df_us_3plus = df_us[df_us.slug.isin(set(days_of_newspapers[days_of_newspapers > 3].index))] print('''Number of newspapers with >3 days: {} (Number of total newspapers: {}) '''.format( df_us_3plus.slug.nunique(), df_us.slug.nunique() )) from functools import partial def unigram_percent_of_page(query, dataframe): return dataframe.groupby(['slug', 'date']).apply(partial(percent_of_page, query)) def _reshape_percent_of_day_series(percent_of_page): return percent_of_page.reset_index().rename(columns={0: 'percent_of_page'}) def percent_of_page_by_day(percent_of_page_df): return _reshape_percent_of_day_series(percent_of_page_df).groupby('date').percent_of_page.mean() def percent_of_papers_with_mention(percent_of_page_df, threshold=0): percents_by_paper_date = _reshape_percent_of_day_series(percent_of_page_df) greater_than_thresh = (percents_by_paper_date.groupby(['slug', 'date']).percent_of_page.max() > threshold).reset_index() return greater_than_thresh.groupby('date').mean() # Average mentions per day syria_results = unigram_percent_of_page('Syria', df_us_3plus) print('''Percent of papers that mentioned Syria by day: {} Average percent of newspaper front page devoted to Syria by day: {}'''.format( percent_of_papers_with_mention(syria_results), percent_of_page_by_day(syria_results), )) """ Explanation: Now we run this method across all the newspapers, across all days! 
End of explanation """ df_population = pd.read_csv('~/data/sub-est2015_all.csv', encoding='ISO-8859-2') df_cities = df_population[df_population.NAME.str.endswith('city') | df_population.NAME.str.endswith('town')] df_cities['city'] = df_cities.NAME.str.slice(0, -5).str.lower() df_cities['place_name'] = df_cities.city + ', ' + df_cities.STNAME.str.lower() df_cities = df_cities.sort_values('POPESTIMATE2015').groupby('place_name').head(1) df_cities.head() state_abbreviation_to_name = {} with open('files/states.csv') as f: next(f) # skip header for line in f: state, abbrev = line.strip().split(',') state_abbreviation_to_name[abbrev.strip('"')] = state.strip('"').lower() us_newspapers_df['place_name'] = us_newspapers_df.city.str.lower() + ', ' + us_newspapers_df.state.apply(state_abbreviation_to_name.get) us_newspapers_with_pop = pd.merge(us_newspapers_df, df_cities[['place_name', 'POPESTIMATE2015']], how='left', on='place_name', copy=False) print('''{} out of {} newspapers had places found in the census. 
Examples of ones that didn't: {} '''.format( us_newspapers_with_pop.POPESTIMATE2015.count(), us_newspapers_with_pop.shape[0], us_newspapers_with_pop[us_newspapers_with_pop.POPESTIMATE2015.isnull()].place_name.head() )) us_newspapers_df.head() unidentified_map = {} unidentified_places = us_newspapers_with_pop[us_newspapers_with_pop.POPESTIMATE2015.isnull()] for i, row in unidentified_places.iterrows(): matches = (df_population.STNAME == row.state) & (df_population.NAME.str.lower().str.contains(row.city.lower())) if matches.sum() == 0: continue pops = df_population[matches].sort_values('POPESTIMATE2015').iloc[0] unidentified_map[row.place_name] = (pops.NAME, pops.POPESTIMATE2015) print('''Out of {} unidentified places, we found {} by looking for substrings.'''.format( unidentified_places.shape[0], len(unidentified_map) )) """ Explanation: Connecting newspapers with population metadata Short of getting data on readership, we'll try to pull population metadata for the hometown of each newspaper. Edit: See bottom for conclusion. Tldr: it's not great, because there are multiple papers per city, many of which are lesser read. Doh. End of explanation """ import numpy as np def set_from_map_if_null(row): if pd.isnull(row.POPESTIMATE2015): return unidentified_map.get(row.place_name, [np.nan, np.nan])[1] return row.POPESTIMATE2015 us_newspapers_with_pop['population_est_2015'] = us_newspapers_with_pop.apply(set_from_map_if_null, 1) print('''So now {} out of {} newspapers have populations. Largest newspapers by population: {} '''.format( us_newspapers_with_pop.population_est_2015.count(), us_newspapers_with_pop.shape[0], us_newspapers_with_pop.sort_values('population_est_2015', ascending=False).head(5)[['title', 'state']] )) """ Explanation: Good enough! 
End of explanation """ # First, without any idf weighting, we'll calculate the contribution of individual words from collections import Counter def vocab_weights_by_word(df): counter = Counter() for i, row in df.iterrows(): for word in row.bow: # we won't multiply by the number of characters to get closer to "true" word real estate because we don't # care about the length of words. but we will divide by the total area of the page to normalize across # newspapers that are different sizes. counter[word] += row.avg_character_area return counter sorted(vocab_weights_by_word(df_denver_post_latest).items(), key=lambda x: x[1], reverse=True)[:5] """ Explanation: Oof. Looks like population might not work so well, since large cities often have several, lesser-read newspapers. Most headline-y words per day This is a variation on the unigram experiment above, where instead we will compute the percent of page for all words in all newspapers. Then we'll average them together across the newspapers to get the "most headliney words". A few variations we'll consider: We'll run one version where we consider the area given to each word independently, and another one where the bounding box of the entire text box where the word is found is grouped together. In terms of the front page real estate, one approach can be viewed as basically calculating the real estate for individual words, and the other for "topics" where topics consist of all the words in the document. There is going to be a lot of noise from stopwords. "The", for instance, will be present in nearly all of the articles. We should perform tf-idf to scale the data first. However, we don't want tf-idf to count newsy words toward the document frequency, so we'll calculate it on a separate corpus first. 
End of explanation """ import string import operator from collections import Counter from nltk.corpus import reuters import numpy as np doc_freq_counter = Counter() for fid in reuters.fileids(): bow = set(map(operator.methodcaller('lower'), reuters.words(fid))) bow = bow - set(string.punctuation) - set(string.digits) doc_freq_counter.update(bow) idfs = {} for word, count in doc_freq_counter.items(): idfs[word] = np.log(float(len(reuters.fileids())) / count) print('''We'll calculate document frequencies across the {} articles in the Reuters corpus. The most common words in the corpus are: {} As idfs: {} '''.format( len(reuters.fileids()), sorted(doc_freq_counter.items(), key=operator.itemgetter(1), reverse=True)[:5], sorted(idfs.items(), key=operator.itemgetter(1))[:5], )) # again, this time with idf weighting def vocab_weights_by_word(df, idf=None, method='by_char'): '''Methods: `by_char`: Average character size of the textbox in which a string is embedded `by_word_area`: Average character size * len of string `by_block`: Area of block in which string is embedded''' if method not in ['by_char', 'by_word_area', 'by_block']: raise ArgumentError('method needs to be one of "by_char", "by_word_area", "by_block"') counter = Counter() max_idf = max(idf.values()) # used for missing values for i, row in df.iterrows(): for word in set(row.bow) - set(string.punctuation) - set(string.digits): # we won't multiply by the number of characters to get closer to "true" word real estate because we don't # care about the length of words. but we will divide by the total area of the page to normalize across # newspapers that are different sizes. 
if method in ['by_char', 'by_word_area']: weight = row.avg_character_area if method == 'by_word_area': weight *= len(word) elif method == 'by_block': weight = row.percent_of_page if idf: weight *= idf.get(word, max_idf) counter[word] += weight return counter print('''The top words in the latest Denver Post by aggregate word "real estate", weighted by inverse document frequency: {} With word areas taken into consideration (longer words get weighted higher): {} Using the area of the entire block: {} '''.format( pprint.pformat(sorted(vocab_weights_by_word(df_denver_post_latest, idfs).items(), key=operator.itemgetter(1), reverse=True)[:10]), pprint.pformat(sorted(vocab_weights_by_word(df_denver_post_latest, idfs, method='by_word_area').items(), key=operator.itemgetter(1), reverse=True)[:10]), pprint.pformat(sorted(vocab_weights_by_word(df_denver_post_latest, idfs, method='by_block').items(), key=operator.itemgetter(1), reverse=True)[:10]) )) """ Explanation: Clearly there needs to be some kind of weighting, or else words like "by" will dominate. End of explanation """ import numpy as np def make_idfs(docs): article_word_doc_counts = Counter() for doc in docs: article_word_doc_counts.update(row.bow) article_idfs = {} for word, count in article_word_doc_counts.items(): article_idfs[word] = np.log(float(len(docs)) / count) article_idfs = make_idfs(df_us.bow) print('''Vocabulary size of these two different idf datasets: Reuters: {} Front pages: {} Most common front page words: {} '''.format( len(idfs), len(article_idfs), pprint.pformat(sorted(article_idfs.items(), key=operator.itemgetter(1))[:10]) )) """ Explanation: Better document frequencies The Reuters corpus is only ~10k documents. Instead, let's reverse engineer the document frequencies from the words in a word2vec model of Google News and Zipf's Law. (Skip to other window, where I did this, and found the results to be lackluster.) I requested access to the Yahoo News n-grams corpus. 
Otherwise, may need to be creative. For now, let's incorporate the document frequencies from the articles themselves in the dataset. The more days we gather, the more we'll be able to do this. End of explanation """ from sklearn.feature_extraction import DictVectorizer all_vocab_weights = {} todays_papers = df_us_3plus[df_us_3plus.date == df_us_3plus.date.max()] print('Total papers: ', todays_papers.slug.nunique()) for i, (slug, paper) in enumerate(todays_papers.groupby('slug')): if i % 50 == 0: print('.', end='') all_vocab_weights[slug] = vocab_weights_by_word(paper, article_idfs, method='by_word_area') vectorizer = DictVectorizer(sparse=False) X = vectorizer.fit_transform(all_vocab_weights.values()) print('Top results with word area:') sorted(zip(vectorizer.feature_names_, X.mean(axis=0)), key=operator.itemgetter(1), reverse=True)[:10] all_vocab_weights = {} todays_papers = df_us_3plus[df_us_3plus.date == df_us_3plus.date.max()] print('Total papers: ', todays_papers.slug.nunique()) for i, (slug, paper) in enumerate(todays_papers.groupby('slug')): if i % 50 == 0: print('.', end='') all_vocab_weights[slug] = vocab_weights_by_word(paper, article_idfs, method='by_char') vectorizer = DictVectorizer(sparse=False) X = vectorizer.fit_transform(all_vocab_weights.values()) print('Top results with character area:') sorted(zip(vectorizer.feature_names_, X.mean(axis=0)), key=operator.itemgetter(1), reverse=True)[:10] all_vocab_weights = {} todays_papers = df_us_3plus[df_us_3plus.date == df_us_3plus.date.max()] print('Total papers: ', todays_papers.slug.nunique()) for i, (slug, paper) in enumerate(todays_papers.groupby('slug')): if i % 50 == 0: print('.', end='') all_vocab_weights[slug] = vocab_weights_by_word(paper, article_idfs, method='by_block') vectorizer = DictVectorizer(sparse=False) X = vectorizer.fit_transform(all_vocab_weights.values()) print('Top results with block area:') sorted(zip(vectorizer.feature_names_, X.mean(axis=0)), key=operator.itemgetter(1), 
reverse=True)[:10] """ Explanation: Finding "front-page-est" words By combining the results of running all of the newspapers on a given day through the method above, we attempt to find the words most representative of front pages across the country on any particular day. We'll run it using all three of the different methods we have for weighting words as well. End of explanation """ df_us_3plus['page_height_round'] = df_us_3plus.page_height.apply(int) df_us_3plus['page_width_round'] = df_us_3plus.page_width.apply(int) import utils def plot_word(dataframe, word, date=None, paper=None): title = 'Appearances of {}'.format(word) if date: dataframe = dataframe[dataframe.date == date] title += ' on {}'.format(date) if paper: dataframe = dataframe[dataframe.slug == utils.slug_for_newspaper(paper)] title += ' on {}'.format(paper) relevant_df = dataframe[dataframe.bow.apply(lambda bow: word in bow)] grids = [] for (date, slug), paper in relevant_df.groupby(['date', 'slug']): grids.append(utils.make_intensity_grid(relevant_df, relevant_df.page_height_round.max(), relevant_df.page_width_round.max())) avg_intensity = sum([x / len(grids) for x in grids]) return utils.plot_intensity(avg_intensity, title) plot_word(df_us_3plus, 'syria') """ Explanation: Ah! So it looks like: all the methods give too much weight to frequency of appearance, vs. rareness of word this is especially evident with block area, since every time "the" shows up, the entire area of the block is counted the word area weight gives some interesting results but they get skewed by extremely large banners (the enquirer, the advocate, "sunday", etc) So that means the next steps are: * remove stopwords * remove names of newspapers from themselves * find way to penalize common words even more than idf Visualizations Bringing in the approach from the "Front Page Heatmap" notebook, we can try to visualize the prominence of certain words across front pages today. End of explanation """
RedHatInsights/insights-core
docs/notebooks/Filters Tutorial.ipynb
apache-2.0
""" Some imports used by all of the code in this tutorial """ import sys sys.path.insert(0, "../..") from __future__ import print_function import os from insights import run from insights.specs import SpecSet from insights.core import IniConfigFile from insights.core.plugins import parser, rule, make_fail from insights.core.spec_factory import simple_file """ Explanation: Filtering of Data in Insights Parsers and Rules In this tutorial we will investigate filters in insights-core, what they are, how they affect your components and how you can use them in your code. Documentation on filters can be found in the insights-core documentation. The primary purposes of filters are: to prevent the collection of sensitive information while enabling the collection of necessary information for analysis, and; to reduce the amount of information collected. Filters are typically added in rule modules since the purpose of a rule is to analyze particular information and identify a problem, potential problem or fact about the system. A filter may also be added in a parse modules if it is required to enable parsing of the data. We will discuss this further when we look at the example. Filters added by rules and parsers are applied when the data is collected from a system. They are combined so that if they are added from multiple rules and parsers, each rule will receive all information that was collected by all filters for a given source. An example will help demonstrate this. Suppose you write some rules that needs information from /var/log/messages. This file could be very large and contain potentially sensitive information, so it is not desirable to collect the entire file. Let's say rule_a needs messages that indicate my_special_process has failed to start. And another rule, rule_b needs messages that indicate that my_other_process had the errors MY_OTHER_PROCESS: process locked or MY_OTHER_PROCESS: memory exceeded. 
Then the two rules could add the following filters to ensure that just the information they need is collected: rule_a: python add_filter(Specs.messages, 'my_special_process') rule_b: python add_filter(Specs.messages, ['MY_OTHER_PROCESS: process locked', 'MY_OTHER_PROCESS: memory exceeded']) The effect of this would be that when /var/log/messages is collected, the filters would be applied and only the lines containing the strings 'my_special_process', 'MY_OTHER_PROCESS: process locked', or 'MY_OTHER_PROCESS: memory exceeded' would be collected. This significantly reduces the size of the data and the chance that sensitive information in /var/log/messages might be collected. While there are significant benefits to filtering, you must be aware that a datasource is being filtered or your rules could fail to identify a condition that may be present on a system. For instance suppose a rule rule_c also needs information from /var/log/messages about process_xyz. If rule_c runs with other rules like rule_a or rule_b then it would never see lines containing "process_xyz" appearing in /var/log/messages unless it adds a new filter. When any rule or parser adds a filter to a datasource, that data will be filtered for all components, not just the component adding the filter. Because of this it is important to understand when a datasource is being filtered so that your rule will function properly and include its own filters if needed. Exploring Filters Unfiltered Data Suppose we want to write a rule that will evaluate the contents of the configuration file death_star.ini to determine if there are any vulnerabilities. Since this is a new data source that is not currently collected by insights-core we'll need to add three elements to collect, parse and evaluate the information. End of explanation """ class Specs(SpecSet): """ Define a new spec to collect the file we need. 
""" death_star_config = simple_file(os.path.join(os.getcwd(), 'death_star.ini'), filterable=True) """ Explanation: First we'll need to add a specification to collect the configuration file. Note that for purposes of this tutorial we are collecting from a directory where this notebook is located. Normally the file path would be an absolute path on your system or in an archive. End of explanation """ @parser(Specs.death_star_config) class DeathStarCfg(IniConfigFile): """ Define a new parser to parse the spec. Since the spec is a standard INI format we can use the existing IniConfigFile parser that is provided by insights-core. See documentation here: https://insights-core.readthedocs.io/en/latest/api_index.html#insights.core.IniConfigFile """ pass """ Explanation: Next we'll need to add a parser to parse the file being collected by the spec. Since this file is in INI format and insights-core provides the IniConfigFile parser, we can just use that to parse the file. See the parser documentation to find out what methods that parser provides. End of explanation """ @rule(DeathStarCfg) def ds_vulnerable(ds_cfg): """ Define a new rule to look for vulnerable conditions that may be included in the INI file. If found report them. """ vulnerabilities = [] for section in ds_cfg.sections(): print("Section: {}".format(section)) for item_key in ds_cfg.items(section): print(" {}={}".format(item_key, ds_cfg.get(section, item_key))) if 'vulnerability' in item_key: vulnerabilities.append((item_key, ds_cfg.get(section, item_key))) if vulnerabilities: return make_fail('DS_IS_VULNERABLE', vulnerabilities=vulnerabilities) """ Explanation: Finally we can write the rule that will examine the contents of the parsed configuration file to determine if there are any vulnerabilities. In this INI file we can find the vulnerabilities by searching for keywords to find one that contains the string vulnerability. 
If any vulnerabilities are found the rule should return information in the form of a response that documents the vulnerabilities found, and tags them with the key DS_IS_VULNERABLE. If no vulnerabilities are found the rule should just drop out, effectively returning None. End of explanation """ !cat death_star.ini """ Explanation: Before we run the rule, lets look at the contents of the configuration file. It is in the format of a typical INI file and contains some interesting information. In particular we see that it does contain a keyword that should match the string we are looking for in the rule, "major_vulnerability=ray-shielded particle exhaust vent". So we expect the rule to return results. End of explanation """ results = run(ds_vulnerable) """ Explanation: Lets run our rule and find out. To run the rule we'll use the insights.run() function and as the argument pass in our rule object (note this is not a string but the actual object). The results returned will be an insights.dr.broker object that contains all sorts of information about the execution of the rule. You can explore more details of the broker in the Insights Core Tutorial notebook. The print statements in our rule provide output as it loops through the configuration file. End of explanation """ type(results[Specs.death_star_config]) type(results[DeathStarCfg]) type(results[ds_vulnerable]) """ Explanation: Now we are ready to look at the results. The results are stored in results[ds_vulnerable] where the rule object ds_vulnerable is the key into the dictionary of objects that your rule depended upon to execute, such as the parser DeathStarCfg and the spec Spec.death_star_config. You can see this by looking at those objects in results. End of explanation """ results[ds_vulnerable] """ Explanation: Now lets look at the rule results to see if they match what we expected. 
End of explanation """ from insights.core.filters import add_filter add_filter(Specs.death_star_config, '[') @parser(Specs.death_star_config) class DeathStarCfg(IniConfigFile): """ Define a new parser to parse the spec. Since the spec is a standard INI format we can use the existing IniConfigFile parser that is provided by insights-core. See documentation here: https://insights-core.readthedocs.io/en/latest/api_index.html#insights.core.IniConfigFile """ pass """ Explanation: Success, it worked as we expected finding the vulnerability. Now lets look at how filtering can affect the rule results. Filtering Data When we looked at the contents of the file you may have noticed some other interesting information such as this: ``` Keep this info secret [secret_stuff] username=dvader password=luke_is_my_son `` As a parser writer, if you know that a file could contain sensitive information, you may choose to filter it in the parser module to avoid collecting it. Usernames, passwords, hostnames, security keys, and other sensitive information should not be collected. In this case theusernameandpassword` are in the configuration file, so we should add a filter to this parser to prevent them from being collected. How do we add a filter and avoid breaking the parser? Each parser is unique, so the parser writer must determine if a filter is necessary, and how to add a filter that will allow the parser to function with a minimal set of data. For instance a Yaml or XML parser might have a difficult time parsing a filtered Yaml or XML file. For our example, we are using an INI file parser. INI files are structured with sections which are identified as a section name in square brackets like [section name], followed by items like name or name=value. One possible way to filter an INI file is to add the filter "[" which will collect all lines with sections but no items. 
This can be successfully parsed by the INI parser, so that is how we'll filter out this sensitive information in our configuration file. We'll rewrite the parser adding the add_filter(Specs.death_star_config, '[') to filter all lines except those with a '[' string. End of explanation """ results = run(ds_vulnerable) results.get(ds_vulnerable, "No results") # Use .get method of dict so we can provide default other than None """ Explanation: Now lets run the rule again and see what happens. Do you expect the same results we got before? End of explanation """ add_filter(Specs.death_star_config, 'vulnerability') @rule(DeathStarCfg) def ds_vulnerable(ds_cfg): """ Define a new rule to look for vulnerable conditions that may be included in the INI file. If found report them. """ vulnerabilities = [] for section in ds_cfg.sections(): print("Section: {}".format(section)) for item_key in ds_cfg.items(section): print(" {}={}".format(item_key, ds_cfg.get(section, item_key))) if 'vulnerability' in item_key: vulnerabilities.append((item_key, ds_cfg.get(section, item_key))) if vulnerabilities: return make_fail('DS_IS_VULNERABLE', vulnerabilities=vulnerabilities) """ Explanation: Is that what you expected? Notice the output from the print statements in the rule, only the section names are printed. That is the result of adding the filter, only lines with '[' (the sections) are collected and provided to the parser. This means that the lines we were looking for in the rule are no longer there, and that it appears our rule didn't find any vulnerabilities. Next we'll look at how to fix our rule to work with the filtered data. Adding Filters to Rules We can add filters to a rule just like we added a filter to the parser, using the add_filter() method. The add_filter method requires a spec and a string or list/set of strings. In this case our rule is looking for the string 'vulnerability' so we just need to add that to the filter. 
Alternatively, filters can be added by specifying a parser or combiner in the add_filter() method instead of a spec. In that scenario, the dependency tree will be traversed to locate underlying datasources that are filterable (filterable parameter is equal to True). And the specified filters will be added to those datasouces. In our example, we can filter the underlying Specs.death_star_config datasource by adding the add_filter(DeathStarCfg, 'vulnerability') statement. This is especially useful when you are working with a combiner that consolidates data from multiple parsers, which in turn depend on multiple datasources. Adding a filter to a combiner would allow for consistent filtering of data across all applicable datasources. End of explanation """ results = run(ds_vulnerable) results.get(ds_vulnerable, "No results") # Use .get method of dict so we can provide default other than None """ Explanation: Now lets run the rule again and see what happens. End of explanation """ """ This code will disable all filtering if it is run as the first cell when the notebook is opened. After the notebook has been started you will need to click on the Kernel menu and then the restart item, and then run this cell first before all others. You would need to restart the kernel and then not run this cell to prevent disabling filters. """ import os os.environ['INSIGHTS_FILTERS_ENABLED'] = 'False' results = run(ds_vulnerable) results.get(ds_vulnerable, "No results") # Use .get method of dict so we can provide default other than None """ Explanation: Now look at the output from the print statements in the rule, the item that was missing is now included. By adding the string required by our rule to the spec filters we have successfully included the data needed by our rule to detect the problem. Also, by adding the filter to the parser we have eliminated the sensitive information from the input. 
Determining if a Spec is Filtered When you are developing your rule, you may want to add some code, during development, to check if the spec you are using is filtered. This can be accomplished by looking at the spec in insights/specs/init.py. Each spec is defined here as a RegistryPoint() type. If the spec is filtered it will have the parameter filterable=True, for example the following indicates that the messages log (/var/log/messages) will be filtered: messages = RegistryPoint(filterable=True) If you need to use a parser that relies on a filtered spec then you need to add your own filter to ensure that your rule will receive the data necessary to evaluate the rule conditions. If you forget to add a filter to your rule, if you include integration tests for your rule, pytest will indicate an exception like the following warning you that the add_filter is missing: ``` telemetry/rules/tests/integration.py:7: component = <function report at 0x7fa843094e60>, input_data = <InputData {name:test4-00000}>, expected = None def run_test(component, input_data, expected=None): if filters.ENABLED: mod = component.__module__ sup_mod = '.'.join(mod.split('.')[:-1]) rps = _get_registry_points(component) filterable = set(d for d in rps if dr.get_delegate(d).filterable) missing_filters = filterable - ADDED_FILTERS.get(mod, set()) - ADDED_FILTERS.get(sup_mod, set()) if missing_filters: names = [dr.get_name(m) for m in missing_filters] msg = "%s must add filters to %s" raise Exception(msg % (mod, ", ".join(names))) E Exception: telemetry.rules.plugins.kernel.overcommit must add filters to insights.specs.Specs.messages ../../insights/insights-core/insights/tests/init.py:114: Exception ``` If you see this exception when you run tests then it means you need to include add_filter to your rule. 
Turning Off Filtering Globally There are often times that you would want or need to turn off filtering in order to perform testing or to fully analyze some aspects of a system and diagnose problems. Also if you are running locally on a system you might want to collect all data unfiltered. You can to this by setting the environment variable INSIGHTS_FILTERS_ENABLED=False prior to running insights-core. This won't work inside this notebook unless you follow the directions below. End of explanation """ def show_results(results, component): """ This function will show the results from run() where: results = run(component) run will catch all exceptions so if there are any this function will print them out with a stack trace, making it easier to develop component code. """ if component in results: print(results[component]) else: print("No results for: {}".format(component)) if results.exceptions: for comp in results.exceptions: print("Component Exception: {}".format(comp)) for exp in results.exceptions[comp]: print(results.tracebacks[exp]) """ Explanation: Debugging Components If you are writing component code you may sometimes not see any results even though you expected them and no errors were displayed. That is because insights-core is catching the exceptions and saving them. In order to see the exceptions you can use the following method to display the results of a run and any errors that occurrerd. End of explanation """ @rule(DeathStarCfg) def bad_rule(cfg): # Force an error here infinity = 1 / 0 results = run(bad_rule) show_results(results, bad_rule) """ Explanation: Here's an example of this function in use End of explanation """
ledeprogram/algorithms
class7/donow/Zhao_Shengying_DoNow_7.ipynb
gpl-3.0
import pandas as pd %matplotlib inline import numpy as np from sklearn.linear_model import LogisticRegression """ Explanation: Apply logistic regression to categorize whether a county had high mortality rate due to contamination 1. Import the necessary packages to read in the data, plot, and create a logistic regression model End of explanation """ df=pd.read_csv("hanford.csv") """ Explanation: 2. Read in the hanford.csv file in the data/ folder End of explanation """ df.describe() df.corr() """ Explanation: <img src="../../images/hanford_variables.png"></img> 3. Calculate the basic descriptive statistics on the data End of explanation """ df['Mortality'].hist() df['Mortality'].mean() #use the median as a threshold df['Mort_high']=df['Mortality'].apply(lambda x:1 if x>=147.1 else 0) df['Expo_high']=df['Exposure'].apply(lambda x:1 if x>=3.41 else 0) def exposure_high(x): if x>=3.41: return 1 else: return 0 df Q1=df['Exposure'].quantile(q=0.25) Q1 Q2=df['Exposure'].quantile(q=0.) """ Explanation: 4. Find a reasonable threshold to say exposure is high and recode the data End of explanation """ lm = lm=lm.fit(x.y) """ Explanation: 5. Create a logistic regression model End of explanation """
gdementen/larray
doc/source/tutorial/tutorial_indexing.ipynb
gpl-3.0
from larray import * """ Explanation: Indexing, Selecting and Assigning Import the LArray library: End of explanation """ # let's start with population = load_example_data('demography_eurostat').population population """ Explanation: Import the test array population: End of explanation """ population['Belgium', 'Female', 2017] """ Explanation: Selecting (Subsets) The Array class allows to select a subset either by labels or indices (positions) Selecting by Labels To take a subset of an array using labels, use brackets [ ]. Let's start by selecting a single element: End of explanation """ # order of index doesn't matter population['Female', 2017, 'Belgium'] """ Explanation: As long as there is no ambiguity (i.e. axes sharing one or several same label(s)), the order of indexing does not matter. So you usually do not care/have to remember about axes positions during computation. It only matters for output. End of explanation """ population[['Belgium', 'Germany'], 2014:2016] """ Explanation: Selecting a subset is done by using slices or lists of labels: End of explanation """ # select all years starting from 2015 population[2015:] # select all first years until 2015 population[:2015] """ Explanation: Slices bounds are optional: if not given, start is assumed to be the first label and stop is the last one. End of explanation """ # select all even years starting from 2014 population[2014::2] """ Explanation: Slices can also have a step (defaults to 1), to take every Nth labels: End of explanation """ immigration = load_example_data('demography_eurostat').immigration # the 'immigration' array has two axes (country and citizenship) which share the same labels immigration # LArray doesn't use the position of the labels used inside the brackets # to determine the corresponding axes. Instead LArray will try to guess the # corresponding axis for each label whatever is its position. 
# Then, if a label is shared by two or more axes, LArray will not be able # to choose between the possible axes and will raise an error. try: immigration['Belgium', 'Netherlands'] except Exception as e: print(type(e).__name__, ':', e) # the solution is simple. You need to precise the axes on which you make a selection immigration[immigration.country['Belgium'], immigration.citizenship['Netherlands']] """ Explanation: <div class="alert alert-warning"> **Warning:** Selecting by labels as in above examples works well as long as there is no ambiguity. When two or more axes have common labels, it leads to a crash. The solution is then to precise to which axis belong the labels. </div> End of explanation """ # the previous example can also be written as immigration[X.country['Belgium'], X.citizenship['Netherlands']] """ Explanation: Ambiguous Cases - Specifying Axes Using The Special Variable X When selecting, assigning or using aggregate functions, an axis can be referred via the special variable X: population[X.time[2015:]] population.sum(X.time) This gives you access to axes of the array you are manipulating. The main drawback of using X is that you lose the autocompletion available from many editors. It only works with non-anonymous axes for which names do not contain whitespaces or special characters. End of explanation """ # select the last year population[X.time.i[-1]] # same but for the last 3 years population[X.time.i[-3:]] # using a list of indices population[X.time.i[0, 2, 4]] """ Explanation: Selecting by Indices Sometimes it is more practical to use indices (positions) along the axis, instead of labels. You need to add the character i before the brackets: .i[indices]. As for selection with labels, you can use a single index, a slice or a list of indices. Indices can be also negative (-1 represent the last element of an axis). <div class="alert alert-info"> **Note:** Remember that indices (positions) are always **0-based** in Python. 
So the first element is at index 0, the second is at index 1, etc. </div> End of explanation """ year = 2015 # with labels population[X.time[:year]] # with indices (i.e. using the .i[indices] syntax) index_year = population.time.index(year) population[X.time.i[:index_year]] """ Explanation: <div class="alert alert-warning"> **Warning:** The end *indice* (position) is EXCLUSIVE while the end label is INCLUSIVE. </div> End of explanation """ # select first country and last three years population.i[0, :, -3:] """ Explanation: You can use .i[] selection directly on array instead of axes. In this context, if you want to select a subset of the first and third axes for example, you must use a full slice : for the second one. End of explanation """ even_years = population.time[2014::2] population[even_years] """ Explanation: Using Groups In Selections End of explanation """ # select even years population[X.time % 2 == 0] """ Explanation: Boolean Filtering Boolean filtering can be used to extract subsets. Filtering can be done on axes: End of explanation """ # select population for the year 2017 population_2017 = population[2017] # select all data with a value greater than 30 million population_2017[population_2017 > 30e6] """ Explanation: or data: End of explanation """ start_year = Array([2015, 2016, 2017], axes=population.country) start_year population[X.time >= start_year] """ Explanation: <div class="alert alert-info"> **Note:** Be aware that after boolean filtering, several axes may have merged. 
</div> Arrays can also be used to create boolean filters: End of explanation """ for year in population.time: print(year) """ Explanation: Iterating over an axis Iterating over an axis is straightforward: End of explanation """ population[2017] = 0 population """ Explanation: Assigning subsets Assigning A Value Assigning a value to a subset is simple: End of explanation """ # store the data associated with the year 2016 in a new variable population_2016 = population[2016] population_2016 # now, we modify the new variable population_2016['Belgium'] = 0 # and we can see that the original array has been also modified population """ Explanation: Now, let's store a subset in a new variable and modify it: End of explanation """ # reload the 'population' array population = load_example_data('demography_eurostat').population # create a second 'population2' variable population2 = population population2 # set all data corresponding to the year 2017 to 0 population2[2017] = 0 population2 # and now take a look of what happened to the original array 'population' # after modifying the 'population2' array population """ Explanation: One very important gotcha though... <div class="alert alert-warning"> **Warning:** Storing a subset of an array in a new variable and modifying it after may also impact the original array. The reason is that selecting a contiguous subset of the data does not return a copy of the selected subset, but rather a view on a subset of the array. To avoid such behavior, use the ``.copy()`` method. 
</div> Remember: taking a contiguous subset of an array is extremely fast (no data is copied) if one modifies that subset, one also modifies the original array .copy() returns a copy of the subset (takes speed and memory) but allows you to change the subset without modifying the original array in the same time The same warning apply for entire arrays: End of explanation """ # reload the 'population' array population = load_example_data('demography_eurostat').population # copy the 'population' array and store the copy in a new variable population2 = population.copy() # modify the copy population2[2017] = 0 population2 # the data from the original array have not been modified population """ Explanation: <div class="alert alert-warning"> **Warning:** The syntax ``new_array = old_array`` does not create a new array but rather an 'alias' variable. To actually create a new array as a copy of a previous one, the ``.copy()`` method must be called. </div> End of explanation """ # select population for the year 2015 population_2015 = population[2015] # propagate population for the year 2015 to all next years population[2016:] = population_2015 population """ Explanation: Assigning Arrays And Broadcasting Instead of a value, we can also assign an array to a subset. In that case, that array can have less axes than the target but those which are present must be compatible with the subset being targeted. End of explanation """ # replace 'Male' and 'Female' labels by 'M' and 'F' population_2015 = population_2015.set_labels('gender', 'M,F') population_2015 # now let's try to repeat the assignement operation above with the new labels. # An error is raised because of incompatible axes try: population[2016:] = population_2015 except Exception as e: print(type(e).__name__, ':', e) """ Explanation: <div class="alert alert-warning"> **Warning:** The array being assigned must have compatible axes (i.e. same axes names and same labels) with the target subset. </div> End of explanation """
google/starthinker
colabs/airflow.ipynb
apache-2.0
!pip install git+https://github.com/google/starthinker """ Explanation: Airflow Composer Example Demonstration that uses Airflow/Composer native, Airflow/Composer local, and StarThinker tasks in the same generated DAG. License Copyright 2020 Google LLC, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Disclaimer This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team. This code generated (see starthinker/scripts for possible source): - Command: "python starthinker_ui/manage.py colab" - Command: "python starthinker/tools/colab.py [JSON RECIPE]" 1. Install Dependencies First install the libraries needed to execute recipes, this only needs to be done once, then click play. End of explanation """ from starthinker.util.configuration import Configuration CONFIG = Configuration( project="", client={}, service={}, user="/content/user.json", verbose=True ) """ Explanation: 2. Set Configuration This code is required to initialize the project. Fill in required fields and press play. If the recipe uses a Google Cloud Project: Set the configuration project value to the project identifier from these instructions. If the recipe has auth set to user: If you have user credentials: Set the configuration user value to your user credentials JSON. 
If you DO NOT have user credentials: Set the configuration client value to downloaded client credentials. If the recipe has auth set to service: Set the configuration service value to downloaded service credentials. End of explanation """ FIELDS = { 'auth_read':'user', # Credentials used for reading data. } print("Parameters Set To: %s" % FIELDS) """ Explanation: 3. Enter Airflow Composer Example Recipe Parameters Execute this using Airflow or Composer, the Colab and UI recipe is for refence only. This is an example DAG that will execute and print dates and text. Run it once to ensure everything works, then customize it. Modify the values below for your use case, can be done multiple times, then click play. End of explanation """ from starthinker.util.configuration import execute from starthinker.util.recipe import json_set_fields TASKS = [ { 'airflow':{ '__comment__':'Calls a native Airflow operator.', 'operators':{ 'bash_operator':{ 'BashOperator':{ 'bash_command':'date' } } } } }, { 'starthinker.airflow':{ '__comment__':'Calls an custom operator, requires import of library.', 'operators':{ 'hello':{ 'Hello':{ 'say':'Hi, there!' } } } } }, { 'hello':{ '__comment__':'Calls a StarThinker task.', 'auth':'user', 'say':'Hello World' } } ] json_set_fields(TASKS, FIELDS) execute(CONFIG, TASKS, force=True) """ Explanation: 4. Execute Airflow Composer Example This does NOT need to be modified unless you are changing the recipe, click play. End of explanation """
albahnsen/ML_RiskManagement
notebooks/07_decision_trees.ipynb
mit
# vehicle data import pandas as pd import zipfile with zipfile.ZipFile('../datasets/vehicles_train.csv.zip', 'r') as z: f = z.open('vehicles_train.csv') train = pd.io.parsers.read_table(f, index_col=False, sep=',') # before splitting anything, just predict the mean of the entire dataset train['prediction'] = train.price.mean() train year = 0 train['pred'] = train.loc[train.year<year, 'price'].mean() train.loc[train.year>=year, 'pred'] = train.loc[train.year>=year, 'price'].mean() (((train['price'] - train['pred'])**2).mean()) ** 0.5 train_izq = train.loc[train.year<0].copy() train_izq.year.unique() def error_año(train, year): train['pred'] = train.loc[train.year<year, 'price'].mean() train.loc[train.year>=year, 'pred'] = train.loc[train.year>=year, 'price'].mean() return round(((((train['price'] - train['pred'])**2).mean()) ** 0.5), 2) def error_miles(train, miles): train['pred'] = train.loc[train.miles<miles, 'price'].mean() train.loc[train.miles>=miles, 'pred'] = train.loc[train.miles>=miles, 'price'].mean() return round(((((train['price'] - train['pred'])**2).mean()) ** 0.5), 2) """ Explanation: 07 - Decision Trees by Alejandro Correa Bahnsen & Iván Torroledo version 1.2, Feb 2018 Part of the class Machine Learning for Risk Management This notebook is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License. Special thanks goes to Kevin Markham Adapted from Chapter 8 of An Introduction to Statistical Learning Why are we learning about decision trees? 
Can be applied to both regression and classification problems Many useful properties Very popular Basis for more sophisticated models Have a different way of "thinking" than the other models we have studied Lesson objectives Students will be able to: Explain how a decision tree is created Build a decision tree model in scikit-learn Tune a decision tree model and explain how tuning impacts the model Interpret a tree diagram Describe the key differences between regression and classification trees Decide whether a decision tree is an appropriate model for a given problem Part 1: Regression trees Major League Baseball player data from 1986-87: Years (x-axis): number of years playing in the major leagues Hits (y-axis): number of hits in the previous year Salary (color): low salary is blue/green, high salary is red/yellow Group exercise: The data above is our training data. We want to build a model that predicts the Salary of future players based on Years and Hits. We are going to "segment" the feature space into regions, and then use the mean Salary in each region as the predicted Salary for future players. Intuitively, you want to maximize the similarity (or "homogeneity") within a given region, and minimize the similarity between different regions. Rules for segmenting: You can only use straight lines, drawn one at a time. Your line must either be vertical or horizontal. Your line stops when it hits an existing line. Above are the regions created by a computer: $R_1$: players with less than 5 years of experience, mean Salary of \$166,000 $R_2$: players with 5 or more years of experience and less than 118 hits, mean Salary of \$403,000 $R_3$: players with 5 or more years of experience and 118 hits or more, mean Salary of \$846,000 Note: Years and Hits are both integers, but the convention is to use the midpoint between adjacent values to label a split. These regions are used to make predictions on out-of-sample data. Thus, there are only three possible predictions! 
(Is this different from how linear regression makes predictions?) Below is the equivalent regression tree: The first split is Years < 4.5, thus that split goes at the top of the tree. When a splitting rule is True, you follow the left branch. When a splitting rule is False, you follow the right branch. For players in the left branch, the mean Salary is \$166,000, thus you label it with that value. (Salary has been divided by 1000 and log-transformed to 5.11.) For players in the right branch, there is a further split on Hits < 117.5, dividing players into two more Salary regions: \$403,000 (transformed to 6.00), and \$846,000 (transformed to 6.74). What does this tree tell you about your data? Years is the most important factor determining Salary, with a lower number of Years corresponding to a lower Salary. For a player with a lower number of Years, Hits is not an important factor determining Salary. For a player with a higher number of Years, Hits is an important factor determining Salary, with a greater number of Hits corresponding to a higher Salary. Question: What do you like and dislike about decision trees so far? Building a regression tree by hand Your training data is a tiny dataset of used vehicle sale prices. Your goal is to predict price for testing data. Read the data into a Pandas DataFrame. Explore the data by sorting, plotting, or split-apply-combine (aka group_by). Decide which feature is the most important predictor, and use that to create your first splitting rule. Only binary splits are allowed. After making your first split, split your DataFrame into two parts, and then explore each part to figure out what other splits to make. Stop making splits once you are convinced that it strikes a good balance between underfitting and overfitting. Your goal is to build a model that generalizes well. You are allowed to split on the same variable multiple times! Draw your tree, labeling the leaves with the mean price for the observations in that region. 
Make sure nothing is backwards: You follow the left branch if the rule is true, and the right branch if the rule is false. How does a computer build a regression tree? Ideal approach: Consider every possible partition of the feature space (computationally infeasible) "Good enough" approach: recursive binary splitting Begin at the top of the tree. For every feature, examine every possible cutpoint, and choose the feature and cutpoint such that the resulting tree has the lowest possible mean squared error (MSE). Make that split. Examine the two resulting regions, and again make a single split (in one of the regions) to minimize the MSE. Keep repeating step 3 until a stopping criterion is met: maximum tree depth (maximum number of splits required to arrive at a leaf) minimum number of observations in a leaf Demo: Choosing the ideal cutpoint for a given feature End of explanation """ # encode car as 0 and truck as 1 train['vtype'] = train.vtype.map({'car':0, 'truck':1}) # define X and y feature_cols = ['year', 'miles', 'doors', 'vtype'] X = train[feature_cols] y = train.price # instantiate a DecisionTreeRegressor (with random_state=1) from sklearn.tree import DecisionTreeRegressor treereg = DecisionTreeRegressor(random_state=1) treereg # use leave-one-out cross-validation (LOOCV) to estimate the RMSE for this model import numpy as np from sklearn.model_selection import cross_val_score scores = cross_val_score(treereg, X, y, cv=14, scoring='neg_mean_squared_error') np.mean(np.sqrt(-scores)) """ Explanation: Recap: Before every split, this process is repeated for every feature, and the feature and cutpoint that produces the lowest MSE is chosen. Building a regression tree in scikit-learn End of explanation """ # try different values one-by-one treereg = DecisionTreeRegressor(max_depth=1, random_state=1) scores = cross_val_score(treereg, X, y, cv=14, scoring='neg_mean_squared_error') np.mean(np.sqrt(-scores)) """ Explanation: What happens when we grow a tree too deep? 
Left: Regression tree for Salary grown deeper Right: Comparison of the training, testing, and cross-validation errors for trees with different numbers of leaves The training error continues to go down as the tree size increases (due to overfitting), but the lowest cross-validation error occurs for a tree with 3 leaves. Tuning a regression tree Let's try to reduce the RMSE by tuning the max_depth parameter: End of explanation """ # list of values to try max_depth_range = range(1, 8) # list to store the average RMSE for each value of max_depth RMSE_scores = [] # use LOOCV with each value of max_depth for depth in max_depth_range: treereg = DecisionTreeRegressor(max_depth=depth, random_state=1) MSE_scores = cross_val_score(treereg, X, y, cv=14, scoring='neg_mean_squared_error') RMSE_scores.append(np.mean(np.sqrt(-MSE_scores))) %matplotlib inline import matplotlib.pyplot as plt # plot max_depth (x-axis) versus RMSE (y-axis) plt.plot(max_depth_range, RMSE_scores) plt.xlabel('max_depth') plt.ylabel('RMSE (lower is better)') # max_depth=3 was best, so fit a tree using that parameter treereg = DecisionTreeRegressor(max_depth=3, random_state=1) treereg.fit(X, y) # "Gini importance" of each feature: the (normalized) total reduction of error brought by that feature pd.DataFrame({'feature':feature_cols, 'importance':treereg.feature_importances_}) """ Explanation: Or, we could write a loop to try a range of values: End of explanation """ # create a Graphviz file from sklearn.tree import export_graphviz export_graphviz(treereg, out_file='tree_vehicles.dot', feature_names=feature_cols) # At the command line, run this to convert to PNG: # dot -Tpng tree_vehicles.dot -o tree_vehicles.png """ Explanation: Creating a tree diagram End of explanation """ # read the testing data with zipfile.ZipFile('../datasets/vehicles_test.csv.zip', 'r') as z: f = z.open('vehicles_test.csv') test = pd.io.parsers.read_table(f, index_col=False, sep=',') test['vtype'] = test.vtype.map({'car':0, 
'truck':1}) test """ Explanation: Reading the internal nodes: samples: number of observations in that node before splitting mse: MSE calculated by comparing the actual response values in that node against the mean response value in that node rule: rule used to split that node (go left if true, go right if false) Reading the leaves: samples: number of observations in that node value: mean response value in that node mse: MSE calculated by comparing the actual response values in that node against "value" Making predictions for the testing data End of explanation """ # use fitted model to make predictions on testing data X_test = test[feature_cols] y_test = test.price y_pred = treereg.predict(X_test) y_pred # calculate RMSE from sklearn.metrics import mean_squared_error np.sqrt(mean_squared_error(y_test, y_pred)) """ Explanation: Question: Using the tree diagram above, what predictions will the model make for each observation? End of explanation """ # read in the data with zipfile.ZipFile('../datasets/titanic.csv.zip', 'r') as z: f = z.open('titanic.csv') titanic = pd.read_csv(f, sep=',', index_col=0) # encode female as 0 and male as 1 titanic['Sex'] = titanic.Sex.map({'female':0, 'male':1}) # fill in the missing values for age with the median age titanic.Age.fillna(titanic.Age.median(), inplace=True) # create a DataFrame of dummy variables for Embarked embarked_dummies = pd.get_dummies(titanic.Embarked, prefix='Embarked') embarked_dummies.drop(embarked_dummies.columns[0], axis=1, inplace=True) # concatenate the original DataFrame and the dummy DataFrame titanic = pd.concat([titanic, embarked_dummies], axis=1) # print the updated DataFrame titanic.head() """ Explanation: Part 2: Classification trees Example: Predict whether Barack Obama or Hillary Clinton will win the Democratic primary in a particular county in 2008: Questions: What are the observations? How many observations are there? What is the response variable? What are the features? 
What is the most predictive feature? Why does the tree split on high school graduation rate twice in a row? What is the class prediction for the following county: 15% African-American, 90% high school graduation rate, located in the South, high poverty, high population density? What is the predicted probability for that same county? Comparing regression trees and classification trees |regression trees|classification trees| |---|---| |predict a continuous response|predict a categorical response| |predict using mean response of each leaf|predict using most commonly occuring class of each leaf| |splits are chosen to minimize MSE|splits are chosen to minimize Gini index (discussed below)| Splitting criteria for classification trees Common options for the splitting criteria: classification error rate: fraction of training observations in a region that don't belong to the most common class Gini index: measure of total variance across classes in a region Example of classification error rate Pretend we are predicting whether someone buys an iPhone or an Android: At a particular node, there are 25 observations (phone buyers), of whom 10 bought iPhones and 15 bought Androids. Since the majority class is Android, that's our prediction for all 25 observations, and thus the classification error rate is 10/25 = 40%. Our goal in making splits is to reduce the classification error rate. 
Let's try splitting on gender: Males: 2 iPhones and 12 Androids, thus the predicted class is Android Females: 8 iPhones and 3 Androids, thus the predicted class is iPhone Classification error rate after this split would be 5/25 = 20% Compare that with a split on age: 30 or younger: 4 iPhones and 8 Androids, thus the predicted class is Android 31 or older: 6 iPhones and 7 Androids, thus the predicted class is Android Classification error rate after this split would be 10/25 = 40% The decision tree algorithm will try every possible split across all features, and choose the split that reduces the error rate the most. Example of Gini index Calculate the Gini index before making a split: $$1 - \left(\frac {iPhone} {Total}\right)^2 - \left(\frac {Android} {Total}\right)^2 = 1 - \left(\frac {10} {25}\right)^2 - \left(\frac {15} {25}\right)^2 = 0.48$$ The maximum value of the Gini index is 0.5, and occurs when the classes are perfectly balanced in a node. The minimum value of the Gini index is 0, and occurs when there is only one class represented in a node. A node with a lower Gini index is said to be more "pure". Evaluating the split on gender using Gini index: $$\text{Males: } 1 - \left(\frac {2} {14}\right)^2 - \left(\frac {12} {14}\right)^2 = 0.24$$ $$\text{Females: } 1 - \left(\frac {8} {11}\right)^2 - \left(\frac {3} {11}\right)^2 = 0.40$$ $$\text{Weighted Average: } 0.24 \left(\frac {14} {25}\right) + 0.40 \left(\frac {11} {25}\right) = 0.31$$ Evaluating the split on age using Gini index: $$\text{30 or younger: } 1 - \left(\frac {4} {12}\right)^2 - \left(\frac {8} {12}\right)^2 = 0.44$$ $$\text{31 or older: } 1 - \left(\frac {6} {13}\right)^2 - \left(\frac {7} {13}\right)^2 = 0.50$$ $$\text{Weighted Average: } 0.44 \left(\frac {12} {25}\right) + 0.50 \left(\frac {13} {25}\right) = 0.47$$ Again, the decision tree algorithm will try every possible split, and will choose the split that reduces the Gini index (and thus increases the "node purity") the most. 
Comparing classification error rate and Gini index Gini index is generally preferred because it will make splits that increase node purity, even if that split does not change the classification error rate. Node purity is important because we're interested in the class proportions in each region, since that's how we calculate the predicted probability of each class. scikit-learn's default splitting criteria for classification trees is Gini index. Note: There is another common splitting criteria called cross-entropy. It's numerically similar to Gini index, but slower to compute, thus it's not as popular as Gini index. Building a classification tree in scikit-learn We'll build a classification tree using the Titanic data: End of explanation """ # define X and y feature_cols = ['Pclass', 'Sex', 'Age', 'Embarked_Q', 'Embarked_S'] X = titanic[feature_cols] y = titanic.Survived # fit a classification tree with max_depth=3 on all data from sklearn.tree import DecisionTreeClassifier treeclf = DecisionTreeClassifier(max_depth=3, random_state=1) treeclf.fit(X, y) # create a Graphviz file export_graphviz(treeclf, out_file='tree_titanic.dot', feature_names=feature_cols) # At the command line, run this to convert to PNG: # dot -Tpng tree_titanic.dot -o tree_titanic.png """ Explanation: Survived: 0=died, 1=survived (response variable) Pclass: 1=first class, 2=second class, 3=third class What will happen if the tree splits on this feature? Sex: 0=female, 1=male Age: numeric value Embarked: C or Q or S End of explanation """ # compute the feature importances pd.DataFrame({'feature':feature_cols, 'importance':treeclf.feature_importances_}) """ Explanation: Notice the split in the bottom right: the same class is predicted in both of its leaves. That split didn't affect the classification error rate, though it did increase the node purity, which is important because it increases the accuracy of our predicted probabilities. End of explanation """
jdhp-docs/python_notebooks
nb_sci_ai/ai_ml_id3_fr.ipynb
mit
import pandas as pd """ Explanation: L'apprentissage d'arbres de décision avec ID3 TODO: - faire un document séparé pour ID3, CART, C4.5, etc. ou mettre tout dans ce notebook ??? End of explanation """ data_list = [['soleil', 'chaud', 'haute', 'faux', 'NePasJouer'], ['soleil', 'chaud', 'haute', 'vrai', 'NePasJouer'], ['couvert', 'chaud', 'haute', 'faux', 'Jouer'], ['pluie', 'bon', 'haute', 'faux', 'Jouer'], ['pluie', 'frais', 'normale', 'faux', 'Jouer'], ['pluie', 'frais', 'normale', 'vrai', 'NePasJouer'], ['couvert', 'frais', 'normale', 'vrai', 'Jouer'], ['soleil', 'bon', 'haute', 'faux', 'NePasJouer'], ['soleil', 'frais', 'normale', 'faux', 'Jouer'], ['pluie', 'bon', 'normale', 'faux', 'Jouer'], ['soleil', 'bon', 'normale', 'vrai', 'Jouer'], ['couvert', 'bon', 'haute', 'vrai', 'Jouer'], ['couvert', 'chaud', 'normale', 'faux', 'Jouer'], ['pluie', 'bon', 'haute', 'vrai', 'NePasJouer']] columns_list = ['ciel', 'temperature', 'humidite', 'vent', 'golf'] df = pd.DataFrame(data_list, columns=columns_list) df """ Explanation: Principales implémentations en Python http://scikit-learn.org/stable/modules/tree.html ... Les arbres de décision TODO: - mettre un exemple: illustration générée à partir d'une bibliothèque dédiée - expliquer la signification des noeuds, des entrées, des sorties, etc. Les noeuds des arbres de décisions sont appelés des attributs. Ils ont un ensemble fini de valeurs possibles. Les feuilles de l'arbre sont les classes. Générer un arbre de décisions à partir d'une base d'exemples On cherche à générer (apprendre) un arbre de décision à partir de la base d'exemples suivante: (Tiré de Quinlan 1983) End of explanation """ # TODO """ Explanation: L'algorithme ID3 Quinlan, 1979 TODO... On construit automatiquement un arbre à partir d'une base d'exemples. Construction de l'arbre: algorithme récursif ..., ..., critère d'arret, ... 
Choix des attributs on pourrait se contenter de choisir les attributs au hasard pour chaque noeud de l'arbre dans l'algorithme précédent d'accord mais si on veut construire l'arbre le plus simple possible (une des bonnes raisons de vouloir faire un arbre simple: un arbre simple a plus de chance d'être bon en généralisation qu'un arbre inutilement compliqué) d'abord qu'est-ce qu'un arbre simple ? un critère possible (arbitraire): un arbre simple est un arbre qui minimise le nombre de questions en moyenne (ie qui minimise le nombre de noeuds traversé en moyenne pour définir la classe d'un exemple) ok mais comment on fait ? une première approche très naive pourrait consister à générer tous les arbres possible suivant l'algorithme 1., compter (ou calculer avec des probas) le nombre moyen de question sur la base des exemples et retenir l'arbre qui minimise ce nombre moyen de question problème: c'est pas très rusé et ça peut être très long si il y a beaucoup d'attributs et/ou d'exemples ok mais on peut faire mieux que ça non ? Entropie = mesure du désordre dans une collection d'objets Si tous les objets appartiennent à la même classe, pas de désordre (entropie nuelle) On choisi l'attribut qui minimise le désordre de la partition résultante ok mais comment on mesure l'entropie d'un ensemble d'exemples ? ... Shannon a proposé une mesure de l'entropie ... Pourquoi? ... slide 18: Meilleur ensemble de questions (code de Huffman) Nombre moyen de questions: $$ P(rouge) \times 1 + P(bleu) \times 2 + P(vert) \times 3 + P(marron) \times 3 = \frac12 \times 1 + \frac14 \times 2 + \frac18 \times 3 + \frac18 \times 3 = 1.75 $$ Ok, c'est intuitif si on regarde l'arbre des tirages possibles dans le slide d'avant. Les couleurs représentent les classes de notre problème. Entropie: $$ Entropie d'un ensemble d'exemples = - \sum_{i \in \Omega} p_i \log_2 (p_i) $$ Avec $\Omega$ l'ensemble des classes du problème. $b$ bits permettent de coder $i = 2^b$ informations. 
$b = \log_2(i)$ bits permettent de coder $i$ informations. Une implémentation en Python Algorithme End of explanation """ data_list = [['soleil', 'chaud', 'haute', 'faux', 'NePasJouer'], ['soleil', 'chaud', 'haute', 'vrai', 'NePasJouer'], ['couvert', 'chaud', 'haute', 'faux', 'Jouer'], ['pluie', 'bon', 'haute', 'faux', 'Jouer'], ['pluie', 'frais', 'normale', 'faux', 'Jouer'], ['pluie', 'frais', 'normale', 'vrai', 'NePasJouer'], ['couvert', 'frais', 'normale', 'vrai', 'Jouer'], ['soleil', 'bon', 'haute', 'faux', 'NePasJouer'], ['soleil', 'frais', 'normale', 'faux', 'Jouer'], ['pluie', 'bon', 'normale', 'faux', 'Jouer'], ['soleil', 'bon', 'normale', 'vrai', 'Jouer'], ['couvert', 'bon', 'haute', 'vrai', 'Jouer'], ['couvert', 'chaud', 'normale', 'faux', 'Jouer'], ['pluie', 'bon', 'haute', 'vrai', 'NePasJouer']] columns_list = ['ciel', 'temperature', 'humidite', 'vent', 'golf'] df = pd.DataFrame(data_list, columns=columns_list) df """ Explanation: Exemple 1 Tiré de "exemple1.txt" de JG Ganascia (Tiré de Quinlan 1983) End of explanation """ data_list = [['soleil', 'chaud', 'haute', 'faux', 'NePasJouer'], ['soleil', 'chaud', 'haute', 'vrai', 'NePasJouer'], ['couvert', 'chaud', 'haute', 'faux', 'Jouer'], ['pluie', 'bon', 'haute', 'faux', 'Jouer'], ['pluie', 'frais', 'normale', 'faux', 'Jouer'], ['pluie', 'frais', 'normale', 'vrai', 'NePasJouer'], ['pluie', 'frais', 'normale', 'vrai', 'Jouer'], ['soleil', 'bon', 'haute', 'faux', 'NePasJouer'], ['soleil', 'frais', 'normale', 'faux', 'Jouer'], ['pluie', 'bon', 'normale', 'faux', 'Jouer'], ['soleil', 'bon', 'normale', 'vrai', 'Jouer'], ['couvert', 'bon', 'haute', 'vrai', 'Jouer'], ['couvert', 'chaud', 'normale', 'faux', 'Jouer'], ['pluie', 'bon', 'haute', 'vrai', 'NePasJouer']] columns_list = ['ciel', 'temperature', 'humidite', 'vent', 'golf'] df = pd.DataFrame(data_list, columns=columns_list) df """ Explanation: Exemple 2 Tiré de "exemple2.txt" de JG Ganascia. 
La seule différence avec l'exemple 1: première colonne de la ligne 6 (du dataframe). (Tiré de Quinlan 1983) End of explanation """ data_list = [['young', 'myope', 'no', 'reduced', 'none'], ['young', 'myope', 'no', 'normal', 'soft'], ['young', 'myope', 'yes', 'reduced', 'none'], ['young', 'myope', 'yes', 'normal', 'hard'], ['young', 'hypermetrope', 'no', 'reduced', 'none'], ['young', 'hypermetrope', 'no', 'normal', 'soft'], ['young', 'hypermetrope', 'yes', 'reduced', 'none'], ['young', 'hypermetrope', 'yes', 'normal', 'hard'], ['pre-presbyopic', 'myope', 'no', 'reduced', 'none'], ['pre-presbyopic', 'myope', 'no', 'normal', 'soft'], ['pre-presbyopic', 'myope', 'yes', 'reduced', 'none'], ['pre-presbyopic', 'myope', 'yes', 'normal', 'hard'], ['pre-presbyopic', 'hypermetrope', 'no', 'reduced', 'none'], ['pre-presbyopic', 'hypermetrope', 'no', 'normal', 'soft'], ['pre-presbyopic', 'hypermetrope', 'yes', 'reduced', 'none'], ['pre-presbyopic', 'hypermetrope', 'yes', 'normal', 'none'], ['presbyopic', 'myope', 'no', 'reduced', 'none'], ['presbyopic', 'myope', 'no', 'normal', 'none'], ['presbyopic', 'myope', 'yes', 'reduced', 'none'], ['presbyopic', 'myope', 'yes', 'normal', 'hard'], ['presbyopic', 'hypermetrope', 'no', 'reduced', 'none'], ['presbyopic', 'hypermetrope', 'no', 'normal', 'soft'], ['presbyopic', 'hypermetrope', 'yes', 'reduced', 'none'], ['presbyopic', 'hypermetrope', 'yes', 'normal', 'none']] columns_list = ['age', 'prescription', 'astigmatic', 'tear_rate', 'lenses'] df = pd.DataFrame(data_list, columns=columns_list) df """ Explanation: Exemple 3 Tiré de "lentilles.txt" de JG Ganascia TODO: traduire la base suivante en Français... End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/cmcc/cmip6/models/sandbox-3/ocnbgchem.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'cmcc', 'sandbox-3', 'ocnbgchem') """ Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem MIP Era: CMIP6 Institute: CMCC Source ID: SANDBOX-3 Topic: Ocnbgchem Sub-Topics: Tracers. Properties: 65 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:50 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport 3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks 4. Key Properties --&gt; Transport Scheme 5. Key Properties --&gt; Boundary Forcing 6. Key Properties --&gt; Gas Exchange 7. Key Properties --&gt; Carbon Chemistry 8. Tracers 9. Tracers --&gt; Ecosystem 10. Tracers --&gt; Ecosystem --&gt; Phytoplankton 11. Tracers --&gt; Ecosystem --&gt; Zooplankton 12. Tracers --&gt; Disolved Organic Matter 13. Tracers --&gt; Particules 14. Tracers --&gt; Dic Alkalinity 1. Key Properties Ocean Biogeochemistry key properties 1.1. 
Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of ocean biogeochemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean biogeochemistry model code (PISCES 2.0,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Geochemical" # "NPZD" # "PFT" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of ocean biogeochemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Fixed" # "Variable" # "Mix of both" # TODO - please enter value(s) """ Explanation: 1.4. Elemental Stoichiometry Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe elemental stoichiometry (fixed, variable, mix of the two) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.5. Elemental Stoichiometry Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe which elements have fixed/variable stoichiometry End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.6. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of all prognostic tracer variables in the ocean biogeochemistry component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.7. Diagnostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of all diagnotic tracer variables in the ocean biogeochemistry component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.damping') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.8. Damping Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any tracer damping used (such as artificial correction or relaxation to climatology,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport Time stepping method for passive tracers transport in ocean biogeochemistry 2.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time stepping framework for passive tracers End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.2. Timestep If Not From Ocean Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Time step for passive tracers (if different from ocean) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks Time stepping framework for biology sources and sinks in ocean biogeochemistry 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time stepping framework for biology sources and sinks End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Timestep If Not From Ocean Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Time step for biology sources and sinks (if different from ocean) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline" # "Online" # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Transport Scheme Transport scheme in ocean biogeochemistry 4.1. 
Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of transport scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Use that of ocean model" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Transport scheme used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Use Different Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Decribe transport scheme if different than that of ocean model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Atmospheric Chemistry model" # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Boundary Forcing Properties of biogeochemistry boundary forcing 5.1. Atmospheric Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how atmospheric deposition is modeled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Land Surface model" # TODO - please enter value(s) """ Explanation: 5.2. 
River Input Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how river input is modeled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Sediments From Boundary Conditions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List which sediments are speficied from boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Sediments From Explicit Model Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List which sediments are speficied from explicit sediment model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Gas Exchange *Properties of gas exchange in ocean biogeochemistry * 6.1. CO2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is CO2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.2. 
CO2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe CO2 gas exchange End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.3. O2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is O2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.4. O2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe O2 gas exchange End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.5. DMS Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is DMS gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. DMS Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify DMS gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.7. N2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is N2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.8. N2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify N2 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.9. N2O Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is N2O gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.10. N2O Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify N2O gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.11. CFC11 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is CFC11 gas exchange modeled ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.12. CFC11 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify CFC11 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.13. CFC12 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is CFC12 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.14. CFC12 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify CFC12 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.15. SF6 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is SF6 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.16. 
SF6 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify SF6 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.17. 13CO2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is 13CO2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.18. 13CO2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify 13CO2 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.19. 14CO2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is 14CO2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.20. 14CO2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify 14CO2 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.21. Other Gases Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any other gas exchange End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other protocol" # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Carbon Chemistry Properties of carbon chemistry biogeochemistry 7.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how carbon chemistry is modeled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea water" # "Free" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.2. PH Scale Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If NOT OMIP protocol, describe pH scale. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Constants If Not OMIP Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If NOT OMIP protocol, list carbon chemistry constants. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Tracers Ocean biogeochemistry tracers 8.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of tracers in ocean biogeochemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.2. Sulfur Cycle Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is sulfur cycle modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrogen (N)" # "Phosphorous (P)" # "Silicium (S)" # "Iron (Fe)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Nutrients Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List nutrient species present in ocean biogeochemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrates (NO3)" # "Amonium (NH4)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.4. Nitrous Species If N Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If nitrogen present, list nitrous species. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dentrification" # "N fixation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.5. Nitrous Processes If N Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If nitrogen present, list nitrous processes. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Tracers --&gt; Ecosystem Ecosystem properties in ocean biogeochemistry 9.1. Upper Trophic Levels Definition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Definition of upper trophic level (e.g. based on size) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Upper Trophic Levels Treatment Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Define how upper trophic level are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "PFT including size based (specify both below)" # "Size based only (specify below)" # "PFT only (specify below)" # TODO - please enter value(s) """ Explanation: 10. Tracers --&gt; Ecosystem --&gt; Phytoplankton Phytoplankton properties in ocean biogeochemistry 10.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of phytoplankton End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Diatoms" # "Nfixers" # "Calcifiers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.2. 
Pft Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Phytoplankton functional types (PFT) (if applicable) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microphytoplankton" # "Nanophytoplankton" # "Picophytoplankton" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.3. Size Classes Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Phytoplankton size classes (if applicable) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "Size based (specify below)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Tracers --&gt; Ecosystem --&gt; Zooplankton Zooplankton properties in ocean biogeochemistry 11.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of zooplankton End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microzooplankton" # "Mesozooplankton" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Size Classes Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Zooplankton size classes (if applicable) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. 
Tracers --&gt; Disolved Organic Matter Disolved organic matter properties in ocean biogeochemistry 12.1. Bacteria Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there bacteria representation ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Labile" # "Semi-labile" # "Refractory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. Lability Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe treatment of lability in dissolved organic matter End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diagnostic" # "Diagnostic (Martin profile)" # "Diagnostic (Balast)" # "Prognostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Tracers --&gt; Particules Particulate carbon properties in ocean biogeochemistry 13.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is particulate carbon represented in ocean biogeochemistry? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "POC" # "PIC (calcite)" # "PIC (aragonite" # "BSi" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Types If Prognostic Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If prognostic, type(s) of particulate matter taken into account End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "No size spectrum used" # "Full size spectrum" # "Discrete size classes (specify which below)" # TODO - please enter value(s) """ Explanation: 13.3. Size If Prognostic Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.4. Size If Discrete Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic and discrete size, describe which size classes are used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Function of particule size" # "Function of particule type (balast)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Sinking Speed If Prognostic Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic, method for calculation of sinking speed of particules End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "C13" # "C14)" # TODO - please enter value(s) """ Explanation: 14. Tracers --&gt; Dic Alkalinity DIC and alkalinity properties in ocean biogeochemistry 14.1. 
Carbon Isotopes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which carbon isotopes are modelled (C13, C14)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.2. Abiotic Carbon Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is abiotic carbon modelled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Prognostic" # "Diagnostic)" # TODO - please enter value(s) """ Explanation: 14.3. Alkalinity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is alkalinity modelled ? End of explanation """
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/recommendation_systems/solutions/wals.ipynb
apache-2.0
import os PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1 # Do not change these os.environ["PROJECT"] = PROJECT os.environ["BUCKET"] = BUCKET os.environ["REGION"] = REGION os.environ["TFVERSION"] = "1.15" %%bash gcloud config set project $PROJECT gcloud config set compute/region $REGION import tensorflow as tf print(tf.__version__) """ Explanation: Collaborative Filtering on Google Analytics Data Learning objectives Prepare the user-item matrix and use it with WALS. Train a WALSMatrixFactorization within TensorFlow locally and on AI Platform. Visualize the embedding vectors with principal components analysis. Overview This notebook demonstrates how to implement a WALS matrix refactorization approach to do collaborative filtering. Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook. 
End of explanation """ from google.cloud import bigquery bq = bigquery.Client(project = PROJECT) sql = """ WITH CTE_visitor_page_content AS ( SELECT # Schema: https://support.google.com/analytics/answer/3437719?hl=en # For a completely unique visit-session ID, you combine combination of fullVisitorId and visitNumber: CONCAT(fullVisitorID,'-',CAST(visitNumber AS STRING)) AS visitorId, (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS latestContentId, (LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) - hits.time) AS session_duration FROM `cloud-training-demos.GA360_test.ga_sessions_sample`, UNNEST(hits) AS hits WHERE # only include hits on pages hits.type = "PAGE" GROUP BY fullVisitorId, visitNumber, latestContentId, hits.time ) -- Aggregate web stats SELECT visitorId, latestContentId as contentId, SUM(session_duration) AS session_duration FROM CTE_visitor_page_content WHERE latestContentId IS NOT NULL GROUP BY visitorId, latestContentId HAVING session_duration > 0 """ df = bq.query(sql).to_dataframe() df.head() stats = df.describe() stats df[["session_duration"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5]) # The rating is the session_duration scaled to be in the range 0-1. This will help with training. median = stats.loc["50%", "session_duration"] df["rating"] = 0.3 * df["session_duration"] / median df.loc[df["rating"] > 1, "rating"] = 1 df[["rating"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5]) del df["session_duration"] %%bash rm -rf data mkdir data # TODO 1: Write object to a comma-separated values (csv) file. df.to_csv(path_or_buf = "data/collab_raw.csv", index = False, header = False) !head data/collab_raw.csv """ Explanation: Create raw dataset <p> For collaborative filtering, you don't need to know anything about either the users or the content. Essentially, all you need to know is userId, itemId, and rating that the particular user gave the particular item. 
<p> In this case, you are working with newspaper articles. The company doesn't ask their users to rate the articles. However, you can use the time-spent on the page as a proxy for rating. <p> Normally, you would also add a time filter to this ("latest 7 days"), but your dataset is itself limited to a few days. End of explanation """ import pandas as pd import numpy as np def create_mapping(values, filename): with open(filename, 'w') as ofp: value_to_id = {value:idx for idx, value in enumerate(values.unique())} for value, idx in value_to_id.items(): ofp.write("{},{}\n".format(value, idx)) return value_to_id df = pd.read_csv(filepath_or_buffer = "data/collab_raw.csv", header = None, names = ["visitorId", "contentId", "rating"], dtype = {"visitorId": str, "contentId": str, "rating": np.float}) df.to_csv(path_or_buf = "data/collab_raw.csv", index = False, header = False) user_mapping = create_mapping(df["visitorId"], "data/users.csv") item_mapping = create_mapping(df["contentId"], "data/items.csv") !head -3 data/*.csv df["userId"] = df["visitorId"].map(user_mapping.get) df["itemId"] = df["contentId"].map(item_mapping.get) mapped_df = df[["userId", "itemId", "rating"]] mapped_df.to_csv(path_or_buf = "data/collab_mapped.csv", index = False, header = False) mapped_df.head() """ Explanation: Create dataset for WALS <p> The raw dataset (above) won't work for WALS: <ol> <li> The userId and itemId have to be 0,1,2 ... so you need to create a mapping from visitorId (in the raw data) to userId and contentId (in the raw data) to itemId. <li> You will need to save the above mapping to a file because at prediction time, you'll need to know how to map the contentId in the table above to the itemId. <li> You'll need two files: a "rows" dataset where all the items for a particular user are listed; and a "columns" dataset where all the users for a particular item are listed. 
</ol> <p> ### Mapping End of explanation """ import pandas as pd import numpy as np mapped_df = pd.read_csv(filepath_or_buffer = "data/collab_mapped.csv", header = None, names = ["userId", "itemId", "rating"]) mapped_df.head() NITEMS = np.max(mapped_df["itemId"]) + 1 NUSERS = np.max(mapped_df["userId"]) + 1 mapped_df["rating"] = np.round(mapped_df["rating"].values, 2) print("{} items, {} users, {} interactions".format( NITEMS, NUSERS, len(mapped_df) )) grouped_by_items = mapped_df.groupby("itemId") iter = 0 for item, grouped in grouped_by_items: print(item, grouped["userId"].values, grouped["rating"].values) iter = iter + 1 if iter > 5: break import tensorflow as tf grouped_by_items = mapped_df.groupby("itemId") with tf.python_io.TFRecordWriter("data/users_for_item") as ofp: for item, grouped in grouped_by_items: example = tf.train.Example(features = tf.train.Features(feature = { "key": tf.train.Feature(int64_list = tf.train.Int64List(value = [item])), "indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["userId"].values)), "values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values)) })) ofp.write(example.SerializeToString()) grouped_by_users = mapped_df.groupby("userId") with tf.python_io.TFRecordWriter("data/items_for_user") as ofp: for user, grouped in grouped_by_users: example = tf.train.Example(features = tf.train.Features(feature = { "key": tf.train.Feature(int64_list = tf.train.Int64List(value = [user])), "indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["itemId"].values)), "values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values)) })) ofp.write(example.SerializeToString()) !ls -lrt data """ Explanation: Creating rows and columns datasets End of explanation """ import os import tensorflow as tf from tensorflow.python.lib.io import file_io from tensorflow.contrib.factorization import WALSMatrixFactorization def read_dataset(mode, args): # TODO 
2: Decode the example def decode_example(protos, vocab_size): features = { "key": tf.FixedLenFeature(shape = [1], dtype = tf.int64), "indices": tf.VarLenFeature(dtype = tf.int64), "values": tf.VarLenFeature(dtype = tf.float32)} parsed_features = tf.parse_single_example(serialized = protos, features = features) values = tf.sparse_merge(sp_ids = parsed_features["indices"], sp_values = parsed_features["values"], vocab_size = vocab_size) # Save key to remap after batching # This is a temporary workaround to assign correct row numbers in each batch. # You can ignore details of this part and remap_keys(). key = parsed_features["key"] decoded_sparse_tensor = tf.SparseTensor(indices = tf.concat(values = [values.indices, [key]], axis = 0), values = tf.concat(values = [values.values, [0.0]], axis = 0), dense_shape = values.dense_shape) return decoded_sparse_tensor def remap_keys(sparse_tensor): # Current indices of your SparseTensor that you need to fix bad_indices = sparse_tensor.indices # shape = (current_batch_size * (number_of_items/users[i] + 1), 2) # Current values of your SparseTensor that you need to fix bad_values = sparse_tensor.values # shape = (current_batch_size * (number_of_items/users[i] + 1),) # Since batch is ordered, the last value for a batch index is the user # Find where the batch index chages to extract the user rows # 1 where user, else 0 user_mask = tf.concat(values = [bad_indices[1:,0] - bad_indices[:-1,0], tf.constant(value = [1], dtype = tf.int64)], axis = 0) # shape = (current_batch_size * (number_of_items/users[i] + 1), 2) # Mask out the user rows from the values good_values = tf.boolean_mask(tensor = bad_values, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],) item_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],) user_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 
1))[:, 1] # shape = (current_batch_size,) good_user_indices = tf.gather(params = user_indices, indices = item_indices[:,0]) # shape = (current_batch_size * number_of_items/users[i],) # User and item indices are rank 1, need to make rank 1 to concat good_user_indices_expanded = tf.expand_dims(input = good_user_indices, axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1) good_item_indices_expanded = tf.expand_dims(input = item_indices[:, 1], axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1) good_indices = tf.concat(values = [good_user_indices_expanded, good_item_indices_expanded], axis = 1) # shape = (current_batch_size * number_of_items/users[i], 2) remapped_sparse_tensor = tf.SparseTensor(indices = good_indices, values = good_values, dense_shape = sparse_tensor.dense_shape) return remapped_sparse_tensor def parse_tfrecords(filename, vocab_size): if mode == tf.estimator.ModeKeys.TRAIN: num_epochs = None # indefinitely else: num_epochs = 1 # end-of-input after this files = tf.gfile.Glob(filename = os.path.join(args["input_path"], filename)) # Create dataset from file list dataset = tf.data.TFRecordDataset(files) dataset = dataset.map(map_func = lambda x: decode_example(x, vocab_size)) dataset = dataset.repeat(count = num_epochs) dataset = dataset.batch(batch_size = args["batch_size"]) dataset = dataset.map(map_func = lambda x: remap_keys(x)) return dataset.make_one_shot_iterator().get_next() def _input_fn(): features = { WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords("items_for_user", args["nitems"]), WALSMatrixFactorization.INPUT_COLS: parse_tfrecords("users_for_item", args["nusers"]), WALSMatrixFactorization.PROJECT_ROW: tf.constant(True) } return features, None return _input_fn """ Explanation: To summarize, you created the following data files from collab_raw.csv: <ol> <li> ```collab_mapped.csv``` is essentially the same data as in ```collab_raw.csv``` except that ```visitorId``` and ```contentId``` which are 
business-specific have been mapped to ```userId``` and ```itemId``` which are enumerated in 0,1,2,.... The mappings themselves are stored in ```items.csv``` and ```users.csv``` so that they can be used during inference. <li> ```users_for_item``` contains all the users/ratings for each item in TFExample format <li> ```items_for_user``` contains all the items/ratings for each user in TFExample format </ol> Train with WALS Once you have the dataset, do matrix factorization with WALS using the WALSMatrixFactorization in the contrib directory. This is an estimator model, so it should be relatively familiar. <p> As usual, you write an input_fn to provide the data to the model, and then create the Estimator to do train_and_evaluate. Because it is in contrib and hasn't moved over to tf.estimator yet, you use tf.contrib.learn.Experiment to handle the training loop.<p> End of explanation """ def try_out(): with tf.Session() as sess: fn = read_dataset( mode = tf.estimator.ModeKeys.EVAL, args = {"input_path": "data", "batch_size": 4, "nitems": NITEMS, "nusers": NUSERS}) feats, _ = fn() print(feats["input_rows"].eval()) print(feats["input_rows"].eval()) try_out() def find_top_k(user, item_factors, k): all_items = tf.matmul(a = tf.expand_dims(input = user, axis = 0), b = tf.transpose(a = item_factors)) topk = tf.nn.top_k(input = all_items, k = k) return tf.cast(x = topk.indices, dtype = tf.int64) def batch_predict(args): import numpy as np with tf.Session() as sess: estimator = tf.contrib.factorization.WALSMatrixFactorization( num_rows = args["nusers"], num_cols = args["nitems"], embedding_dimension = args["n_embeds"], model_dir = args["output_dir"]) # This is how you would get the row factors for out-of-vocab user data # row_factors = list(estimator.get_projections(input_fn=read_dataset(tf.estimator.ModeKeys.EVAL, args))) # user_factors = tf.convert_to_tensor(np.array(row_factors)) # But for in-vocab data, the row factors are already in the checkpoint user_factors = 
tf.convert_to_tensor(value = estimator.get_row_factors()[0]) # (nusers, nembeds) # In either case, you have to assume catalog doesn"t change, so col_factors are read in item_factors = tf.convert_to_tensor(value = estimator.get_col_factors()[0])# (nitems, nembeds) # For each user, find the top K items topk = tf.squeeze(input = tf.map_fn(fn = lambda user: find_top_k(user, item_factors, args["topk"]), elems = user_factors, dtype = tf.int64)) with file_io.FileIO(os.path.join(args["output_dir"], "batch_pred.txt"), mode = 'w') as f: for best_items_for_user in topk.eval(): f.write(",".join(str(x) for x in best_items_for_user) + '\n') def train_and_evaluate(args): train_steps = int(0.5 + (1.0 * args["num_epochs"] * args["nusers"]) / args["batch_size"]) steps_in_epoch = int(0.5 + args["nusers"] / args["batch_size"]) print("Will train for {} steps, evaluating once every {} steps".format(train_steps, steps_in_epoch)) def experiment_fn(output_dir): return tf.contrib.learn.Experiment( tf.contrib.factorization.WALSMatrixFactorization( num_rows = args["nusers"], num_cols = args["nitems"], embedding_dimension = args["n_embeds"], model_dir = args["output_dir"]), train_input_fn = read_dataset(tf.estimator.ModeKeys.TRAIN, args), eval_input_fn = read_dataset(tf.estimator.ModeKeys.EVAL, args), train_steps = train_steps, eval_steps = 1, min_eval_frequency = steps_in_epoch ) from tensorflow.contrib.learn.python.learn import learn_runner learn_runner.run(experiment_fn = experiment_fn, output_dir = args["output_dir"]) batch_predict(args) import shutil shutil.rmtree(path = "wals_trained", ignore_errors=True) train_and_evaluate({ "output_dir": "wals_trained", "input_path": "data/", "num_epochs": 0.05, "nitems": NITEMS, "nusers": NUSERS, "batch_size": 512, "n_embeds": 10, "topk": 3 }) !ls wals_trained !head wals_trained/batch_pred.txt """ Explanation: This code is helpful in developing the input function. You don't need it in production. 
End of explanation """ os.environ["NITEMS"] = str(NITEMS) os.environ["NUSERS"] = str(NUSERS) %%bash rm -rf wals.tar.gz wals_trained gcloud ai-platform local train \ --module-name=walsmodel.task \ --package-path=${PWD}/walsmodel \ -- \ --output_dir=${PWD}/wals_trained \ --input_path=${PWD}/data \ --num_epochs=0.01 --nitems=${NITEMS} --nusers=${NUSERS} \ --job-dir=./tmp """ Explanation: Run as a Python module Let's run it as Python module for just a few steps. End of explanation """ %%bash gsutil -m cp data/* gs://${BUCKET}/wals/data %%bash OUTDIR=gs://${BUCKET}/wals/model_trained JOBNAME=wals_$(date -u +%y%m%d_%H%M%S) echo $OUTDIR $REGION $JOBNAME gsutil -m rm -rf $OUTDIR gcloud ai-platform jobs submit training $JOBNAME \ --region=$REGION \ --module-name=walsmodel.task \ --package-path=${PWD}/walsmodel \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --scale-tier=BASIC_GPU \ --runtime-version=$TFVERSION \ -- \ --output_dir=$OUTDIR \ --input_path=gs://${BUCKET}/wals/data \ --num_epochs=10 --nitems=${NITEMS} --nusers=${NUSERS} """ Explanation: Run on Cloud End of explanation """ def get_factors(args): with tf.Session() as sess: estimator = tf.contrib.factorization.WALSMatrixFactorization( num_rows = args["nusers"], num_cols = args["nitems"], embedding_dimension = args["n_embeds"], model_dir = args["output_dir"]) row_factors = estimator.get_row_factors()[0] col_factors = estimator.get_col_factors()[0] return row_factors, col_factors args = { "output_dir": "gs://{}/wals/model_trained".format(BUCKET), "nitems": NITEMS, "nusers": NUSERS, "n_embeds": 10 } user_embeddings, item_embeddings = get_factors(args) print(user_embeddings[:3]) print(item_embeddings[:3]) """ Explanation: This will take <b>10 minutes</b> to complete. Rerun the above command until the jobs gets submitted. Get row and column factors Once you have a trained WALS model, you can get row and column factors (user and item embeddings) from the checkpoint file. 
You'll look at how to use these in the section on building a recommendation system using deep neural networks. End of explanation """ import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn.decomposition import PCA pca = PCA(n_components = 3) pca.fit(user_embeddings) # TODO 3: Apply the mapping (transform) to user embeddings user_embeddings_pca = pca.transform(user_embeddings) fig = plt.figure(figsize = (8,8)) ax = fig.add_subplot(111, projection = "3d") xs, ys, zs = user_embeddings_pca[::150].T ax.scatter(xs, ys, zs) """ Explanation: You can visualize the embedding vectors using dimensional reduction techniques such as PCA. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/inpe/cmip6/models/sandbox-2/seaice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-2', 'seaice') """ Explanation: ES-DOC CMIP6 Model Properties - Seaice MIP Era: CMIP6 Institute: INPE Source ID: SANDBOX-2 Topic: Seaice Sub-Topics: Dynamics, Thermodynamics, Radiative Processes. Properties: 80 (63 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:07 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --&gt; Model 2. Key Properties --&gt; Variables 3. Key Properties --&gt; Seawater Properties 4. Key Properties --&gt; Resolution 5. Key Properties --&gt; Tuning Applied 6. Key Properties --&gt; Key Parameter Values 7. Key Properties --&gt; Assumptions 8. Key Properties --&gt; Conservation 9. Grid --&gt; Discretisation --&gt; Horizontal 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Seaice Categories 12. Grid --&gt; Snow On Seaice 13. Dynamics 14. Thermodynamics --&gt; Energy 15. Thermodynamics --&gt; Mass 16. Thermodynamics --&gt; Salt 17. Thermodynamics --&gt; Salt --&gt; Mass Transport 18. 
Thermodynamics --&gt; Salt --&gt; Thermodynamics 19. Thermodynamics --&gt; Ice Thickness Distribution 20. Thermodynamics --&gt; Ice Floe Size Distribution 21. Thermodynamics --&gt; Melt Ponds 22. Thermodynamics --&gt; Snow Processes 23. Radiative Processes 1. Key Properties --&gt; Model Name of seaice model used. 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of sea ice model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.variables.prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea ice temperature" # "Sea ice concentration" # "Sea ice thickness" # "Sea ice volume per grid cell area" # "Sea ice u-velocity" # "Sea ice v-velocity" # "Sea ice enthalpy" # "Internal ice stress" # "Salinity" # "Snow temperature" # "Snow depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Variables List of prognostic variable in the sea ice model. 2.1. Prognostic Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the sea ice component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS-10" # "Constant" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. 
Key Properties --&gt; Seawater Properties Properties of seawater relevant to sea ice 3.1. Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Ocean Freezing Point Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant seawater freezing point, specify this value. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Resolution Resolution of the sea ice grid 4.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. 
Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning applied to sea ice model component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Target Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Simulations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 *Which simulations had tuning applied, e.g. all, not historical, only pi-control? * End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Metrics Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any observed metrics used in tuning model/parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.5. Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Which variables were changed during the tuning process? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ice strength (P*) in units of N m{-2}" # "Snow conductivity (ks) in units of W m{-1} K{-1} " # "Minimum thickness of ice created in leads (h0) in units of m" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Key Parameter Values Values of key parameters 6.1. Typical Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N What values were specificed for the following parameters if used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Additional Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If you have any additional paramterised values that you have used (e.g. 
minimum open water fraction or bare ice albedo), please provide them here as a comma separated list End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.description') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Assumptions Assumptions made in the sea ice model 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General overview description of any key assumptions made in this model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. On Diagnostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Missing Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the sea ice component 8.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Provide a general description of conservation methodology. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.properties') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Mass" # "Salt" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Properties Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in sea ice by the numerical schemes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.4. Was Flux Correction Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does conservation involved flux correction? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Corrected Conserved Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Ocean grid" # "Atmosphere Grid" # "Own Grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Grid --&gt; Discretisation --&gt; Horizontal Sea ice discretisation in the horizontal 9.1. Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Grid on which sea ice is horizontal discretised? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Structured grid" # "Unstructured grid" # "Adaptive grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Grid Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the type of sea ice grid? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite differences" # "Finite elements" # "Finite volumes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.3. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the advection scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. 
Thermodynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model thermodynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.5. Dynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model dynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.6. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional horizontal discretisation details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Zero-layer" # "Two-layers" # "Multi-layers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Sea ice vertical properties 10.1. Layering Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 10.2. 
Number Of Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using multi-layers specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional vertical grid details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Seaice Categories What method is used to represent sea ice categories ? 11.1. Has Mulitple Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Set to true if the sea ice model has multiple sea ice categories. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Number Of Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Category Limits Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify each of the category limits. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Ice Thickness Distribution Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the sea ice thickness distribution scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.other') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Other Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Grid --&gt; Snow On Seaice Snow on sea ice details 12.1. Has Snow On Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is snow on ice represented in this model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 12.2. Number Of Snow Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels of snow on ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Snow Fraction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how the snow fraction on sea ice is determined End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.4. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional details related to snow on ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.horizontal_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamics Sea Ice Dynamics 13.1. Horizontal Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of horizontal advection of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Transport In Thickness Space Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice transport in thickness space (i.e. in thickness categories)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Hibler 1979" # "Rothrock 1975" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Ice Strength Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which method of sea ice strength formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.redistribution') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Rafting" # "Ridging" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which processes can redistribute sea ice (including thickness)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.rheology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Free-drift" # "Mohr-Coloumb" # "Visco-plastic" # "Elastic-visco-plastic" # "Elastic-anisotropic-plastic" # "Granular" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Rheology Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Rheology, what is the ice deformation formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice latent heat (Semtner 0-layer)" # "Pure ice latent and sensible heat" # "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)" # "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. 
Thermodynamics --&gt; Energy Processes related to energy in sea ice thermodynamics 14.1. Enthalpy Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the energy formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice" # "Saline ice" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Thermal Conductivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What type of thermal conductivity is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Conduction fluxes" # "Conduction and radiation heat fluxes" # "Conduction, radiation and latent heat transport" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.3. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of heat diffusion? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heat Reservoir" # "Thermal Fixed Salinity" # "Thermal Varying Salinity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.4. Basal Heat Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method by which basal ocean heat flux is handled? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.5. 
Fixed Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.6. Heat Content Of Precipitation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which the heat content of precipitation is handled. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.7. Precipitation Effects On Salinity Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Thermodynamics --&gt; Mass Processes related to mass in sea ice thermodynamics 15.1. New Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which new sea ice is formed in open water. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. 
Ice Vertical Growth And Melt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs the vertical growth and melt of sea ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Floe-size dependent (Bitz et al 2001)" # "Virtual thin ice melting (for single-category)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Ice Lateral Melting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice lateral melting? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.4. Ice Surface Sublimation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs sea ice surface sublimation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.5. Frazil Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method of frazil ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16. Thermodynamics --&gt; Salt Processes related to salt in sea ice thermodynamics. 16.1. 
Has Multiple Sea Ice Salinities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16.2. Sea Ice Salinity Thermal Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does sea ice salinity impact the thermal properties of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Thermodynamics --&gt; Salt --&gt; Mass Transport Mass transport of salt 17.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the mass transport of salt calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 17.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics Salt thermodynamics 18.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the thermodynamic calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 18.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Virtual (enhancement of thermal conductivity, thin ice melting)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Thermodynamics --&gt; Ice Thickness Distribution Ice thickness distribution details. 19.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice thickness distribution represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Parameterised" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Thermodynamics --&gt; Ice Floe Size Distribution Ice floe-size distribution details. 20.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice floe-size represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Please provide further details on any parameterisation of floe-size. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 21. Thermodynamics --&gt; Melt Ponds Characteristics of melt ponds. 21.1. 
Are Included Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are melt ponds included in the sea ice model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flocco and Feltham (2010)" # "Level-ice melt ponds" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.2. Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What method of melt pond formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Albedo" # "Freshwater" # "Heat" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.3. Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What do melt ponds have an impact on? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22. Thermodynamics --&gt; Snow Processes Thermodynamic processes in snow on sea ice 22.1. Has Snow Aging Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has a snow aging scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Snow Aging Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow aging scheme. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.3. Has Snow Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has snow ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.4. Snow Ice Formation Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow ice formation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.5. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the impact of ridging on snow cover? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Single-layered heat diffusion" # "Multi-layered heat diffusion" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.6. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the heat diffusion through snow methodology in sea ice thermodynamics? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Parameterized" # "Multi-band albedo" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Processes Sea Ice Radiative Processes 23.1. Surface Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method used to handle surface albedo. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Exponential attenuation" # "Ice radiation transmission per category" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. Ice Radiation Transmission Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method by which solar radiation through sea ice is handled. End of explanation """
robblack007/clase-cinematica-robot
Practicas/practica4/Problemas.ipynb
mit
def DH_simbolico(a, d, α, θ): from sympy import Matrix, sin, cos # YOUR CODE HERE raise NotImplementedError() from sympy import Matrix, sin, cos, pi from nose.tools import assert_equal assert_equal(DH_simbolico(0,0,0,pi/2), Matrix([[0,-1,0,0],[1,0,0,0], [0,0,1,0],[0,0,0,1]])) assert_equal(DH_simbolico(0,0,pi/2,0), Matrix([[1,0,0,0],[0,0,-1,0], [0,1,0,0],[0,0,0,1]])) assert_equal(DH_simbolico(0,1,0,0), Matrix([[1,0,0,0],[0,1,0,0], [0,0,1,1],[0,0,0,1]])) assert_equal(DH_simbolico(1,0,0,0), Matrix([[1,0,0,1],[0,1,0,0], [0,0,1,0],[0,0,0,1]])) """ Explanation: Problemas Defina una función que tome como argumentos los parametros de Denavit - Hartenberg, y cree una matriz de transformación homogénea. End of explanation """ def cinematica_PUMA(q1, q2, q3): from sympy import pi, var var("l1:4") # YOUR CODE HERE raise NotImplementedError() return A1, A2, A3 from nose.tools import assert_equal from sympy import pi, var, Matrix var("l1:4") A1, A2, A3 = cinematica_PUMA(0, 0, 0) assert_equal(A1*A2*A3, Matrix([[1,0,0,l2+l3], [0,0,-1,0], [0,1,0,l1], [0,0,0,1]])) A1, A2, A3 = cinematica_PUMA(pi/2, 0, 0) assert_equal(A1*A2*A3, Matrix([[0,0,1,0], [1,0,0,l2+l3], [0,1,0,l1], [0,0,0,1]])) A1, A2, A3 = cinematica_PUMA(0, pi/2, 0) assert_equal(A1*A2*A3, Matrix([[0,-1,0,0], [0,0,-1,0], [1,0,0,l1+l2+l3], [0,0,0,1]])) A1, A2, A3 = cinematica_PUMA(0, 0, pi/2) assert_equal(A1*A2*A3, Matrix([[0,-1,0,l2], [0,0,-1,0], [1,0,0,l1+l3], [0,0,0,1]])) """ Explanation: Cree una función que tome como argumentos los parametros de los grados de libertad de un manipulador tipo PUMA y devuelva las matrices de transformación homogénea asociadas a cada eslabon. 
End of explanation """ def transformacion_PUMA(q1, q2, q3): from sympy import pi, var var("l1:4") # YOUR CODE HERE raise NotImplementedError() from nose.tools import assert_equal from sympy import pi, var, Matrix var("l1:4") assert_equal(transformacion_PUMA(0, 0, 0), Matrix([[1,0,0,l2+l3], [0,0,-1,0], [0,1,0,l1], [0,0,0,1]])) assert_equal(transformacion_PUMA(pi/2, 0, 0), Matrix([[0,0,1,0], [1,0,0,l2+l3], [0,1,0,l1], [0,0,0,1]])) assert_equal(transformacion_PUMA(0, pi/2, 0), Matrix([[0,-1,0,0], [0,0,-1,0], [1,0,0,l1+l2+l3], [0,0,0,1]])) assert_equal(transformacion_PUMA(0, 0, pi/2), Matrix([[0,-1,0,l2], [0,0,-1,0], [1,0,0,l1+l3], [0,0,0,1]])) """ Explanation: Cree una función que dados los angulos del manipulador devuelva la transformación total del manipulador (ayudese de la función creada en el segundo problema). End of explanation """ def DH_numerico(a, d, α, θ): # YOUR CODE HERE raise NotImplementedError() def cinematica_PUMA(q1, q2, q3): # Considere que las longitudes son todas iguales a 1 l1, l2, l3 = 1, 1, 1 from numpy import pi # YOUR CODE HERE raise NotImplementedError() return A1, A2, A3 def grafica_PUMA(q1, q2, q3): from numpy import matrix # YOUR CODE HERE raise NotImplementedError() fig = figure(figsize=(8, 8)) ax = fig.add_subplot(111, projection='3d') ax.plot(xs, ys, zs, "-o") ax.set_xlim(-1.1, 1.1) ax.set_ylim(-1.1, 1.1) ax.set_zlim(-0.1, 2.1) return ax %matplotlib inline from matplotlib.pyplot import figure, plot, style from mpl_toolkits.mplot3d import Axes3D style.use("ggplot") from numpy.testing import assert_allclose from numpy import array ax = grafica_PUMA(0,0.5,0.5) ls = ax.get_lines() assert_allclose(ls[0].get_xdata(), array([0, 0, 0.8775, 1.417885]), rtol=1e-01, atol=1e-01) assert_allclose(ls[0].get_ydata(), array([-0.0384900179, 0, 0.00915, 0.03809]), rtol=1e-01, atol=1e-01) """ Explanation: Cree una función que dados los angulos del manipulador, grafique las posiciones de los eslabones del manipulador del primer punto (ayudese de las 
funciones creadas en el primer y segundo problemas, modificadas ligeramente para aceptar matrices numéricas, así como la función creada en la práctica anterior para la graficación de un sistema robótico). End of explanation """ %matplotlib inline from matplotlib.pyplot import figure, plot, style from mpl_toolkits.mplot3d import Axes3D style.use("ggplot") from ipywidgets import interact from numpy import pi τ = 2*pi # YOUR CODE HERE raise NotImplementedError() from nose.tools import assert_almost_equal from numpy import pi τ = 2*pi """ Explanation: Utilice la función interact para manipular la posición del manipulador, de tal manera que su posición sea aproximadamente $q_1=0.6rad$, $q_2=0.2rad$ y $q_3 = -0.8rad$ End of explanation """
palrogg/foundations-homework
Data_and_databases/Homework_4_Paul_Ronga_graded.ipynb
mit
numbers_str = '496,258,332,550,506,699,7,985,171,581,436,804,736,528,65,855,68,279,721,120' """ Explanation: Grade: 12 / 11 Homework #4 These problem sets focus on list comprehensions, string operations and regular expressions. Problem set #1: List slices and list comprehensions Let's start with some data. The following cell contains a string with comma-separated integers, assigned to a variable called numbers_str: End of explanation """ numbers = [int(i) for i in numbers_str.split(',')] # replace 'None' with an expression, as described above max(numbers) """ Explanation: In the following cell, complete the code with an expression that evaluates to a list of integers derived from the raw numbers in numbers_str, assigning the value of this expression to a variable numbers. If you do everything correctly, executing the cell should produce the output 985 (not '985'). End of explanation """ sorted(numbers)[-10:] """ Explanation: Great! We'll be using the numbers list you created above in the next few problems. In the cell below, fill in the square brackets so that the expression evaluates to a list of the ten largest values in numbers. Expected output: [506, 528, 550, 581, 699, 721, 736, 804, 855, 985] (Hint: use a slice.) End of explanation """ sorted([i for i in numbers if i % 3 == 0]) """ Explanation: In the cell below, write an expression that evaluates to a list of the integers from numbers that are evenly divisible by three, sorted in numerical order. Expected output: [120, 171, 258, 279, 528, 699, 804, 855] End of explanation """ from math import sqrt [sqrt(i) for i in numbers if i < 100] """ Explanation: Okay. You're doing great. Now, in the cell below, write an expression that evaluates to a list of the square roots of all the integers in numbers that are less than 100. In order to do this, you'll need to use the sqrt function from the math module, which I've already imported for you. 
Expected output: [2.6457513110645907, 8.06225774829855, 8.246211251235321] (These outputs might vary slightly depending on your platform.) End of explanation """ planets = [ {'diameter': 0.382, 'mass': 0.06, 'moons': 0, 'name': 'Mercury', 'orbital_period': 0.24, 'rings': 'no', 'type': 'terrestrial'}, {'diameter': 0.949, 'mass': 0.82, 'moons': 0, 'name': 'Venus', 'orbital_period': 0.62, 'rings': 'no', 'type': 'terrestrial'}, {'diameter': 1.00, 'mass': 1.00, 'moons': 1, 'name': 'Earth', 'orbital_period': 1.00, 'rings': 'no', 'type': 'terrestrial'}, {'diameter': 0.532, 'mass': 0.11, 'moons': 2, 'name': 'Mars', 'orbital_period': 1.88, 'rings': 'no', 'type': 'terrestrial'}, {'diameter': 11.209, 'mass': 317.8, 'moons': 67, 'name': 'Jupiter', 'orbital_period': 11.86, 'rings': 'yes', 'type': 'gas giant'}, {'diameter': 9.449, 'mass': 95.2, 'moons': 62, 'name': 'Saturn', 'orbital_period': 29.46, 'rings': 'yes', 'type': 'gas giant'}, {'diameter': 4.007, 'mass': 14.6, 'moons': 27, 'name': 'Uranus', 'orbital_period': 84.01, 'rings': 'yes', 'type': 'ice giant'}, {'diameter': 3.883, 'mass': 17.2, 'moons': 14, 'name': 'Neptune', 'orbital_period': 164.8, 'rings': 'yes', 'type': 'ice giant'}] """ Explanation: Problem set #2: Still more list comprehensions Still looking good. Let's do a few more with some different data. In the cell below, I've defined a data structure and assigned it to a variable planets. It's a list of dictionaries, with each dictionary describing the characteristics of a planet in the solar system. Make sure to run the cell before you proceed. End of explanation """ [i['name'] for i in planets if i['diameter'] > 4] """ Explanation: Now, in the cell below, write a list comprehension that evaluates to a list of names of the planets that have a diameter greater than four earth radii. 
Expected output: ['Jupiter', 'Saturn', 'Uranus'] End of explanation """ sum([i['mass'] for i in planets]) """ Explanation: In the cell below, write a single expression that evaluates to the sum of the mass of all planets in the solar system. Expected output: 446.79 End of explanation """ [i['name'] for i in planets if 'giant' in i['type']] """ Explanation: Good work. Last one with the planets. Write an expression that evaluates to the names of the planets that have the word giant anywhere in the value for their type key. Expected output: ['Jupiter', 'Saturn', 'Uranus', 'Neptune'] End of explanation """ [i['name'] for i in sorted(planets, key=lambda planet: planet['moons'])] """ Explanation: EXTREME BONUS ROUND: Write an expression below that evaluates to a list of the names of the planets in ascending order by their number of moons. (The easiest way to do this involves using the key parameter of the sorted function, which we haven't yet discussed in class! That's why this is an EXTREME BONUS question.) 
Expected output: ['Mercury', 'Venus', 'Earth', 'Mars', 'Neptune', 'Uranus', 'Saturn', 'Jupiter'] End of explanation """ import re poem_lines = ['Two roads diverged in a yellow wood,', 'And sorry I could not travel both', 'And be one traveler, long I stood', 'And looked down one as far as I could', 'To where it bent in the undergrowth;', '', 'Then took the other, as just as fair,', 'And having perhaps the better claim,', 'Because it was grassy and wanted wear;', 'Though as for that the passing there', 'Had worn them really about the same,', '', 'And both that morning equally lay', 'In leaves no step had trodden black.', 'Oh, I kept the first for another day!', 'Yet knowing how way leads on to way,', 'I doubted if I should ever come back.', '', 'I shall be telling this with a sigh', 'Somewhere ages and ages hence:', 'Two roads diverged in a wood, and I---', 'I took the one less travelled by,', 'And that has made all the difference.'] """ Explanation: Problem set #3: Regular expressions In the following section, we're going to do a bit of digital humanities. (I guess this could also be journalism if you were... writing an investigative piece about... early 20th century American poetry?) We'll be working with the following text, Robert Frost's The Road Not Taken. Make sure to run the following cell before you proceed. End of explanation """ [line for line in poem_lines if re.search(r'\b\w{4}\s\w{4}\b', line)] """ Explanation: In the cell above, I defined a variable poem_lines which has a list of lines in the poem, and imported the re library. In the cell below, write a list comprehension (using re.search()) that evaluates to a list of lines that contain two words next to each other (separated by a space) that have exactly four characters. (Hint: use the \b anchor. Don't overthink the "two words in a row" requirement.) 
Expected result: ['Then took the other, as just as fair,', 'Had worn them really about the same,', 'And both that morning equally lay', 'I doubted if I should ever come back.', 'I shall be telling this with a sigh'] End of explanation """ # TA-COMMENT: Could also use the ? quantifier instead of {0,1} [line for line in poem_lines if re.search('\w{5}\.{0,1}$', line)] """ Explanation: Good! Now, in the following cell, write a list comprehension that evaluates to a list of lines in the poem that end with a five-letter word, regardless of whether or not there is punctuation following the word at the end of the line. (Hint: Try using the ? quantifier. Is there an existing character class, or a way to write a character class, that matches non-alphanumeric characters?) Expected output: ['And be one traveler, long I stood', 'And looked down one as far as I could', 'And having perhaps the better claim,', 'Though as for that the passing there', 'In leaves no step had trodden black.', 'Somewhere ages and ages hence:'] End of explanation """ all_lines = " ".join(poem_lines) """ Explanation: Okay, now a slightly trickier one. In the cell below, I've created a string all_lines which evaluates to the entire text of the poem in one string. Execute this cell. End of explanation """ re.findall('I\s(.*?)\s', all_lines) """ Explanation: Now, write an expression that evaluates to all of the words in the poem that follow the word 'I'. (The strings in the resulting list should not include the I.) Hint: Use re.findall() and grouping! 
Expected output: ['could', 'stood', 'could', 'kept', 'doubted', 'should', 'shall', 'took'] End of explanation """ entrees = [ "Yam, Rosemary and Chicken Bowl with Hot Sauce $10.95", "Lavender and Pepperoni Sandwich $8.49", "Water Chestnuts and Peas Power Lunch (with mayonnaise) $12.95 - v", "Artichoke, Mustard Green and Arugula with Sesame Oil over noodles $9.95 - v", "Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce $19.95", "Rutabaga And Cucumber Wrap $8.49 - v" ] """ Explanation: Finally, something super tricky. Here's a list of strings that contains a restaurant menu. Your job is to wrangle this plain text, slightly-structured data into a list of dictionaries. End of explanation """ menu = [] for item in entrees: dictitem = {} dictitem['name'] = re.search('(.*)\s\$', item).group(1) # why 1? 0 = whole match? dictitem['price'] = float(re.search('\d{1,2}\.\d{2}', item).group()) dictitem['vegetarian'] = bool(re.match('.*v$', item)) menu.append(dictitem) menu """ Explanation: You'll need to pull out the name of the dish and the price of the dish. The v after the hyphen indicates that the dish is vegetarian---you'll need to include that information in your dictionary as well. I've included the basic framework; you just need to fill in the contents of the for loop. Expected output: [{'name': 'Yam, Rosemary and Chicken Bowl with Hot Sauce ', 'price': 10.95, 'vegetarian': False}, {'name': 'Lavender and Pepperoni Sandwich ', 'price': 8.49, 'vegetarian': False}, {'name': 'Water Chestnuts and Peas Power Lunch (with mayonnaise) ', 'price': 12.95, 'vegetarian': True}, {'name': 'Artichoke, Mustard Green and Arugula with Sesame Oil over noodles ', 'price': 9.95, 'vegetarian': True}, {'name': 'Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce ', 'price': 19.95, 'vegetarian': False}, {'name': 'Rutabaga And Cucumber Wrap ', 'price': 8.49, 'vegetarian': True}] End of explanation """
brettavedisian/phys202-2015-work
assignments/assignment08/InterpolationEx02.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import numpy as np sns.set_style('white') from scipy.interpolate import griddata """ Explanation: Interpolation Exercise 2 End of explanation """ # Collaborated with James A. on this part x=np.empty(1,) x[0]=0 x=np.hstack((x,[-5]*11)) for i in range(-4,5): x=np.hstack((x,np.array((i,i)))) x=np.hstack((x,np.array(([5]*11)))) y=np.empty(1,) for i in range(-5,6): y=np.hstack((y,np.array((i)))) y=np.hstack((y,np.array((5,-5)*9))) for i in range(-5,6): y=np.hstack((y,np.array((i)))) f=np.zeros_like(x) f[0]=1.0 """ Explanation: Sparse 2d interpolation In this example the values of a scalar field $f(x,y)$ are known at a very limited set of points in a square domain: The square domain covers the region $x\in[-5,5]$ and $y\in[-5,5]$. The values of $f(x,y)$ are zero on the boundary of the square at integer spaced points. The value of $f$ is known at a single interior point: $f(0,0)=1.0$. The function $f$ is not known at any other points. Create arrays x, y, f: x should be a 1d array of the x coordinates on the boundary and the 1 interior point. y should be a 1d array of the y coordinates on the boundary and the 1 interior point. f should be a 1d array of the values of f at the corresponding x and y coordinates. You might find that np.hstack is helpful. 
End of explanation """ plt.scatter(x, y); assert x.shape==(41,) assert y.shape==(41,) assert f.shape==(41,) assert np.count_nonzero(f)==1 """ Explanation: The following plot should show the points on the boundary and the single point in the interior: End of explanation """ xnew=np.linspace(-5,5,100) ynew=np.linspace(-5,5,100) Xnew,Ynew=np.meshgrid(xnew,ynew) Fnew=griddata((x,y),f,(Xnew,Ynew),method='cubic') assert xnew.shape==(100,) assert ynew.shape==(100,) assert Xnew.shape==(100,100) assert Ynew.shape==(100,100) assert Fnew.shape==(100,100) """ Explanation: Use meshgrid and griddata to interpolate the function $f(x,y)$ on the entire square domain: xnew and ynew should be 1d arrays with 100 points between $[-5,5]$. Xnew and Ynew should be 2d versions of xnew and ynew created by meshgrid. Fnew should be a 2d array with the interpolated values of $f(x,y)$ at the points (Xnew,Ynew). Use cubic spline interpolation. End of explanation """ plt.pcolormesh(Fnew,cmap='gist_earth') plt.colorbar() plt.title('2D Data Interpolation') plt.xlabel('X-Data') plt.ylabel('Y-Data'); assert True # leave this to grade the plot """ Explanation: Plot the values of the interpolated scalar field using a contour plot. Customize your plot to make it effective and beautiful. End of explanation """
jupyter/nbgrader
nbgrader/tests/apps/files/test-v2.ipynb
bsd-3-clause
def squares(n): """Compute the squares of numbers from 1 to n, such that the ith element of the returned list equals i^2. """ ### BEGIN SOLUTION if n < 1: raise ValueError("n must be greater than or equal to 1") return [i ** 2 for i in range(1, n + 1)] ### END SOLUTION """ Explanation: For this problem set, we'll be using the Jupyter notebook: Part A (2 points) Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\leq i \leq n$. Make sure it handles the case where $n<1$ by raising a ValueError. End of explanation """ squares(10) """Check that squares returns the correct output for several inputs""" assert squares(1) == [1] assert squares(2) == [1, 4] assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100] assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121] """Check that squares raises an error for invalid inputs""" try: squares(0) except ValueError: pass else: raise AssertionError("did not raise") try: squares(-4) except ValueError: pass else: raise AssertionError("did not raise") """ Explanation: Your function should print [1, 4, 9, 16, 25, 36, 49, 64, 81, 100] for $n=10$. Check that it does: End of explanation """ def sum_of_squares(n): """Compute the sum of the squares of numbers from 1 to n.""" ### BEGIN SOLUTION return sum(squares(n)) ### END SOLUTION """ Explanation: Part B (1 point) Using your squares function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the squares function -- it should NOT reimplement its functionality. 
End of explanation """ sum_of_squares(10) """Check that sum_of_squares returns the correct answer for various inputs.""" assert sum_of_squares(1) == 1 assert sum_of_squares(2) == 5 assert sum_of_squares(10) == 385 assert sum_of_squares(11) == 506 """Check that sum_of_squares relies on squares.""" orig_squares = squares del squares try: sum_of_squares(1) except NameError: pass else: raise AssertionError("sum_of_squares does not use squares") finally: squares = orig_squares """ Explanation: The sum of squares from 1 to 10 should be 385. Verify that this is the answer you get: End of explanation """ def pyramidal_number(n): """Returns the n^th pyramidal number""" return sum_of_squares(n) """ Explanation: Part C (1 point) Using LaTeX math notation, write out the equation that is implemented by your sum_of_squares function. $\sum_{i=1}^n i^2$ Part D (2 points) Find a usecase for your sum_of_squares function and implement that usecase in the cell below. End of explanation """
ramseylab/networkscompbio
class07_clustcoeff_python3_template.ipynb
apache-2.0
from igraph import Graph from igraph import summary import pandas import numpy import timeit from pympler import asizeof import bintrees """ Explanation: CS446/519 - Class Session 7 - Transitivity (Clustering Coefficients) In this class session we are going to compute the local clustering coefficient of all vertices in the undirected human protein-protein interaction network (PPI), in two ways -- first without using igraph, and the using igraph. We'll obtain the interaction data from the Pathway Commons SIF file (in the shared/ folder), we'll make an "adjacency forest" representation of the network, and we'll manually compute the local clustering coefficient of each vertex (protein) in the network using the "enumerating neighbor pairs" method described by Newman. Then we'll run the same algorithm using the transitivity_local_undirected function in igraph, and we'll compare the results in order to check our work. Grad students: you should also group vertices by their "binned" vertex degree k (bin size 50, total number of bins = 25) and plot the average local clustering coefficient for the vertices within a bin, against the center k value for the bin, on log-log scale (compare to Newman Fig. 8.12) End of explanation """ sif_data = pandas.read_csv("shared/pathway_commons.sif", sep="\t", names=["species1","interaction_type","species2"]) """ Explanation: Step 1: load in the SIF file (refer to Class 6 exercise) into a data frame sif_data, using the pandas.read_csv function, and name the columns species1, interaction_type, and species2. End of explanation """ interaction_types_ppi = set(["interacts-with", "in-complex-with"]) interac_ppi = sif_data[sif_data.interaction_type.isin(interaction_types_ppi)] """ Explanation: Step 2: restrict the interactions to protein-protein undirected ("in-complex-with", "interacts-with"), by using the isin function and then using [ to index rows into the data frame. Call the returned ata frame interac_ppi. 
End of explanation """ for i in range(0, interac_ppi.shape[0]): if interac_ppi.iat[i,0] > interac_ppi.iat[i,2]: temp_name = interac_ppi.iat[i,0] interac_ppi.set_value(i, 'species1', interac_ppi.iat[i,2]) interac_ppi.set_value(i, 'species2', temp_name) interac_ppi_unique = interac_ppi[["species1","species2"]].drop_duplicates() ppi_igraph = Graph.TupleList(interac_ppi_unique.values.tolist(), directed=False) summary(ppi_igraph) """ Explanation: Step 3: restrict the data frame to only the unique interaction pairs of proteins (ignoring the interaction type), and call that data frame interac_ppi_unique. Make an igraph Graph object from interac_ppi_unique using Graph.TupleList, values, and tolist. Call summary on the Graph object. Refer to the notebooks for the in-class exercises in Class sessions 3 and 6. End of explanation """ ppi_adj_list = ppi_igraph.get_adjlist() """ Explanation: Step 4: Obtain an adjacency list representation of the graph (refer to Class 5 exercise), using get_adjlist. End of explanation """ def get_bst_forest(theadjlist): g_adj_list = theadjlist n = len(g_adj_list) theforest = [] for i in range(0,n): itree = bintrees.AVLTree() for j in g_adj_list[i]: itree.insert(j,1) theforest.append(itree) return theforest def find_bst_forest(bst_forest, i, j): return j in bst_forest[i] ppi_adj_forest = get_bst_forest(ppi_adj_list) """ Explanation: Step 5: Make an "adjacency forest" data structure as a list of AVLTree objects (refer to Class 5 exercise). Call this adjacency forest, ppi_adj_forest. End of explanation """ N = len(ppi_adj_list) civals = numpy.zeros(100) civals[:] = numpy.NaN start_time = timeit.default_timer() ## PUT CODE HERE TO CALCULATE THE LOCAL CLUSTERING COEFFICIENT ci_elapsed = timeit.default_timer() - start_time print(ci_elapsed) """ Explanation: Step 6: Compute the local clustering coefficient (Ci) values of the first 100 vertices (do timing on this operation) as a numpy.array; for any vertex with degree=1, it's Ci value can be numpy NaN. 
You'll probably want to have an outer for loop for vertex ID n going from 0 to 99, and then an inner for loop iterating over neighbor vertices of vertex n. Store the clustering coefficients in a list, civals. Print out how many seconds it takes to perform this calculation. End of explanation """ start_time = timeit.default_timer() civals_igraph = ppi_igraph.transitivity_local_undirected(vertices=list(range(0,100))) ci_elapsed = timeit.default_timer() - start_time print(ci_elapsed) """ Explanation: Step 7: Calculate the local clustering coefficients for the first 100 vertices using the method igraph.Graph.transitivity_local_undirected and save the results as a list civals_igraph. Do timing on the call to transitivity_local_undirected, using vertices= to specify the vertices for which you want to compute the local clustering coefficient. End of explanation """ import matplotlib.pyplot matplotlib.pyplot.plot(civals, civals_igraph) matplotlib.pyplot.xlabel("Ci (my code)") matplotlib.pyplot.ylabel("Ci (igraph)") matplotlib.pyplot.show() """ Explanation: Step 8: Compare your Ci values to those that you got from igraph, using a scatter plot where civals is on the horizontal axis and civals_igraph is on the vertical axis. End of explanation """ civals_igraph = numpy.array(ppi_igraph.transitivity_local_undirected()) deg_igraph = ppi_igraph.degree() deg_npa = numpy.array(deg_igraph) deg_binids = numpy.rint(deg_npa/50) binkvals = 50*numpy.array(range(0,25)) civals_avg = numpy.zeros(25) for i in range(0,25): civals_avg[i] = numpy.mean(civals_igraph[deg_binids == i]) matplotlib.pyplot.loglog( binkvals, civals_avg) matplotlib.pyplot.ylabel("<Ci>") matplotlib.pyplot.xlabel("k") matplotlib.pyplot.show() """ Explanation: Step 9: scatter plot the average log(Ci) vs. log(k) (i.e., local clustering coefficient vs. 
vertex degree) for 25 bins of vertex degree, with each bin size being 50 (so we are binning by k, and the bin centers are 50, 100, 150, 200, ...., 1250) End of explanation """ civals = numpy.zeros(len(ppi_adj_list)) civals[:] = numpy.NaN ppi_adj_hash = [] for i in range(0, len(ppi_adj_list)): newhash = {} for j in ppi_adj_list[i]: newhash[j] = True ppi_adj_hash.append(newhash) start_time = timeit.default_timer() for n in range(0, len(ppi_adj_list)): neighbors = ppi_adj_hash[n] nneighbors = len(neighbors) if nneighbors > 1: nctr = 0 for i in neighbors: for j in neighbors: if (j > i) and (j in ppi_adj_hash[i]): nctr += 1 civals[n] = nctr/(nneighbors*(nneighbors-1)/2) ci_elapsed = timeit.default_timer() - start_time print(ci_elapsed) """ Explanation: Step 10: Now try computing the local clustering coefficient using a "list of hashtables" approach; compute the local clustering coefficients for all vertices, and compare to the timing for R. Which is faster, the python3 implementation or the R implementation? End of explanation """ asizeof.asizeof(ppi_adj_hash)/1000000 """ Explanation: So the built-in python dictionary type gave us fantastic performance. But is this coming at the cost of huge memory footprint? Let's check the size of our adjacency "list of hashtables", in MB: End of explanation """
junhwanjang/DataSchool
Lecture/12. Scikit-Learn & statsmodels 패키지 소개/4) Scikit-Learn 패키지의 샘플 데이터 - classification용.ipynb
mit
from sklearn.datasets import load_iris iris = load_iris() print(iris.DESCR) df = pd.DataFrame(iris.data, columns=iris.feature_names) sy = pd.Series(iris.target, dtype="category") sy = sy.cat.rename_categories(iris.target_names) df['species'] = sy df.tail() sns.pairplot(df, hue="species") plt.show() """ Explanation: Scikit-Learn 패키지의 샘플 데이터 - classification용 Iris Dataset load_iris() https://en.wikipedia.org/wiki/Iris_flower_data_set R.A Fisher의 붓꽃 분류 연구 관찰 자료 꽃받침 길이(Sepal Length) 꽃받침 폭(Sepal Width) 꽃잎 길이(Petal Length) 꽃잎 폭(Petal Width) 종 setosa versicolor virginica End of explanation """ from sklearn.datasets import fetch_20newsgroups newsgroups = fetch_20newsgroups(subset='all') print(newsgroups.description) print(newsgroups.keys()) from pprint import pprint pprint(list(newsgroups.target_names)) print(newsgroups.data[1]) print("=" * 80) print(newsgroups.target_names[newsgroups.target[1]]) """ Explanation: 뉴스 그룹 텍스트 fetch_20newsgroups(): 20 News Groups text End of explanation """ from sklearn.datasets import fetch_olivetti_faces olivetti = fetch_olivetti_faces() print(olivetti.DESCR) print(olivetti.keys()) N=2; M=5; fig = plt.figure(figsize=(8,5)) plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05) klist = np.random.choice(range(len(olivetti.data)), N * M) for i in range(N): for j in range(M): k = klist[i*M+j] ax = fig.add_subplot(N, M, i*M+j+1) ax.imshow(olivetti.images[k], cmap=plt.cm.bone); ax.grid(False) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) plt.title(olivetti.target[k]) plt.tight_layout() plt.show() """ Explanation: Olivetti faces fetch_olivetti_faces() 얼굴 인식 이미지 End of explanation """ from sklearn.datasets import fetch_lfw_people lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) print(lfw_people.DESCR) print(lfw_people.keys()) N=2; M=5; fig = plt.figure(figsize=(8,5)) plt.subplots_adjust(top=1, bottom=0, hspace=0.1, wspace=0.05) klist = np.random.choice(range(len(lfw_people.data)), N * M) for i in range(N): for j in range(M): k 
= klist[i*M+j] ax = fig.add_subplot(N, M, i*M+j+1) ax.imshow(lfw_people.images[k], cmap=plt.cm.bone); ax.grid(False) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) plt.title(lfw_people.target_names[lfw_people.target[k]]) plt.tight_layout() plt.show() """ Explanation: Labeled Faces in the Wild (LFW) #### fetch_lfw_people() 유명인 얼굴 이미지 Parameters funneled : boolean, optional, default: True Download and use the funneled variant of the dataset. resize : float, optional, default 0.5 Ratio used to resize the each face picture. min_faces_per_person : int, optional, default None The extracted dataset will only retain pictures of people that have at least min_faces_per_person different pictures. color : boolean, optional, default False Keep the 3 RGB channels instead of averaging them to a single gray level channel. If color is True the shape of the data has one more dimension than than the shape with color = False. End of explanation """ from sklearn.datasets import fetch_lfw_pairs lfw_pairs = fetch_lfw_pairs(resize=0.4) print(lfw_pairs.DESCR) print(lfw_pairs.keys()) N=2; M=5; fig = plt.figure(figsize=(8,5)) plt.subplots_adjust(top=1, bottom=0, hspace=0.01, wspace=0.05) klist = np.random.choice(range(len(lfw_pairs.data)), M) for j in range(M): k = klist[j] ax1 = fig.add_subplot(N, M, j+1) ax1.imshow(lfw_pairs.pairs [k][0], cmap=plt.cm.bone); ax1.grid(False) ax1.xaxis.set_ticks([]) ax1.yaxis.set_ticks([]) plt.title(lfw_pairs.target_names[lfw_pairs.target[k]]) ax2 = fig.add_subplot(N, M, j+1 + M) ax2.imshow(lfw_pairs.pairs [k][1], cmap=plt.cm.bone); ax2.grid(False) ax2.xaxis.set_ticks([]) ax2.yaxis.set_ticks([]) plt.tight_layout() plt.show() """ Explanation: #### fetch_lfw_pairs() 얼굴 이미지 Pair 동일 인물일 수도 있고 아닐 수도 있음 End of explanation """ from sklearn.datasets import load_digits digits = load_digits() print(digits.DESCR) print(digits.keys()) N=2; M=5; fig = plt.figure(figsize=(10,5)) plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05) for i in range(N): for j in 
range(M): k = i*M+j ax = fig.add_subplot(N, M, k+1) ax.imshow(digits.images[k], cmap=plt.cm.bone, interpolation="none"); ax.grid(False) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) plt.title(digits.target_names[k]) plt.tight_layout() plt.show() """ Explanation: Digits Handwriting Image load_digits() 숫자 필기 이미지 End of explanation """ from sklearn.datasets.mldata import fetch_mldata mnist = fetch_mldata('MNIST original') mnist.keys() N=2; M=5; fig = plt.figure(figsize=(8,5)) plt.subplots_adjust(top=1, bottom=0, hspace=0, wspace=0.05) klist = np.random.choice(range(len(mnist.data)), N * M) for i in range(N): for j in range(M): k = klist[i*M+j] ax = fig.add_subplot(N, M, i*M+j+1) ax.imshow(mnist.data[k].reshape(28, 28), cmap=plt.cm.bone, interpolation="nearest"); ax.grid(False) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) plt.title(mnist.target[k]) plt.tight_layout() plt.show() """ Explanation: mldata.org repository fetch_mldata() http://mldata.org public repository for machine learning data, supported by the PASCAL network 홈페이지에서 data name 을 검색 후 key로 이용 MNIST 숫자 필기인식 자료 https://en.wikipedia.org/wiki/MNIST_database Mixed National Institute of Standards and Technology (MNIST) database 0-9 필기 숫자 이미지 28x28 pixel bounding box anti-aliased, grayscale levels 60,000 training images and 10,000 testing images End of explanation """
metpy/MetPy
v0.10/_downloads/4d64a32e8cfca4a5a78f2d1f68ae3c83/Gradient.ipynb
bsd-3-clause
import numpy as np import metpy.calc as mpcalc from metpy.units import units """ Explanation: Gradient Use metpy.calc.gradient. This example demonstrates the various ways that MetPy's gradient function can be utilized. End of explanation """ data = np.array([[23, 24, 23], [25, 26, 25], [27, 28, 27], [24, 25, 24]]) * units.degC # Create an array of x position data (the coordinates of our temperature data) x = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) * units.kilometer y = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]) * units.kilometer """ Explanation: Create some test data to use for our example End of explanation """ grad = mpcalc.gradient(data, coordinates=(y, x)) print('Gradient in y direction: ', grad[0]) print('Gradient in x direction: ', grad[1]) """ Explanation: Calculate the gradient using the coordinates of the data End of explanation """ x_delta = 2 * units.km y_delta = 1 * units.km grad = mpcalc.gradient(data, deltas=(y_delta, x_delta)) print('Gradient in y direction: ', grad[0]) print('Gradient in x direction: ', grad[1]) """ Explanation: It's also possible that we do not have the position of data points, but know that they are evenly spaced. We can then specify a scalar delta value for each axes. End of explanation """ x_deltas = np.array([[2, 3], [1, 3], [2, 3], [1, 2]]) * units.kilometer y_deltas = np.array([[2, 3, 1], [1, 3, 2], [2, 3, 1]]) * units.kilometer grad = mpcalc.gradient(data, deltas=(y_deltas, x_deltas)) print('Gradient in y direction: ', grad[0]) print('Gradient in x direction: ', grad[1]) """ Explanation: Finally, the deltas can be arrays for unevenly spaced data. End of explanation """
1x0r/pspis
lectures/lecture_04/demos-1.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt n = 19 print("Каждая цифра представлена матрицей формы ", digits.data[n, :].shape) """ Explanation: Какую задачу можно поставить для этого набора данных? End of explanation """ digit = 255 - digits.data[n, :].reshape(8, 8) plt.imshow(digit, cmap='gray', interpolation='none') plt.title("This is " + str(digits.target[n])) plt.show() """ Explanation: Чтобы отобразить её на экране, нужно применить метод reshape. Целевая форма &mdash; $8 \times 8$. End of explanation """ from sklearn.tree import DecisionTreeClassifier """ Explanation: Возьмем один из методов прошлой лекции. Например, метод классификации, основанный на деревьях (CART). End of explanation """ clf = DecisionTreeClassifier(random_state=0) """ Explanation: Почти у всех классов, отвечающих за методы классификации в scikit-learn, есть следующие методы: - fit &mdash; обучение модели; - predict &mdash; классификация примера обученным классификатором; - score &mdash;оценка качества классификации в соответствии с некоторым критерием. Чтобы создать дерево-классификатор, достаточно создать объект класса DecisionTreeClassifier End of explanation """ clf.fit(digits.data[:-10], digits.target[:-10]) """ Explanation: Обучим классификатор на всех цифрах, кроме последних 10. End of explanation """ errors = 0 for i in range(1, 11): k = clf.predict(digits.data[-i].reshape(1, -1)) print("Классификатор предсказал число {}, на самом деле это {}. Числа {}совпали." .format(k[0], digits.target[-i], "" if k[0] == digits.target[-i] else "не ")) if k[0] != digits.target[-i]: errors += 1 """ Explanation: Теперь попробуем классифицировать оставшиеся 10 картинок. 
End of explanation """ fig = plt.figure(figsize=(12, 4)) frame = 1 for i in range(1, 11): k = clf.predict(digits.data[-i].reshape(1, -1)) if k[0] != digits.target[-i]: digit = 255 - digits.data[-i, :].reshape(8, 8) ax = fig.add_subplot(1, errors, frame) ax.imshow(digit, cmap='gray', interpolation='none') ax.set_title("This is {}, recognized as {}".format(digits.target[-i], k[0])) frame += 1 """ Explanation: Давайте посмотрим на "проблемные" числа: End of explanation """
yugangzhang/CHX_Pipelines
2017_3/Mask_pipeline_2017_V6.ipynb
bsd-3-clause
from chxanalys.chx_libs import (np, roi, time, datetime, os, getpass, db, get_images,LogNorm, plt,ManualMask) from chxanalys.chx_libs import cmap_albula, cmap_vge, random from chxanalys.chx_generic_functions import (get_detector, get_meta_data,create_user_folder, get_fields, get_sid_filenames,load_data, RemoveHot, show_img,get_avg_img, reverse_updown,create_cross_mask,mask_badpixels ) from skimage.draw import line_aa, line, polygon, circle %matplotlib notebook """ Explanation: Masking Pipeline CHX Olog NoteBook CHX Olog (https://logbook.nsls2.bnl.gov/11-ID/) End of explanation """ CYCLE= '2017_3' #change clycle here path = '/XF11ID/analysis/%s/masks/'%CYCLE print ("The analysis results will be saved in : %s"%path) """ Explanation: Path for Saving Results End of explanation """ uid = 'e09db5' # (scan num: 6358) (Measurement: Single image for masking ) uid = '727729' # (scan num: 6708) (Measurement: test series with feedback ) uid = '2bc66e'# (scan num: 6709) (Measurement: .2s 1k 36/.2% ) md = get_meta_data( uid ) detector = get_detector( db[uid ] ) print ('Detector is: %s'%detector ) sud = get_sid_filenames(db[uid]) print ('scan_id, full-uid, data path are: %s--%s--%s'%(sud[0], sud[1], sud[2][0] )) print(md['beam_center_y'], md['beam_center_x']) imgs = load_data( uid, detector, reverse= False ) #imgs = load_data( uid, detector, reverse= True ) md.update( imgs.md );Nimg = len(imgs); #if 'number of images' not in list(md.keys()): md['number of images'] = Nimg pixel_mask = 1- np.int_( np.array( imgs.md['pixel_mask'], dtype= bool) ) print( 'The data are: %s' %imgs ) pixel_mask = 1- np.int_( np.array( md['pixel_mask'], dtype= bool) ) img_choice_N = 1 #can change this number to select more frames for average img_samp_index = random.sample( range(len(imgs)), img_choice_N) avg_img = get_avg_img( imgs, img_samp_index, plot_ = False, uid = uid) """ Explanation: Get the image series and metadata from the uid End of explanation """ show_img( avg_img*pixel_mask , vmin=.001, 
vmax=1e8, logs=True, image_name ='uid=%s'%uid, aspect=1, cmap= cmap_albula ) """ Explanation: show image and the pixel mask show image End of explanation """ pixel_mask = mask_badpixels( pixel_mask, md['detector']) show_img( pixel_mask, vmin=0, vmax=1, image_name ='pixel_mask--uid=%s'%uid ,aspect=1 ) """ Explanation: pixel mask Update Pixel Mask due to bad pixel of Eiger Detector End of explanation """ #avg_img = get_avg_img( imgs, sampling = 1000, plot_ = False, uid =uid) mask_rh = RemoveHot( avg_img, 2**20-1, plot_=True) show_img(avg_img*pixel_mask,vmin=0.1,vmax=1e3, logs=True, image_name= 'uid= %s with pixel mask'%uid , aspect=1, cmap= cmap_albula ) """ Explanation: Remove hotspots in the image End of explanation """ md['beam_center_x'], md['beam_center_y'] """ Explanation: Create a polygon mask check beam center End of explanation """ path #creat the right part mask partial_mask = create_cross_mask( avg_img,center=[1000,1218], wy_left= 0, wy_right= 35, wx_up= 0, wx_down= 0,center_radius= 0 ) show_img( partial_mask ) #np.save( '/XF11ID/analysis/2017_1/masks/Ver_Beamstop', partial_mask ) #Ver_Beamstop = np.load( '/XF11ID/analysis/2017_1/masks/Ver_Beamstop.npy' ) #Ver_Beamstop = move_beamstop( Vertical_Beamstop, xshift=0, yshift=0 ) #creat the left/right/up/down part mask partial_mask *= create_cross_mask( avg_img, center=[ 1099,1218], wy_left= 0, wy_right= 0, wx_up= 5, wx_down=5,center_radius= 0 ) #partial_mask2[1285:1350,1430:1440,] = False #creat the left/right/up/down part mask partial_mask *= create_cross_mask( avg_img, center=[ 1030,395], wy_left= 145, wy_right= 0, wx_up= 0, wx_down= 0,center_radius= 0 ) #partial_mask2[1285:1350,1430:1440,] = False #creat the left/right/up/down part mask partial_mask *= create_cross_mask( avg_img, center=[ 1139,1225], wy_left= 0, wy_right= 0, wx_up= 0, wx_down=0,center_radius= 0 ) #partial_mask2[1285:1350,1430:1440,] = False #from chxanalys.chx_generic_functions import create_multi_rotated_rectangle_mask #np.save( 
'/XF11ID/analysis/2017_1/masks/Hor_Beamstop', partial_mask ) #Hor_Beamstop = np.load( '/XF11ID/analysis/2017_1/masks/Hor_Beamstop.npy' ) #Hor_Beamstop = move_beamstop( Hor_Beamstop, xshift=0, yshift=0 ) show_img( partial_mask ) #creat the left/right/up/down part mask #partial_mask *= create_cross_mask( avg_img, center=[ 1615,2000], # wy_left= 0, wy_right= 100, # wx_up= 0, wx_down=0,center_radius= 0 ) #partial_mask2[1285:1350,1430:1440,] = False #np.save( '/XF11ID/analysis/2017_1/masks/Bad_4M', partial_mask ) #Bad_4M = np.load( '/XF11ID/analysis/2017_1/masks/Bad_4M.npy' ) #create a circle mask for windows if False: #make it True to make window mask window_shadow = ~create_cross_mask( avg_img, center=[ 911,997], wy_left= 0, wy_right= 0, wx_up= 0, wx_down= 0,center_circle=True, center_radius= 680) else: window_shadow = 1 full_mask = partial_mask *window_shadow #full_mask = Ver_Beamstop * Hor_Beamstop *Bad_4M *window_shadow show_img( full_mask, aspect = 1 ) mask = np.array ( full_mask * pixel_mask*mask_rh , dtype = bool ) #mask = np.array ( full_mask * pixel_mask , dtype = bool ) fig, ax = plt.subplots() #new_mask = im=ax.imshow( (~mask) * avg_img,origin='lower' , norm= LogNorm( vmin=0.001, vmax= 1e5), cmap= cmap_albula) #im = ax.imshow(avg_img, cmap='viridis',origin='lower', norm= LogNorm( vmin=0.001, vmax=100 ) ) plt.show() fig, ax = plt.subplots() im = ax.imshow((mask)*avg_img, cmap= cmap_albula,origin='lower',norm= LogNorm( vmin=.1, vmax=1e5 ), interpolation='none') plt.show() """ Explanation: To create multi-rectangle masks, for each sub-mask End of explanation """ #mask = np.array ( ~new_mask* ~plgon_mask * md['pixel_mask']*mask_rh, dtype = bool ) fig, ax = plt.subplots() im=ax.imshow(mask, origin='lower' ,vmin=0, vmax=1,cmap='viridis') fig.colorbar(im) plt.show() """ Explanation: Combine the hand-drawn/polygon mask and the pixel mask and hot pixel mask End of explanation """ np.save( path + uid +"_mask", mask) path + uid +"_mask" """ Explanation: Save the 
combined mask to use in further data analysis End of explanation """ if True: meaningful_name = 'Sept23_SAXS' np.save( path + meaningful_name, mask) print( path + meaningful_name ) path + meaningful_name 2ef764uid """ Explanation: save with a meaningful filename, make False after excute to avoid overwrite End of explanation """
anthonyng2/FX-Trading-with-Python-and-Oanda
Oanda v20 REST-oandapyV20/06.00 Position Management.ipynb
mit
import pandas as pd import oandapyV20 import oandapyV20.endpoints.positions as positions import configparser config = configparser.ConfigParser() config.read('../config/config_v20.ini') accountID = config['oanda']['account_id'] access_token = config['oanda']['api_key'] client = oandapyV20.API(access_token=access_token) """ Explanation: <!--NAVIGATION--> < Trade Management | Contents | Transaction History > Position Management OANDA REST-V20 API Wrapper Doc on Position OANDA API Getting Started OANDA DOC on Position End of explanation """ r = positions.PositionList(accountID=accountID) client.request(r) print(r.response) """ Explanation: List all Positions for an Account. End of explanation """ r = positions.OpenPositions(accountID=accountID) client.request(r) """ Explanation: List all open Positions for an Account. End of explanation """ instrument = "AUD_USD" r = positions.PositionDetails(accountID=accountID, instrument=instrument) client.request(r) """ Explanation: Get the details of a single instrument’s position in an Account End of explanation """ data = { "longUnits": "ALL" } r = positions.PositionClose(accountID=accountID, instrument=instrument, data=data) client.request(r) """ Explanation: Closeout the open Position regarding instrument in an Account. End of explanation """
AtmaMani/pyChakras
udemy_ml_bootcamp/Python-for-Data-Analysis/NumPy/Numpy Indexing and Selection.ipynb
mit
import numpy as np #Creating sample array arr = np.arange(0,11) #Show arr """ Explanation: <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a> NumPy Indexing and Selection In this lecture we will discuss how to select elements or groups of elements from an array. End of explanation """ #Get a value at an index arr[8] #Get values in a range arr[1:5] #Get values in a range arr[0:5] """ Explanation: Bracket Indexing and Selection The simplest way to pick one or some elements of an array looks very similar to python lists: End of explanation """ #Setting a value with index range (Broadcasting) arr[0:5]=100 #Show arr # Reset array, we'll see why I had to reset in a moment arr = np.arange(0,11) #Show arr #Important notes on Slices slice_of_arr = arr[0:6] #Show slice slice_of_arr #Change Slice slice_of_arr[:]=99 #Show Slice again slice_of_arr """ Explanation: Broadcasting Numpy arrays differ from a normal Python list because of their ability to broadcast: End of explanation """ arr """ Explanation: Now note the changes also occur in our original array! End of explanation """ #To get a copy, need to be explicit arr_copy = arr.copy() arr_copy """ Explanation: Data is not copied, it's a view of the original array! This avoids memory problems! End of explanation """ arr_2d = np.array(([5,10,15],[20,25,30],[35,40,45])) #Show arr_2d #Indexing row arr_2d[1] # Format is arr_2d[row][col] or arr_2d[row,col] # Getting individual element value arr_2d[1][0] # Getting individual element value arr_2d[1,0] # 2D array slicing #Shape (2,2) from top right corner arr_2d[:2,1:] #Shape bottom row arr_2d[2] #Shape bottom row arr_2d[2,:] """ Explanation: Indexing a 2D array (matrices) The general format is arr_2d[row][col] or arr_2d[row,col]. I recommend usually using the comma notation for clarity. 
End of explanation """ #Set up matrix arr2d = np.zeros((10,10)) #Length of array arr_length = arr2d.shape[1] #Set up array for i in range(arr_length): arr2d[i] = i arr2d """ Explanation: Fancy Indexing Fancy indexing allows you to select entire rows or columns out of order,to show this, let's quickly build out a numpy array: End of explanation """ arr2d[[2,4,6,8]] #Allows in any order arr2d[[6,4,2,7]] """ Explanation: Fancy indexing allows the following End of explanation """ arr = np.arange(1,11) arr arr > 4 bool_arr = arr>4 bool_arr arr[bool_arr] arr[arr>2] x = 2 arr[arr>x] """ Explanation: More Indexing Help Indexing a 2d matrix can be a bit confusing at first, especially when you start to add in step size. Try google image searching NumPy indexing to fins useful images, like this one: <img src= 'http://memory.osu.edu/classes/python/_images/numpy_indexing.png' width=500/> Selection Let's briefly go over how to use brackets for selection based off of comparison operators. End of explanation """
geoneill12/phys202-2015-work
assignments/assignment07/AlgorithmsEx02.ipynb
mit
%matplotlib inline from matplotlib import pyplot as plt import seaborn as sns import numpy as np """ Explanation: Algorithms Exercise 2 Imports End of explanation """ def find_peaks(a): """Find the indices of the local maxima in a sequence.""" b = np.array(a) c = b.max() return b[c] p1 = find_peaks([2,0,1,0,2,0,1]) assert np.allclose(p1, np.array([0,2,4,6])) p2 = find_peaks(np.array([0,1,2,3])) assert np.allclose(p2, np.array([3])) p3 = find_peaks([3,2,1,0]) assert np.allclose(p3, np.array([0])) """ Explanation: Peak finding Write a function find_peaks that finds and returns the indices of the local maxima in a sequence. Your function should: Properly handle local maxima at the endpoints of the input array. Return a Numpy array of integer indices. Handle any Python iterable as input. End of explanation """ from sympy import pi, N pi_digits_str = str(N(pi, 10001))[2:] # YOUR CODE HERE raise NotImplementedError() assert True # use this for grading the pi digits histogram """ Explanation: Here is a string with the first 10000 digits of $\pi$ (after the decimal). Write code to perform the following: Convert that string to a Numpy array of integers. Find the indices of the local maxima in the digits of $\pi$. Use np.diff to find the distances between consequtive local maxima. Visualize that distribution using an appropriately customized histogram. End of explanation """
JorisBolsens/PYNQ
Pynq-Z1/notebooks/examples/overlay_download.ipynb
bsd-3-clause
# Using base.bit located in pynq package from pynq import Overlay ol = Overlay("base.bit") """ Explanation: Downloading Overlays This notebook demonstrates how to download an FPGA overlay and examine programmable logic state. 1. Instantiating an overlay To instantiate an overlay, a bitstream file name is passed to the Overlay class. The bitstream file does not need a full path if it resides in the pynq package, but a full path can be used for any bitstream located on the Linux file system. Two examples of overlay instantiation are shown below. End of explanation """ # Using the same bitstream, but with full path from pynq import Overlay ol = Overlay("/home/xilinx/pynq/bitstream/base.bit") """ Explanation: In the second case, users can use absolute file path to instantiate the overlay. End of explanation """ ol.download() ol.bitstream.timestamp """ Explanation: Now we can check the download timestamp for this overlay End of explanation """ from pynq import PL PL.bitfile_name PL.timestamp """ Explanation: 2. Examining the PL state While there can be multiple overlay instances in Python, there is only one bitstream that is currently loaded onto the programmable logic (PL). This bitstream state is held in the singleton class, PL, and is available for user queries. End of explanation """ ol.is_loaded() """ Explanation: Users can verify whether an overlay instance is currently loaded using the Overlay is_loaded() method End of explanation """ import time import matplotlib.pyplot as plt from pynq import Overlay ol1 = Overlay("base.bit") length = 50 log1 = [] for i in range(length): start = time.time() ol1.download() end = time.time() # Record milliseconds log1.append((end-start)*1000) # Draw the figure %matplotlib inline plt.plot(range(length), log1, 'ro') plt.title('Bitstream loading time (ms)') plt.axis([0, length, 0, 1000]) plt.show() del ol1 """ Explanation: 3. 
Overlay downloading overhead Finally, using Python, we can see the bitstream download time over 50 downloads. End of explanation """
statsmodels/statsmodels.github.io
v0.13.0/examples/notebooks/generated/robust_models_1.ipynb
bsd-3-clause
%matplotlib inline from statsmodels.compat import lmap import numpy as np from scipy import stats import matplotlib.pyplot as plt import statsmodels.api as sm """ Explanation: M-Estimators for Robust Linear Modeling End of explanation """ norms = sm.robust.norms def plot_weights(support, weights_func, xlabels, xticks): fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax.plot(support, weights_func(support)) ax.set_xticks(xticks) ax.set_xticklabels(xlabels, fontsize=16) ax.set_ylim(-0.1, 1.1) return ax """ Explanation: An M-estimator minimizes the function $$Q(e_i, \rho) = \sum_i~\rho \left (\frac{e_i}{s}\right )$$ where $\rho$ is a symmetric function of the residuals The effect of $\rho$ is to reduce the influence of outliers $s$ is an estimate of scale. The robust estimates $\hat{\beta}$ are computed by the iteratively re-weighted least squares algorithm We have several choices available for the weighting functions to be used End of explanation """ help(norms.AndrewWave.weights) a = 1.339 support = np.linspace(-np.pi * a, np.pi * a, 100) andrew = norms.AndrewWave(a=a) plot_weights( support, andrew.weights, ["$-\pi*a$", "0", "$\pi*a$"], [-np.pi * a, 0, np.pi * a] ) """ Explanation: Andrew's Wave End of explanation """ help(norms.Hampel.weights) c = 8 support = np.linspace(-3 * c, 3 * c, 1000) hampel = norms.Hampel(a=2.0, b=4.0, c=c) plot_weights(support, hampel.weights, ["3*c", "0", "3*c"], [-3 * c, 0, 3 * c]) """ Explanation: Hampel's 17A End of explanation """ help(norms.HuberT.weights) t = 1.345 support = np.linspace(-3 * t, 3 * t, 1000) huber = norms.HuberT(t=t) plot_weights(support, huber.weights, ["-3*t", "0", "3*t"], [-3 * t, 0, 3 * t]) """ Explanation: Huber's t End of explanation """ help(norms.LeastSquares.weights) support = np.linspace(-3, 3, 1000) lst_sq = norms.LeastSquares() plot_weights(support, lst_sq.weights, ["-3", "0", "3"], [-3, 0, 3]) """ Explanation: Least Squares End of explanation """ help(norms.RamsayE.weights) a = 0.3 support = 
np.linspace(-3 * a, 3 * a, 1000) ramsay = norms.RamsayE(a=a) plot_weights(support, ramsay.weights, ["-3*a", "0", "3*a"], [-3 * a, 0, 3 * a]) """ Explanation: Ramsay's Ea End of explanation """ help(norms.TrimmedMean.weights) c = 2 support = np.linspace(-3 * c, 3 * c, 1000) trimmed = norms.TrimmedMean(c=c) plot_weights(support, trimmed.weights, ["-3*c", "0", "3*c"], [-3 * c, 0, 3 * c]) """ Explanation: Trimmed Mean End of explanation """ help(norms.TukeyBiweight.weights) c = 4.685 support = np.linspace(-3 * c, 3 * c, 1000) tukey = norms.TukeyBiweight(c=c) plot_weights(support, tukey.weights, ["-3*c", "0", "3*c"], [-3 * c, 0, 3 * c]) """ Explanation: Tukey's Biweight End of explanation """ x = np.array([1, 2, 3, 4, 500]) """ Explanation: Scale Estimators Robust estimates of the location End of explanation """ x.mean() """ Explanation: The mean is not a robust estimator of location End of explanation """ np.median(x) """ Explanation: The median, on the other hand, is a robust estimator with a breakdown point of 50% End of explanation """ x.std() """ Explanation: Analogously for the scale The standard deviation is not robust End of explanation """ stats.norm.ppf(0.75) print(x) sm.robust.scale.mad(x) np.array([1, 2, 3, 4, 5.0]).std() """ Explanation: Median Absolute Deviation $$ median_i |X_i - median_j(X_j)|) $$ Standardized Median Absolute Deviation is a consistent estimator for $\hat{\sigma}$ $$\hat{\sigma}=K \cdot MAD$$ where $K$ depends on the distribution. For the normal distribution for example, $$K = \Phi^{-1}(.75)$$ End of explanation """ sm.robust.scale.iqr(x) """ Explanation: Another robust estimator of scale is the Interquartile Range (IQR) $$\left(\hat{X}{0.75} - \hat{X}{0.25}\right),$$ where $\hat{X}_{p}$ is the sample p-th quantile and $K$ depends on the distribution. The standardized IQR, given by $K \cdot \text{IQR}$ for $$K = \frac{1}{\Phi^{-1}(.75) - \Phi^{-1}(.25)} \approx 0.74,$$ is a consistent estimator of the standard deviation for normal data. 
End of explanation """ sm.robust.scale.qn_scale(x) """ Explanation: The IQR is less robust than the MAD in the sense that it has a lower breakdown point: it can withstand 25\% outlying observations before being completely ruined, whereas the MAD can withstand 50\% outlying observations. However, the IQR is better suited for asymmetric distributions. Yet another robust estimator of scale is the $Q_n$ estimator, introduced in Rousseeuw & Croux (1993), 'Alternatives to the Median Absolute Deviation'. Then $Q_n$ estimator is given by $$ Q_n = K \left\lbrace \vert X_{i} - X_{j}\vert : i<j\right\rbrace_{(h)} $$ where $h\approx (1/4){{n}\choose{2}}$ and $K$ is a given constant. In words, the $Q_n$ estimator is the normalized $h$-th order statistic of the absolute differences of the data. The normalizing constant $K$ is usually chosen as 2.219144, to make the estimator consistent for the standard deviation in the case of normal data. The $Q_n$ estimator has a 50\% breakdown point and a 82\% asymptotic efficiency at the normal distribution, much higher than the 37\% efficiency of the MAD. 
End of explanation """ np.random.seed(12345) fat_tails = stats.t(6).rvs(40) kde = sm.nonparametric.KDEUnivariate(fat_tails) kde.fit() fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax.plot(kde.support, kde.density) print(fat_tails.mean(), fat_tails.std()) print(stats.norm.fit(fat_tails)) print(stats.t.fit(fat_tails, f0=6)) huber = sm.robust.scale.Huber() loc, scale = huber(fat_tails) print(loc, scale) sm.robust.mad(fat_tails) sm.robust.mad(fat_tails, c=stats.t(6).ppf(0.75)) sm.robust.scale.mad(fat_tails) """ Explanation: The default for Robust Linear Models is MAD another popular choice is Huber's proposal 2 End of explanation """ from statsmodels.graphics.api import abline_plot from statsmodels.formula.api import ols, rlm prestige = sm.datasets.get_rdataset("Duncan", "carData", cache=True).data print(prestige.head(10)) fig = plt.figure(figsize=(12, 12)) ax1 = fig.add_subplot(211, xlabel="Income", ylabel="Prestige") ax1.scatter(prestige.income, prestige.prestige) xy_outlier = prestige.loc["minister", ["income", "prestige"]] ax1.annotate("Minister", xy_outlier, xy_outlier + 1, fontsize=16) ax2 = fig.add_subplot(212, xlabel="Education", ylabel="Prestige") ax2.scatter(prestige.education, prestige.prestige) ols_model = ols("prestige ~ income + education", prestige).fit() print(ols_model.summary()) infl = ols_model.get_influence() student = infl.summary_frame()["student_resid"] print(student) print(student.loc[np.abs(student) > 2]) print(infl.summary_frame().loc["minister"]) sidak = ols_model.outlier_test("sidak") sidak.sort_values("unadj_p", inplace=True) print(sidak) fdr = ols_model.outlier_test("fdr_bh") fdr.sort_values("unadj_p", inplace=True) print(fdr) rlm_model = rlm("prestige ~ income + education", prestige).fit() print(rlm_model.summary()) print(rlm_model.weights) """ Explanation: Duncan's Occupational Prestige data - M-estimation for outliers End of explanation """ dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data from 
matplotlib.patches import Ellipse fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot( 111, xlabel="log(Temp)", ylabel="log(Light)", title="Hertzsprung-Russell Diagram of Star Cluster CYG OB1", ) ax.scatter(*dta.values.T) # highlight outliers e = Ellipse((3.5, 6), 0.2, 1, alpha=0.25, color="r") ax.add_patch(e) ax.annotate( "Red giants", xy=(3.6, 6), xytext=(3.8, 6), arrowprops=dict(facecolor="black", shrink=0.05, width=2), horizontalalignment="left", verticalalignment="bottom", clip_on=True, # clip to the axes bounding box fontsize=16, ) # annotate these with their index for i, row in dta.loc[dta["log.Te"] < 3.8].iterrows(): ax.annotate(i, row, row + 0.01, fontsize=14) xlim, ylim = ax.get_xlim(), ax.get_ylim() from IPython.display import Image Image(filename="star_diagram.png") y = dta["log.light"] X = sm.add_constant(dta["log.Te"], prepend=True) ols_model = sm.OLS(y, X).fit() abline_plot(model_results=ols_model, ax=ax) rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(0.5)).fit() abline_plot(model_results=rlm_mod, ax=ax, color="red") """ Explanation: Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points Data is on the luminosity and temperature of 47 stars in the direction of Cygnus. End of explanation """ infl = ols_model.get_influence() h_bar = 2 * (ols_model.df_model + 1) / ols_model.nobs hat_diag = infl.summary_frame()["hat_diag"] hat_diag.loc[hat_diag > h_bar] sidak2 = ols_model.outlier_test("sidak") sidak2.sort_values("unadj_p", inplace=True) print(sidak2) fdr2 = ols_model.outlier_test("fdr_bh") fdr2.sort_values("unadj_p", inplace=True) print(fdr2) """ Explanation: Why? Because M-estimators are not robust to leverage points. 
End of explanation """ l = ax.lines[-1] l.remove() del l weights = np.ones(len(X)) weights[X[X["log.Te"] < 3.8].index.values - 1] = 0 wls_model = sm.WLS(y, X, weights=weights).fit() abline_plot(model_results=wls_model, ax=ax, color="green") """ Explanation: Let's delete that line End of explanation """ yy = y.values[:, None] xx = X["log.Te"].values[:, None] """ Explanation: MM estimators are good for this type of problem, unfortunately, we do not yet have these yet. It's being worked on, but it gives a good excuse to look at the R cell magics in the notebook. End of explanation """ params = [-4.969387980288108, 2.2531613477892365] # Computed using R print(params[0], params[1]) abline_plot(intercept=params[0], slope=params[1], ax=ax, color="red") """ Explanation: Note: The R code and the results in this notebook has been converted to markdown so that R is not required to build the documents. The R results in the notebook were computed using R 3.5.1 and robustbase 0.93. ```ipython %load_ext rpy2.ipython %R library(robustbase) %Rpush yy xx %R mod <- lmrob(yy ~ xx); %R params <- mod$coefficients; %Rpull params ``` ipython %R print(mod) Call: lmrob(formula = yy ~ xx) \--&gt; method = "MM" Coefficients: (Intercept) xx -4.969 2.253 End of explanation """ np.random.seed(12345) nobs = 200 beta_true = np.array([3, 1, 2.5, 3, -4]) X = np.random.uniform(-20, 20, size=(nobs, len(beta_true) - 1)) # stack a constant in front X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X] mc_iter = 500 contaminate = 0.25 # percentage of response variables to contaminate all_betas = [] for i in range(mc_iter): y = np.dot(X, beta_true) + np.random.normal(size=200) random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs)) y[random_idx] = np.random.uniform(-750, 750) beta_hat = sm.RLM(y, X).fit().params all_betas.append(beta_hat) all_betas = np.asarray(all_betas) se_loss = lambda x: np.linalg.norm(x, ord=2) ** 2 se_beta = lmap(se_loss, all_betas - beta_true) """ 
Explanation: Exercise: Breakdown points of M-estimator End of explanation """ np.array(se_beta).mean() all_betas.mean(0) beta_true se_loss(all_betas.mean(0) - beta_true) """ Explanation: Squared error loss End of explanation """
metpy/MetPy
dev/_downloads/5f6dfc4b913dc349eba9f04f6161b5f1/GINI_Water_Vapor.ipynb
bsd-3-clause
import cartopy.feature as cfeature import matplotlib.pyplot as plt import xarray as xr from metpy.cbook import get_test_data from metpy.io import GiniFile from metpy.plots import add_metpy_logo, add_timestamp, colortables # Open the GINI file from the test data f = GiniFile(get_test_data('WEST-CONUS_4km_WV_20151208_2200.gini')) print(f) """ Explanation: GINI Water Vapor Imagery Use MetPy's support for GINI files to read in a water vapor satellite image and plot the data using CartoPy. End of explanation """ ds = xr.open_dataset(f) x = ds.variables['x'][:] y = ds.variables['y'][:] dat = ds.metpy.parse_cf('WV') """ Explanation: Get a Dataset view of the data (essentially a NetCDF-like interface to the underlying data). Pull out the data and (x, y) coordinates. We use metpy.parse_cf to handle parsing some netCDF Climate and Forecasting (CF) metadata to simplify working with projections. End of explanation """ fig = plt.figure(figsize=(10, 12)) add_metpy_logo(fig, 125, 145) ax = fig.add_subplot(1, 1, 1, projection=dat.metpy.cartopy_crs) wv_norm, wv_cmap = colortables.get_with_range('WVCIMSS', 100, 260) wv_cmap.set_under('k') im = ax.imshow(dat[:], cmap=wv_cmap, norm=wv_norm, extent=(x.min(), x.max(), y.min(), y.max()), origin='upper') ax.add_feature(cfeature.COASTLINE.with_scale('50m')) add_timestamp(ax, f.prod_desc.datetime, y=0.02, high_contrast=True) plt.show() """ Explanation: Plot the image. We use MetPy's xarray/cartopy integration to automatically handle parsing the projection information. End of explanation """
statkclee/ThinkStats2
code/chap02soln-kor.ipynb
gpl-3.0
%matplotlib inline import chap01soln resp = chap01soln.ReadFemResp() resp.columns """ Explanation: 통계적 사고 (2판) 연습문제 (thinkstats2.com, think-stat.xwmooc.org)<br> Allen Downey / 이광춘(xwMOOC) 여성 응답자 파일을 읽어들여 변수명을 표시하시오. End of explanation """ import thinkstats2 hist = thinkstats2.Hist(resp.totincr) """ Explanation: 응답자 가족에 대한 총소득 <tt>totincr</tt> 히스토그램을 생성하시오. 코드를 해석하기 위해서, codebook을 살펴보시오. End of explanation """ import thinkplot thinkplot.Hist(hist, label='totincr') thinkplot.Show() """ Explanation: 히스토그램을 화면에 표시하시오. End of explanation """ hist = thinkstats2.Hist(resp.age_r) thinkplot.Hist(hist, label='age_r') thinkplot.Show() """ Explanation: 인터뷰 당시 응답자 나이 변수, <tt>age_r</tt>에 대한 히스토그램을 생성하시오. End of explanation """ hist = thinkstats2.Hist(resp.numfmhh) thinkplot.Hist(hist, label='numfmhh') thinkplot.Show() """ Explanation: 응답자 가구의 가구원수, <tt>numfmhh</tt>에 대한 히스토그램을 생성하시오. End of explanation """ hist = thinkstats2.Hist(resp.parity) thinkplot.Hist(hist, label='parity') thinkplot.Show() """ Explanation: 응답자가 낳은 자녀수, <tt>parity</tt>에 대한 히스토그램을 생성하시오. 이 분포를 어떻게 기술할까요? End of explanation """ print('The largest parity is ...', hist.Largest(10)) """ Explanation: Hist.Largest를 사용해서 <tt>parity</tt>의 가장 큰 수를 찾으시오. End of explanation """ resp.totincr.value_counts() ## 총임금 빈도수 계산 rich = resp[resp.totincr == 14] hist = thinkstats2.Hist(rich.parity) thinkplot.Hist(hist, label='rich parity') thinkplot.Show() """ Explanation: <tt>totincr</tt>를 사용해서 가장 높은 임금을 갖는 응답자를 고르시오. 고임금 응답자에 대해서만 <tt>parity</tt> 분포를 계산하시오. End of explanation """ hist.Largest(10) """ Explanation: 고임금 응답자에 대한 가장 큰 <tt>parity</tt>를 구하시오. End of explanation """ rich = resp[resp.totincr == 14] poor = resp[resp.totincr < 14] print('Rich mean value is: ', rich.parity.mean()) print('Poor mean value is: ', poor.parity.mean()) """ Explanation: 고임금과 고임금이 아닌 집단에 대한 평균 <tt>parity</tt>를 비교하시오. 
End of explanation """ hist = thinkstats2.Hist(resp.fmarno) thinkplot.Hist(hist, label='famrno') thinkplot.Show() """ Explanation: 다른 흥미로워 보이는 변수도 조사하시오. End of explanation """
JeffreyWang98/JeffreyWang98.github.io
Projects/Grains/Grains.ipynb
mit
import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt """ Explanation: Tutorial content This tutorial will show how to use the Feed Grains open data source, provided by the USDA Economic Research Service to learn about grain production and historical effects. This tutorial will also explore grain prices in relation to production levels and determine whether there is evidence of some direct correlation, as people would expect based on supply and demand expectations. Before we start the tutorial though, let's just give a simple introduction to the different grain types we'll be talking about! Barley: Member of the grass family, a major cereal grain. First cultivated in Eurasia around 10,000 years ago. Versatile grain that can be grown in many climates globally. Nutlike flavor and chewy. Grown for human consumption, unlike oats, it's not usually grown for animal feed, but for beer. Hay: Grass, legumes, or other assorted plants that have been cut, dried, and stored. Used as animal fodder for grazing animals like cows, horses, goats, sheep. Generally not used for human consumption, but some crazy people may eat it). One of, if not the most used feed for livestock. Easy to grow since basically anything can turn into hay. Corn: Also known as maize, a large grain first domesticated in southern Mexico around 10,000 years ago. It is a very popular grain, one of the most widely grown foods in the world. People eat it, but also use it for a variety of other stuff like fuel. Oats: Also known as the common oat, a cereal grain that is grown for its seed. Another popular grain that is widely grown. Although suitable for human consumption, in the form of oatmeal and rolled oats, typically it is grown for livestock feed. Sorghum: A genus of flower plants, the majority of people have no idea what it is (I never heard of it until making this tutorial). 
Versatile crop that can be grown as a grain or sweet crop, supposedly one of the top 5 cereal crops in the world (probably not in the US though).
Processing Data As you can see, the column names are odd and meaningless without prior knowledge of the dataset. To help with that, here is a list describing what each column represents: | Column name | Description | | :------------------------ | :--------- | | SC_Group_ID | # ID of data group | | SC_Group_Desc | description of data group | | SC_GroupCommod_ID | # ID of commodity group | | SC_GroupCommod_Desc | description of commodity group | | SC_Geography_ID | # ID of data location | | SortOrder | weighted sort order by country? unsure | | SC_GeographyIndented_Desc | description of location | | SC_Commodity_ID | # ID of specific commodity type | | SC_Commodity_Desc | description of specific commodity type | | SC_Attribute_ID | # ID of data's attribute | | SC_Attribute_Desc | description of data's attribute | | SC_Unit_ID | # ID of unit type | | SC_Unit_Desc | description of unit type | | Year_ID | year the data represents | | SC_Frequency_ID | # ID of frequency data is collected | | SC_Frequency_Desc | description of frequency data is collected | | Timeperiod_ID | # ID of timeperiod the data represents | | Timeperiod_Desc | description of timeperiod the data represents | | Amount | amount of units describing the attribute for the given commodity type | Note that 'commodity' is asynchronous to grain for the most part. Since the data includes products such as malted barley and alcohol, which aren't grains, we will use 'commodity'. You probably realized that there are two columns describing each column (the ID and description). To make it easier to work with and shrink the width of the table, we will now delete all the 'duplicate' ID columns by using the code below. End of explanation """ df.columns = ['Group', 'Commod_Group', 'Location', 'Commod', 'Attribute', 'Unit', 'Year', 'Frequency', 'Time Period', 'Amount'] print(df.shape) df.head() """ Explanation: You can see that the table is much cleaner, now that there are no more duplicate columns. 
Also note that you can only run the above code once on the dataframe. Running it more times will result in an error since the columns can't be found, as they have already been deleted. Now that we have cleared the duplicate columns, lets rename the columns to be more descriptive and intuitive for our purposes. You can use the following line to manually change the column names. End of explanation """ df_us = df[df.Location.isin(['United States'])] print(df_us.shape) """ Explanation: This looks much more pleasing to look at than the initial table (in my opinion, you might think different, but at least the table is smaller). You might've noticed that there's an extra line before the table above. This is the shape of the dataframe. It consists of a tuple (# rows, # columns). There is a huge amount of data totaling to over 400,000 rows. Computers are fast enough that now they can process that amount of data relatively quickly, but we will still be extracting only the data we need. It is a good habit since the dataframe will be easier to work with after clearing out unnecessary data, as well as if working with even larger datasets in the future, say trillions of values. For the purposes of this tutorial, we will only be looking at the data for the US, so we can begin by extracting all the rows whose Location is United States. Run the following code to achieve this. End of explanation """ df_us = df_us.sort_values(by=['Group']) names = df_us['Group'].unique().tolist() names """ Explanation: You can see that now the number of rows has decreased from over 400,000 to just a little over 40,000. We saved the new extracted dataframe into a new variable df_us. This way, if we still need to find something in the original dataset, we can easily reference it without having to load the data again. There's multiple groups that the data is categorized in. Before we can look at them, let's first sort the dataframe and find a list of unique groups. 
End of explanation """ data = {} for i in range(0,6): data[names[i]] = df_us[df_us.Group.isin([names[i]])] """ Explanation: We now have a list of unique groups in names. The next step before we can begin to analyze our data is to filter the dataframe again. We will achieve this by creating 6 different dataframes, one for each data group. End of explanation """ data['Supply and use'].head() """ Explanation: Now that we have separated the data into their respective groups, we can begin analyzing and visualizing it. Supply and Use We will start by looking at the Supply and use data group. Let's start by looking at the corresponding data. End of explanation """ sau = data['Supply and use'] sau = sau[sau.Attribute.isin(['Beginning stocks'])] """ Explanation: We can see that even within the data group, there are many other variables that we can use to try and analyze the data. Following, we will attempt to analyze and graph the data by the annual beginning stocks for grains. Annual Grain Stocks To start, let's look at annual beginning stock of each commodity. Before we can begin looking at the data, once again, we need to filter and sort the data. In the Attribute column, you can see that there are a variety of attributes. For this section, we will only be looking at Beginning stocks. First, lets extract only the rows whose attribute is Beginning stocks. End of explanation """ sau = sau[sau.Frequency.isin(['Annual'])] """ Explanation: You will notice that in the Frequency column, there are Annual and Quarterly data values. For simplicity, we will only be looking at annual values so we will extract only the rows with Annual. End of explanation """ sau = sau.sort_values(by='Year') sau.head() """ Explanation: Now that we have the data we need, let's sort it by Year and take a look! 
End of explanation """ for i, group in sau.groupby('Commod_Group'): plot = group.plot(x='Year', y='Amount', title=str(i)) y = 'Beginning Stocks (' + group.Unit.iloc[0] + ')' plot.set(xlabel='Year', ylabel=y) plt.show() """ Explanation: Finally, we can start to graph and visualize the data! To begin, we will make line graphs for each commodity to view the change in the amount of beginning stock for each year. Hopefully we will find some relationship! End of explanation """ data['Prices'].head() """ Explanation: Personally, I would think that the beginning stock of grains each year would be increasing, or at least have an increasing trend, due to improvements in agricultural techniques. However, it is clear based on the graphs that something happened between 1986 and 1988. One event must have led to a massive surplus in 1986, while another event must have led to a huge (deficit?) in 1988. Through a quick search on "corn supply 1986" and "corn supply 1987", I managed to find 2 articles that explain the phenomena. http://agfax.com/2016/11/29/grain-market-reminded-of-1986-king-daddy-year-of-corn-stocks-dtn/ The article in the link above describes how there were "four consecutive years of good crop weather" which led to not only the US, but the entire world to have excess grain supplies. Only about 66% of corn stock was used, explaining the massive surplus in the time period. http://articles.latimes.com/1988-07-13/business/fi-5672_1_food-prices This article compares the staggering grain production in 1988 compared to 1987. There seems to have been a severe drought that would kill off an estimated 30%-45% of projected harvest. As a result, the surplus that built up the past years must have been depleted in an instant, over the following years of bad weather. Based on this, it seems that weather has a much greater effect on grain supply than agricultural techniques. 
However, this correlation can't be concluded without looking at weather data and trying to find a relation between the two datasets. We won't be looking at that today since this is a tutorial to get started in looking at grain data. Instead, we will explore more subsets of data in FeedGrains.csv, specifically prices! Prices Annual Grain Prices Just looking at one correlation of data is no fun since finding a relation is so simple. Next, let's take a look at the prices of the grains above! We will begin by looking at annual grain prices and graph the data to see if we can find a basic visual correlation. Prices won't be found in the Supply and use group, so we will be looking at the Prices data group for this part. End of explanation """ p = data['Prices'] p = p.sort_values(by='Year') p.head() """ Explanation: Similar to the Supply and use data group, Prices also has a variety of information. Once again, we will start by working to filter out unnecessary data. Let's start by sorting the data by year and see where to go from there. End of explanation """ p = p[p.Frequency.isin(['Annual'])] p.head() """ Explanation: Once again, we can see that there is detailed data for each quarter of the year or each month, but for simplicity, we will again, be looking at annual prices. This consistency with the annual grain stocks data will also allow us to easily perform a comparison. Let's now extract only the annual data... End of explanation """ p['Commod_Group'].unique().tolist() """ Explanation: You may have noticed that there is some difference between the two datasets, the Prices dataset has a Hay group, while the Supply and use dataset has Feed grains. These two seem to be the same group of data, considering that hay is used as a feed grain for animals (most people choose not to eat hay and instead, eat oats, sorghum, barley, and corn). Before we come to this conclusion, let's take a look at what groups are in the Prices datasets. 
End of explanation """ p = p[p.Attribute.isin(['Prices received by farmers'])] p.head() """ Explanation: Just like the Supply and use dataset, there are 5 groups, so we will be assuming that Hay corresponds to Feed grains. You probably also noticed that the commodities in Prices differs from the group, as compared to the Supply and use dataset. Although there are many different types of specific grains and farmers are paid different prices depending on the specific product being made from their produce, we will be looking at the grain groups as a whole so it will be easier to compare. Next, we see that there are newer attributes in recent years, distinguishing between farmers' pay on the high end, low end, and average. We will take only the average pay rate in order to maximize the data we can use (early years don't distinguish between high and low end). We can extract only the average prices with: End of explanation """ p = p[p.Unit != 'Dollars per cwt'] p.head() """ Explanation: After filtering some more data, we can see that there are some more issues with data consistency. Sorghum is listed under two types of units, Dollars per bushel and Dollars per cwt. We will only be looking at Dollars per bushel so next, we will remove those sets of data. The code below is slightly different from previous methods of extraction, because instead of extracting data with a certain value, we will extract data that doesn't contain a certain value, effectively removing the unwanted data. End of explanation """ for i, group in p.groupby('Commod'): if i == 'Corn' or i == 'Hay all (dry)' or i == 'Barley' or i == 'Oats' or i == 'Sorghum': plot = group.plot(x='Year', y='Amount', title=str(i)) plot.set(xlabel='Year', ylabel=group.Unit.iloc[0]) plt.show() """ Explanation: There doesn't seem to be any more data inconsistencies so we can finally begin to graph the data. We will be graphing based on Commod this time instead of Commod_Group since Commod is more consistent. 
Hopefully we can find some trends! End of explanation """ for i, group in p.groupby('Commod'): if i == 'Corn' or i == 'Hay all (dry)' or i == 'Barley' or i == 'Oats' or i == 'Sorghum': plot = group.plot(x='Year', y='Amount', title=str(i), xlim=(1975, 2017)) plot.set(xlabel='Year', ylabel=group.Unit.iloc[0]) plt.show() """ Explanation: We can see that there is inconsistency in the start years for the datasets, so we will set a x-limit so it will be easier to compare the datasets. End of explanation """
dolittle007/dolittle007.github.io
notebooks/GP-introduction.ipynb
gpl-3.0
%matplotlib inline import matplotlib.pyplot as plt import matplotlib.cm as cmap cm = cmap.inferno import numpy as np import scipy as sp import theano import theano.tensor as tt import theano.tensor.nlinalg import sys sys.path.insert(0, "../../..") import pymc3 as pm """ Explanation: Gaussian Process Regression Gaussian Process regression is a non-parametric approach to regression or data fitting that assumes that observed data points $y$ are generated by some unknown latent function $f(x)$. The latent function $f(x)$ is modeled as being multivariate normally distributed (a Gaussian Process), and is commonly denoted \begin{equation} f(x) \sim \mathcal{GP}(m(x;\theta), \, k(x, x';\theta)) \,. \end{equation} $m(x ; \theta)$ is the mean function, and $k(x, x' ;\theta)$ is the covariance function. In many applications, the mean function is set to $0$ because the data can still be fit well using just covariances. $\theta$ is the set of hyperparameters for either the mean or covariance function. These are the unknown variables. They are usually found by maximizing the marginal likelihood. This approach is much faster computationally than MCMC, but produces a point estimate, $\theta_{\mathrm{MAP}}$. The data in the next two examples is generated by a GP with noise that is also gaussian distributed. In sampling notation this is, \begin{equation} \begin{aligned} y & = f(x) + \epsilon \ f(x) & \sim \mathcal{GP}(0, \, k(x, x'; \theta)) \ \epsilon & \sim \mathcal{N}(0, \sigma^2) \ \sigma^2 & \sim \mathrm{Prior} \ \theta & \sim \mathrm{Prior} \,. \end{aligned} \end{equation} With Theano as a backend, PyMC3 is an excellent environment for developing fully Bayesian Gaussian Process models, particularly when a GP is component in a larger model. The GP functionality of PyMC3 is meant to be lightweight, highly composable, and have a clear syntax. This example is meant to give an introduction to how to specify a GP in PyMC3. 
End of explanation """ np.random.seed(20090425) n = 20 X = np.sort(3*np.random.rand(n))[:,None] with pm.Model() as model: # f(x) l_true = 0.3 s2_f_true = 1.0 cov = s2_f_true * pm.gp.cov.ExpQuad(1, l_true) # noise, epsilon s2_n_true = 0.1 K_noise = s2_n_true**2 * tt.eye(n) K = cov(X) + K_noise # evaluate the covariance with the given hyperparameters K = theano.function([], cov(X) + K_noise)() # generate fake data from GP with white noise (with variance sigma2) y = np.random.multivariate_normal(np.zeros(n), K) fig = plt.figure(figsize=(14,5)); ax = fig.add_subplot(111) ax.plot(X, y, 'ok', ms=10); ax.set_xlabel("x"); ax.set_ylabel("f(x)"); """ Explanation: Example 1: Non-Linear Regression This is an example of a non-linear fit in a situation where there isn't much data. Using optimization to find hyperparameters in this situation will greatly underestimate the amount of uncertainty if using the GP for prediction. In PyMC3 it is easy to be fully Bayesian and use MCMC methods. We generate 20 data points at random x values between 0 and 3. The true values of the hyperparameters are hardcoded in this temporary model. End of explanation """ Z = np.linspace(0,3,100)[:,None] with pm.Model() as model: # priors on the covariance function hyperparameters l = pm.Uniform('l', 0, 10) # uninformative prior on the function variance log_s2_f = pm.Uniform('log_s2_f', lower=-10, upper=5) s2_f = pm.Deterministic('s2_f', tt.exp(log_s2_f)) # uninformative prior on the noise variance log_s2_n = pm.Uniform('log_s2_n', lower=-10, upper=5) s2_n = pm.Deterministic('s2_n', tt.exp(log_s2_n)) # covariance functions for the function f and the noise f_cov = s2_f * pm.gp.cov.ExpQuad(1, l) y_obs = pm.gp.GP('y_obs', cov_func=f_cov, sigma=s2_n, observed={'X':X, 'Y':y}) with model: trace = pm.sample(2000) """ Explanation: Since there isn't much data, there will likely be a lot of uncertainty in the hyperparameter values. 
We assign prior distributions that are uniform in log space, suitable for variance-type parameters. Each hyperparameter must at least be constrained to be positive valued by its prior. None of the covariance function objects have a scaling coefficient built in. This is because random variables, such as s2_f, can be multiplied directly with a covariance function object, gp.cov.ExpQuad. The last line is the marginal likelihood. Since the observed data $y$ is also assumed to be multivariate normally distributed, the marginal likelihood is also multivariate normal. It is obtained by integrating out $f(x)$ from the product of the data likelihood $p(y \mid f, X)$ and the GP prior $p(f \mid X)$, \begin{equation} p(y \mid X) = \int p(y \mid f, X) p(f \mid X) df \end{equation} The call in the last line f_cov.K(X) evaluates the covariance function across the inputs X. The result is a matrix. The sum of this matrix and the diagonal noise term are used as the covariance matrix for the marginal likelihood. End of explanation """ pm.traceplot(trace[1000:], varnames=['l', 's2_f', 's2_n'], lines={"l": l_true, "s2_f": s2_f_true, "s2_n": s2_n_true}); """ Explanation: The results show that the hyperparameters were recovered pretty well, but definitely with a high degree of uncertainty. Lets look at the predicted fits and uncertainty next using samples from the full posterior. End of explanation """ with model: gp_samples = pm.gp.sample_gp(trace[1000:], y_obs, Z, samples=50, random_seed=42) fig, ax = plt.subplots(figsize=(14,5)) [ax.plot(Z, x, color=cm(0.3), alpha=0.3) for x in gp_samples] # overlay the observed data ax.plot(X, y, 'ok', ms=10); ax.set_xlabel("x"); ax.set_ylabel("f(x)"); ax.set_title("Posterior predictive distribution"); """ Explanation: The sample_gp function draws realizations of the GP from the predictive distribution. 
End of explanation """ np.random.seed(200) n = 150 X = np.sort(40*np.random.rand(n))[:,None] # define gp, true parameter values with pm.Model() as model: l_per_true = 2 cov_per = pm.gp.cov.Cosine(1, l_per_true) l_drift_true = 4 cov_drift = pm.gp.cov.Matern52(1, l_drift_true) s2_p_true = 0.3 s2_d_true = 1.5 s2_w_true = 0.3 periodic_cov = s2_p_true * cov_per drift_cov = s2_d_true * cov_drift signal_cov = periodic_cov + drift_cov noise_cov = s2_w_true**2 * tt.eye(n) K = theano.function([], signal_cov(X, X) + noise_cov)() y = np.random.multivariate_normal(np.zeros(n), K) """ Explanation: Example 2: A periodic signal in non-white noise This time let's pretend we have some more complex data that we would like to decompose. For the sake of example, we simulate some data points from a function that 1. has a fainter periodic component 2. has a lower frequency drift away from periodicity 3. has additive white noise As before, we generate the data using a throwaway PyMC3 model. We consider the sum of the drift term and the white noise to be "noise", while the periodic component is "signal". In GP regression, the treatment of signal and noise covariance functions is identical, so the distinction between signal and noise is somewhat arbitrary. End of explanation """ fig = plt.figure(figsize=(12,5)); ax = fig.add_subplot(111) ax.plot(X, y, '--', color=cm(0.4)) ax.plot(X, y, 'o', color="k", ms=10); ax.set_xlabel("x"); ax.set_ylabel("f(x)"); """ Explanation: In the plot of the observed data, the periodic component is barely distinguishable by eye. It is plausible that there isn't a periodic component, and the observed data is just the drift component and white noise. 
End of explanation """ with pm.Model() as model: # prior for periodic lengthscale, or frequency l_per = pm.Uniform('l_per', lower=1e-5, upper=10) # prior for the drift lengthscale hyperparameter l_drift = pm.Uniform('l_drift', lower=1e-5, upper=10) # uninformative prior on the periodic amplitude log_s2_p = pm.Uniform('log_s2_p', lower=-10, upper=5) s2_p = pm.Deterministic('s2_p', tt.exp(log_s2_p)) # uninformative prior on the drift amplitude log_s2_d = pm.Uniform('log_s2_d', lower=-10, upper=5) s2_d = pm.Deterministic('s2_d', tt.exp(log_s2_d)) # uninformative prior on the white noise variance log_s2_w = pm.Uniform('log_s2_w', lower=-10, upper=5) s2_w = pm.Deterministic('s2_w', tt.exp(log_s2_w)) # the periodic "signal" covariance signal_cov = s2_p * pm.gp.cov.Cosine(1, l_per) # the "noise" covariance drift_cov = s2_d * pm.gp.cov.Matern52(1, l_drift) y_obs = pm.gp.GP('y_obs', cov_func=signal_cov + drift_cov, sigma=s2_w, observed={'X':X, 'Y':y}) with model: trace = pm.sample(2000, step=pm.NUTS(integrator="two-stage"), init=None) pm.traceplot(trace[1000:], varnames=['l_per', 'l_drift', 's2_d', 's2_p', 's2_w'], lines={"l_per": l_per_true, "l_drift": l_drift_true, "s2_d": s2_d_true, "s2_p": s2_p_true, "s2_w": s2_w_true}); """ Explanation: Lets see if we can infer the correct values of the hyperparameters. End of explanation """ ax.get_ybound() fig = plt.figure(figsize=(12,6)); ax = fig.add_subplot(111) ax.hist(trace['s2_p', 1000:], 100, range=(0,4), color=cm(0.3), ec='none'); ax.plot([0.3, 0.3], [0, ax.get_ybound()[1]], "k", lw=2); ax.set_title("Histogram of s2_p"); ax.set_ylabel("Number of samples"); ax.set_xlabel("s2_p"); """ Explanation: Some large samples make the histogram of s2_p hard to read. Below is a zoomed in histogram. 
End of explanation """ Z = np.linspace(0, 40, 100).reshape(-1, 1) with model: gp_samples = pm.gp.sample_gp(trace[1000:], y_obs, Z, samples=50, random_seed=42, progressbar=False) fig, ax = plt.subplots(figsize=(14,5)) [ax.plot(Z, x, color=cm(0.3), alpha=0.3) for x in gp_samples] # overlay the observed data ax.plot(X, y, 'o', color="k", ms=10); ax.set_xlabel("x"); ax.set_ylabel("f(x)"); ax.set_title("Posterior predictive distribution"); """ Explanation: Comparing the histograms of the results to the true values, we can see that the PyMC3's MCMC methods did a good job estimating the true GP hyperparameters. Although the periodic component is faintly apparent in the observed data, the GP model is able to extract it with high accuracy. End of explanation """
zingale/pyreaclib
library-examples-filtering.ipynb
bsd-3-clause
%matplotlib inline import pynucastro as pyna library_file = '20180201ReaclibV2.22' mylibrary = pyna.rates.Library(library_file) """ Explanation: Using RateFilter to Search Rates in a Library The Library class in pynucastro provides a high level interface for reading files containing one or more Reaclib rates and then filtering these rates based on user-specified criteria for the nuclei involved in the reactions. We can then use the resulting rates to build a network. This example uses a Reaclib snapshot downloaded from: https://groups.nscl.msu.edu/jina/reaclib/db/library.php?action=viewsnapshots. Reading a rate snapshot The Library class will look for the library file in the working directory or in the pynucastro/library subdirectory of the pynucastro package. When the constructor is supplied a file name, pynucastro will read the contents of this file and interpret them as Reaclib rates in either the Reaclib 1 or 2 formats. The Library then stores the rates from the file as Rate objects. End of explanation """ c12_inexact_filter = pyna.rates.RateFilter(reactants=['c12'], exact=False) """ Explanation: Filtering the Library This example introduces the RateFilter class which allows us to define a set of reactants and products to search for in a Library object. Inexact Filtering Inexact filtering is like using wildcards: in the following example, the rate filter we define will match any rates in which $\mathrm{^{12}C}$ is a reactant. End of explanation """ c12_inexact_library = mylibrary.filter(c12_inexact_filter) print(c12_inexact_library) """ Explanation: Once we construct a RateFilter object, we can apply it to our Library by passing it to the Library.filter function. Library.filter returns a new Library object containing the rates that match our RateFilter. We can print a Library to see the rates it contains. In parentheses the rate identifier is printed, showing the Reaclib rate label as well as whether the rate is forward or reverse. 
End of explanation """ cago = c12_inexact_library.get_rate('c12 + he4 --> o16 <nac2_reaclib>') cago.plot() """ Explanation: The rate identifiers above can be used to access individual Rate objects within a Library as follows: End of explanation """ c12_exact_filter = pyna.rates.RateFilter(reactants=['c12', 'c12']) c12_exact_library = mylibrary.filter(c12_exact_filter) print(c12_exact_library) """ Explanation: Exact Filtering Exact filtering is useful when you have a specific rate in mind or a specific combination of reactants or products. In the following example, we look for all rates of the form $\mathrm{^{12}C + ^{12}C \rightarrow \ldots}$ To use exact filtering, omit the exact keyword to the RateFilter constructor, as it is turned on by default. Exact filtering does not mean all the nuclei involved in the rate must be specified, it means that all filtering options passed to the RateFilter constructor are strictly applied. In this case, the filter will return rates with exactly two reactants, both of which are $\mathrm{^{12}C}$. However, the filter places no constraint on the products or number of products in the rate. End of explanation """ alpha_library = pyna.rates.Library() capture = pyna.rates.Nucleus('he4') seed = pyna.rates.Nucleus('c12') while True: ac_filter = pyna.rates.RateFilter(reactants=[capture, seed], max_products=1) ac_library = mylibrary.filter(ac_filter) alpha_library = alpha_library + ac_library heavy = ac_library.heaviest() ac_filter_inv = pyna.rates.RateFilter(reactants=[heavy], products=[capture, seed]) ac_inv_library = mylibrary.filter(ac_filter_inv) alpha_library = alpha_library + ac_inv_library print(heavy) if heavy.A == 56: break else: seed = heavy """ Explanation: Example: Building an Alpha Capture Network In the next example, we use rate filtering to iteratively construct a Library containing the alpha capture rates linking $\mathrm{^{12}C}$ to $\mathrm{^{56}Ni}$. 
After finding each successive link in the alpha capture chain, we call Library.heaviest() to find the heaviest nucleus in the filtered rates. This corresponds to the nucleus with the largest mass number, and in case of a tie between isobars, this returns the isobar with the smallest atomic number. We use this feature to find the reverse rate for each alpha capture reaction. In the example below, we add each filtered library to our alpha capture library alpha_library, initialized as an empty Library. The Library class supports the addition operator by returning a new library containing the rates in the two libraries we added together. This example also introduces the max_products keyword, which specifies we are looking for reactions producing at most max_products product nuclei. Similarly, the RateFilter constructor supports the following keywords constraining the number of reactants and products: min_reactants max_reactants min_products max_products Because we have omitted the argument exact=False, the filter constraints we apply are exact. End of explanation """ print(alpha_library) """ Explanation: We will next print out the library we constructed, seeing that we have both forward and reverse rates for the alpha chain. Note that at this time pynucastro has not yet implemented nuclear partition functions, so these reverse rates are calculated only from detailed balance in the Reaclib library. End of explanation """ alpha_network = pyna.networks.PythonNetwork(libraries=alpha_library) """ Explanation: Next we can create a reaction network from our filtered alpha capture library by passing our library to a network constructor using the libraries keyword. End of explanation """ print(alpha_network.network_overview()) alpha_network.plot() """ Explanation: And finally we can print an overview of the network as well as a Z-N plot of the nuclei linked via the reactions we selected. End of explanation """
laantoi/fun-with-python
coin_games.ipynb
mit
import numpy as np import matplotlib.pyplot as plt """ Explanation: Coin games: classical and quantum In this notebook we play a set of interesting coin tossing games using coins obeying classical (games 1-2) and quantum (game 3) mechanics. Game 1: Gambler's ruin A gambler enters the casino with a bankroll of size $k$, and repeatedly plays a game where one wins 1 with probability $p$ and loses 1 with probability $1 - p$. The gambler stops playing if the bankroll reaches $0$ or the house limit $N$. We can ask questions such as <ol> <li>What is the probability that the gambler loses it all / hits the house limit / neither?</li> <li>What is the probability that the gambler loses it all in a game without a house limit?</li> <li>How many games will the gambler need to play on average to leave the casino?</li> </ol> Game 2: Questionable gamble We are presented with the following bet: On flipping 1000 fair coins, if 550 or more land on heads we win EUR 20; otherwise the bet is lost. Should we play this game for EUR 10? Game 3: Gambling in a quantum casino (flipping a Hadamard coin) We encountered a classical random walk in Game 1. What happens when the gambler is allowed to simultaneosly win and lose conditional on the outcome of a quantum coin? What happens when we gamble in a casino obeying quantum laws? We learned in Game 1 what happens in the classical casino where the gambler either wins or loses $-$ and not both at the same time $-$ in a given game, but in the quantum world things are not quite so simple. End of explanation """ u = lambda k,p,N: (((1-p)/p)**k -((1-p)/p)**N) / (1 - ((1-p)/p)**N); # Evaluating u for p < 1/2 runs into numerical problems with the large fractions. Let us regroup: u1 = lambda k,p,N: (((1-p)/p)**(k-N) -1) / (((1-p)/p)**(-N) - 1); uhalf = lambda k,N: 1 - k/N; def SimulateGame1_1(k,p,N): NGames = 50; # number of games we simulate. The higher the closer to the 'frequentist' prob. 
ret = []; for ktmp in k: ktmp = int(ktmp); Nruins = 0; # number of ruins we have encountered for this k for i1 in range(NGames): ktmp1 = ktmp; while (True): if (ktmp1 == 0): Nruins += 1; break; if (ktmp1 == N): break; if (np.random.uniform(0,1) <= p): ktmp1 += 1; else: ktmp1 += -1; ret.append(Nruins/NGames); # prob of ruin for this k return ret; N = 100; krange=np.linspace(0, N, num=100); plist = [0.51,0.55,0.7]; p1list = [0.3,0.45,0.49]; for p in p1list: plt.plot(krange, u1(krange,p,N), linewidth=2, label=" p = %f "%(p)); plt.plot(krange, SimulateGame1_1(krange,p,N),color='#c42d41', alpha=0.6); plt.plot(krange, uhalf(krange,N), linewidth=2, label=" p = 0.5 "); plt.plot(krange, SimulateGame1_1(krange,0.5,N),color='#c42d41', alpha=0.6, label = 'Simulated'); for p in plist: plt.plot(krange, u(krange,p,N), linewidth=2, label=" p = %f "%(p)); plt.plot(krange, SimulateGame1_1(krange,p,N),color='#c42d41', alpha=0.6); plt.legend(bbox_to_anchor=(1.04,1), loc="upper left"); plt.title('Gambler\'s ruin, house limit = %d'%(N)); plt.xlabel('Initial bankroll');plt.ylabel('Probability of ruin'); plt.show(); """ Explanation: Playing the games Game 1 Game 1 represents a Markov chain on a countable state space that follows a random walk. If we denote by the random variable $X_n$ the bankroll at toss $n$ with $X_0 = k$, then the sequence $\lbrace X_n : n \in \mathbb{N} \rbrace$ is a Markov process. The state at toss $n+1$ only depends on the state at $n$; there is no memory. Let's denote the probability that the gambler loses it all with a bankroll of $k$ by $u_k$. To obtain the asked probabilities, we can condition on the first toss using the law of total probability: $$\begin{split}u_k &= \text{P(ruin | win first)P(win first) + P(ruin | lose first)P(lose first)} \ &= u_{k + 1}p + u_{k - 1}(1 - p) \end{split} $$ This is defined for $0 < k < M$ with the boundary conditions $u_0 = 1$ and $u_N = 0$. 
The solution is $$ u_k = \cases{\frac{\left(\frac{1-p}{p}\right)^k - \left(\frac{1-p}{p}\right)^N}{1 - \left(\frac{1-p}{p}\right)^N}, \qquad p \neq \frac{1}{2} \ 1 - \frac{k}{N}, \,\,\,\,\qquad\qquad p = \frac{1}{2} } $$ Similarly, the probability $v_k$ that the gambler reaches the house limit starting with a bankroll of $k$ is defined by the same recurrence relation, but the boundary conditions are $v_0 = 0$ and $v_N = 1$. We find $v_k = 1 - u_k$. The probability that the gambler plays forever is zero. End of explanation """ uNinf = lambda k,p: ((1-p)/p)**k; uNinfhalf = lambda k: k**0; krange=np.linspace(0, 5000, num=1000); plist = [0.5001,0.501,0.6]; plt.plot(krange, uNinfhalf(krange), linewidth=2, label=" p =< 0.5 "); for p in plist: plt.plot(krange, uNinf(krange,p), linewidth=2, label=" p = %f "%(p)); plt.legend(loc="best"); plt.title('Gambler\'s ruin, no house limit'); plt.xlabel('Initial bankroll');plt.ylabel('Probability of ruin'); plt.show(); """ Explanation: Omitting the detailed proof, taking the limit $N \to \infty$ gives the correct result for the case without a house limit, $$ \lim_{N \to \infty} u_k = \cases{\left(\frac{1-p}{p}\right)^k, \qquad p > \frac{1}{2} \ 1, \,\,\,\,\qquad\qquad p \leq \frac{1}{2} } $$ Therefore, in a fair and unfavourably biased game the gambler always loses no matter the initial bankroll size. In a game that is favourably biased, however, there is a finite probability to never go bankcrupt, which becomes better the larger the initial bankroll and more favourable the bias. End of explanation """ E = lambda k,p,N: (N/(2*p - 1))*((1 - ((1-p)/p)**k)/(1 - ((1-p)/p)**N)) - k/(2*p - 1); # Evaluating E for p < 1/2 runs into numerical problems with the large fractions. Let us regroup: E1 = lambda k,p,N: (N/(2*p - 1))*(((1 - ((1-p)/p)**k)*((1-p)/p)**(-N))/(((1-p)/p)**(-N) - 1)) - k/(2*p - 1); Ehalf = lambda k,N: N*k - k**2; def SimulateGame1_3(k,p,N): NGames = 100; # number of games we simulate, and then average over. 
ret = []; for ktmp in k: ktmp = int(ktmp); playresults = []; # temp array of results that we finally average over for i1 in range(NGames): Nplays = 1; # number of games we have managed to play for this k ktmp1 = ktmp; while (True): if (ktmp1 == 0) or (ktmp1 == N): break; if (np.random.uniform(0,1) <= p): ktmp1 += 1; else: ktmp1 += -1; Nplays += 1; playresults.append(Nplays); ret.append(np.mean(playresults)); # Expected number of games for this k return ret; N = 100; krange=np.linspace(0, N, num=100); plist = [0.51,0.55,0.7]; p1list = [0.3,0.45,0.49]; for p in p1list: plt.plot(krange, E1(krange,p,N), linewidth=2, label=" p = %f "%(p)); plt.plot(krange, SimulateGame1_3(krange,p,N),color='#c42d41', alpha=0.6); plt.plot(krange, Ehalf(krange,N), linewidth=2, label=" p = 0.5 "); plt.plot(krange, SimulateGame1_3(krange,0.5,N),color='#c42d41', alpha=0.6, label = 'Simulated'); for p in plist: plt.plot(krange, E(krange,p,N), linewidth=2, label=" p = %f "%(p)); plt.plot(krange, SimulateGame1_3(krange,p,N),color='#c42d41', alpha=0.6); plt.legend(bbox_to_anchor=(1.04,1), loc="upper left"); plt.title('Gambler\'s ruin, house limit = %d'%(N)); plt.xlabel('Initial bankroll');plt.ylabel('Expected number of games'); plt.show(); """ Explanation: To find the expected number of games to be played before reaching $0$ or $N$, $E_k \equiv E(Y_k)$, where the random variable $Y_k$ is the number of games needed starting with bankroll $k$, we can again condition on the first game: $$\begin{split}E_k &= \text{E(}Y_k\text{ | win first)P(win first) + E(}Y_k\text{ | lose first)P(lose first)} \ &= (1 + E_{k + 1})p + (1 + E_{k - 1})(1 - p) \end{split} $$ with the boundary conditions $E_0 = E_N = 0$. 
Using the standard theory of recurrence relations, we find $$ E_k = \cases{\frac{N}{2 p - 1} \frac{1- \left(\frac{1-p}{p}\right)^k}{1 - \left(\frac{1-p}{p}\right)^N} - \frac{k}{2 p - 1}, \qquad p \neq \frac{1}{2} \ N k - k^2, \,\,\,\,\qquad\qquad p = \frac{1}{2} } $$ End of explanation """ from scipy.stats import norm Gaussian = lambda x,mu,var: np.exp(-(x - mu)**2.0 / (2.0 * var))/np.sqrt(2.0*np.pi*var); xmin = 350; xmax = 650; xrange=np.linspace(xmin, xmax, num=xmax - xmin); headprob = 0.5; Ntosses = 1000; mub = Ntosses*headprob; varb = Ntosses*headprob*(1 - headprob); # We must normalize the Gaussian: z = (550 - mub)/np.sqrt(varb); print(z); print(1-norm.cdf(z)); plt.xlim(xmin,xmax); plt.ylim(0,0.03); plt.plot(xrange, Gaussian(xrange, mub, varb),color='#c42d41',alpha=0.8,linewidth=2); plt.xlabel('Number of heads');plt.ylabel('Proportion of bets'); plt.annotate('Very rare event', xy=(550, 0.0005), xytext=(565, 0.005), arrowprops=dict(facecolor='black', shrink=0.05), ) plt.title('Game 2'); plt.show(); """ Explanation: Game 2 Let $X_N$ be the random variable denoting the number of heads in a series of $N$ coin tosses. The corresponding probability density function is the binomial distribution, $$ X_N \sim \text{Binomial}(p, N),$$ where $p = 0.5$ for a fair coin. The mean is $Np$ and the variance is $Np(1-p)$. For large $N$, certainly for $N = 1000$, the Central Limit Theorem asserts that the binomial distribution can be approximated by a Gaussian: $$ X_N \sim \text{N}[Np, Np(1-p)].$$ To answer the question of whether or not to take the bet, let us consider the expectation of its payout: $$\begin{split}E\text{(game 2)} &= E(\text{win})P(\text{win}) + E(\text{lose})P(\text{lose}) \ &= (-10 + 20)P(\text{win}) + (-10)P(\text{lose}) \ &= -10 + 20P(\text{win})\ &= -10 + 20P(X_{1000} > 550). 
\end{split}$$ Let us evaluate the probability numerically: End of explanation """ from scipy import sparse # Matrix representations of the operators bvecU = np.array([1,0]); # gambler basis vectors bvecL = np.array([0,1]); NBankR = 40; # House limit BankRmatdim = NBankR - 1; bvecBankR = np.arange(1,NBankR); matWin = sparse.spdiags([1]*(BankRmatdim -1),-1,BankRmatdim ,BankRmatdim ); matLose = sparse.spdiags([1]*(BankRmatdim -1),1,BankRmatdim ,BankRmatdim ); matBankRId = sparse.identity(BankRmatdim); matLL = sparse.csr_matrix(np.outer(bvecL,bvecL)); matUU = sparse.csr_matrix(np.outer(bvecU,bvecU)); matHad = sparse.csr_matrix(np.array([[1,1],[1,-1]])/np.sqrt(2)); matG = sparse.kron(matWin, matLL) + sparse.kron(matLose, matUU); matS = sparse.csr_matrix(matG.dot(sparse.kron(matBankRId, matHad))); # Definition of the initial state vecGamblerT0 = bvecL; # The initial gambler luck state, in this case fully 'lucky' vecCasinoT0 = np.eye(BankRmatdim)[:,int(BankRmatdim/2)]; # The initial bankroll state, only site int(BankRmatdim/2) = 1 stateT0 = np.kron(vecCasinoT0,vecGamblerT0); # The initial total system state (casino + gambler) # Gambling events def Propagate(T,sT0): '''Iterates for T gambles.''' if (T == 0): s = sT0; else: s = matS.dot(sT0); for i in range(T-1): s = matS.dot(s); return s; Tlist = [0,1,2,3,10,20]; i = 1; plt.figure(figsize=(15,6)); plt.subplots_adjust(hspace=0.35); for T in Tlist: state = Propagate(T,stateT0); # The resulting array has meaning only in terms of our chosen basis. # To compute the total probability of having a bankroll value k, we # must trace over the gambler's internal state. 
state = [np.abs(i2)**2 for i2 in state] # In Python list comprehension is preferred state_BankR = np.array(state[::2]) + np.array(state[1::2]); # Trace over the gambler's Hilbert space # state_BankR contains the probabilities for the bank roll values plt.subplot(2,3,i); i += 1; plt.xlim(0,NBankR); plt.xlabel('Bankroll'); plt.ylabel('Probability'); plt.bar(bvecBankR,state_BankR,width=1.0, color='#42aaf4', alpha=0.9, label=" T = %d "%(T)); plt.legend(loc="upper left"); plt.suptitle('Bankroll at a quantum casino after T plays and starting fully \'lucky\''); plt.show(); # Let us also plot the evolution of the expected bankroll as a function of T expWin = []; for T in range(20): state = Propagate(T,stateT0); state = [np.abs(i2)**2 for i2 in state] state_BankR = np.array(state[::2]) + np.array(state[1::2]); expWin.append(np.sum(bvecBankR*state_BankR)); plt.xlabel('Number of games'); plt.ylabel('Expected bankroll'); plt.plot(range(20),expWin); plt.show(); """ Explanation: We see that the needed number of heads, 550 out of 1000 tosses, is 3.16 standard deviations away from the mean. The probability $P(X_{1000} > 550) \approx 0.0008.$ The expectation becomes $$E\text{(game 2)} = -10 + 20\times 0.0008 \approx -9.98.$$ This game should certainly not be played. The bet can be taken when $E\text{(game 2)}>0$, for example when the game costs EUR 0.016 or less with a payout of EUR 20, or pays EUR 12500 or more for the bet price of EUR 10. Game 3 This time we are dealing with a Discrete Time Quantum Walk. There are many interesting features to study, but let us focus our attention here to calculating the expected winnings when gambling in a quantum casino, similarly in spirit to Game 1. Let us introduce our gambler named $\left|{\psi}\right\rangle$ who is in a quantum superposition of 'lucky' (L) and 'unlucky' (U), $$\left|{\psi}\right\rangle = g_1 \left|{L}\right\rangle + g_2 \left|{U}\right\rangle,$$ where $g_{1,2} \in \mathbb{C}$ such that $|g_1|^2 + |g_2|^2 = 1$. 
The quantum casino named $\left|{\phi}\right\rangle$, like the classical one, accommodates for all non-zero bankrolls (B) within the house limit $N$ ($\mathbb{N}_N = \lbrace 1, \ldots, N-1 \rbrace$): $$\left|{\phi}\right\rangle = \sum_{B \in \mathbb{N}_N} c_B \left|{B}\right\rangle,$$ where $c_B \in \mathbb{C}$ such that $\sum_{B \in \mathbb{N}_N} |c_B|^2 = 1$. In a relaxed casino $\mathbb{N}_N \to \mathbb{Z}$. Assuming the gambler and the casino are not entangled, the total state of the system, $\left|{\psi}\right\rangle$, is then the product state $$\left|{\psi}\right\rangle = \left|{\phi}\right\rangle \otimes \left|{\psi}\right\rangle.$$ The game operates as follows: First, the gambler flips a quantum coin that determines the luck state. For the component of the gambler in the state 'lucky' the bankroll jumps up by 1; conversely, if the gambler is in the state 'unlucky', the bankroll jumps down by 1. The gamble operator $G$ is then $$G = \sum_{B \in \mathbb{N}N} \left|{B+1}\right\rangle\left\langle{B}\right| \otimes \left|{L}\right\rangle\left\langle{L}\right| + \sum{B \in \mathbb{N}_N} \left|{B-1}\right\rangle\left\langle{B}\right| \otimes \left|{U}\right\rangle\left\langle{U}\right|. $$ The coin toss that determines the state of the gambler is taken to be the Hadamard coin, which in the basis $\left|{U}\right\rangle = (1,0)^\mathrm{T}$ and $\left|{L}\right\rangle = (0,1)^\mathrm{T}$ reads $$H = \frac{1}{\sqrt{2}}\begin{pmatrix}1 & \;\;1\ 1 & -1\\end{pmatrix}. $$ The total operator progressing the walk by one step, $S$, is then given by $$\begin{split} S &= G(\text{Id} \otimes H), \end{split} $$ where the identity operation on the bankroll space reflects the fact that the bankroll is not modified during the coin flip $H$. The state after $k$ gambles is $$S^k \ket{\Psi_0}, $$ where $\ket{\Psi_0}$ is the initial state. 
In our chosen basis $\left|{U}\right\rangle\left\langle{U}\right| = \begin{pmatrix}1 & 0\ 0 & 0\\end{pmatrix}$ and $\left|{L}\right\rangle\left\langle{L}\right| = \begin{pmatrix}0 & 0\ 0 & 1\\end{pmatrix}$. Let us adopt the basis $\left|{B}\right\rangle = (0, \ldots, 1, \ldots, 0)^\mathrm{T}$ where the '1' occurs at position $B \in \mathbb{N}_N = \lbrace 1, \ldots, N-1 \rbrace$, and rest of the $N - 1$ entries are zero. Then $$ \begin{split} \sum_{B \in \mathbb{N}N} \left|{B-1}\right\rangle\left\langle{B}\right| &= \begin{pmatrix}0 & 1 & & & \ & 0 & 1& & \ & & \ddots & & \ & & & 0 & 1\ & & & & 0 \end{pmatrix}, \ & \ \sum{B \in \mathbb{N}_N} \left|{B+1}\right\rangle\left\langle{B}\right| &= \begin{pmatrix}0 & & & & \ 1 & 0 & & & \ & & \ddots & & \ & & 1 & 0 & \ & & & 1 & 0 \end{pmatrix}, \end{split} $$ where the matrices are $(N - 1) \times (N - 1)$ in size. End of explanation """
thonstad/acoustical_monitoring
notebooks/Synchronization.ipynb
bsd-3-clause
# the cross-correlation function in statsmodels does not use FFT so it is really slow
# from statsmodels.tsa.stattools import ccf
# res = ccf(ts1[1][200000:400000,1],ts2[1][200000:400000,1])
"""
Explanation: Let's try to find the lag of asynchrony by looking at the cross-correlation.
End of explanation
"""
# Warning - the envelope heights could be different for different cameras if one is buffered
# Envelope based on the hilbert transform fails:
import scipy.signal.signaltools as sigtool
env = np.abs(sigtool.hilbert(ts1[1][:,1]))
#plt.plot(env)
"""
Explanation: Cross-correlation on the signals is a bad idea! Too many oscillations. Instead, we should get the envelope of the signal and cross-correlate.
End of explanation
"""
plt.plot(np.abs(ts1[1][:,1]))
plt.title('Absolute value of the signal')
# Another unsuccessful way of getting the envelope:
# hilb = sigtool.hilbert(ts1[1][:,1])
# env = (ts1[1][:,1] ** 2 + hilb ** 2) ** 0.5
# plt.plot(env)

# Creating a Butterworth filter
b, a = signal.butter(4, 7./48000, 'low')
# filtering
# output_signal = signal.filtfilt(b, a, 2*ts1[1][:,1]*ts1[1][:,1])
output_signal1 = signal.filtfilt(b, a, np.abs(ts1[1][:,1]))
output_signal2 = signal.filtfilt(b, a, np.abs(ts2[1][:,1]))
plt.plot(np.sqrt(output_signal1[700000:1080000]))
plt.plot(np.sqrt(output_signal2[700000:1080000]))
plt.title('Zooming on the displacement')
"""
Explanation: We will try to get the envelopes by taking absolute values and then passing a low-pass filter.
End of explanation
"""
# Cross-correlate the two envelopes via FFT convolution (one input reversed).
c = signal.fftconvolve(output_signal1[800000:1080000],output_signal2[800000:1080000][::-1], mode='full')
plt.plot(c[::-1])
print(c.shape[0])
print(c.argmax())
# The lag of maximum correlation gives the offset between the two recordings.
offset = c.shape[0] - c.argmax() - 280000
plt.plot(280000+offset,c[c.argmax()],'ro')
plt.title('Offset is ' + str(280000+offset))

plt.plot(np.sqrt(output_signal1[800000:1080000]))
plt.plot(np.sqrt(output_signal2[800000+offset:(1080000+offset)]))
plt.title('Aligned envelopes')
"""
Explanation: Calculate cross-correlation with FFT:
End of explanation
"""
plt.plot(np.sqrt(output_signal1[np.abs(offset):]))
plt.plot(np.sqrt(output_signal2[:]))
"""
Explanation: On a large scale the envelopes seem aligned:
End of explanation
"""
tpin3694/tpin3694.github.io
machine-learning/adaboost_classifier.ipynb
mit
# Load libraries from sklearn.ensemble import AdaBoostClassifier from sklearn import datasets """ Explanation: Title: Adaboost Classifier Slug: adaboost_classifier Summary: How to conduct adaboost classifier and boosting in scikit-learn for machine learning in Python. Date: 2017-09-18 12:00 Category: Machine Learning Tags: Trees And Forests Authors: Chris Albon <a alt="Adaboost" href="https://machinelearningflashcards.com"> <img src="adaboost_classifier/AdaBoost_print.png" class="flashcard center-block"> </a> Preliminaries End of explanation """ # Load data iris = datasets.load_iris() X = iris.data y = iris.target """ Explanation: Load Iris Flower Dataset End of explanation """ # Create adaboost-decision tree classifer object clf = AdaBoostClassifier(n_estimators=50, learning_rate=1, random_state=0) """ Explanation: Create Adaboost Classifier The most important parameters are base_estimator, n_estimators, and learning_rate. base_estimator is the learning algorithm to use to train the weak models. This will almost always not needed to be changed because by far the most common learner to use with AdaBoost is a decision tree -- this parameter's default argument. n_estimators is the number of models to iteratively train. learning_rate is the contribution of each model to the weights and defaults to 1. Reducing the learning rate will mean the weights will be increased or decreased to a small degree, forcing the model train slower (but sometimes resulting in better performance scores). loss is exclusive to AdaBoostRegressor and sets the loss function to use when updating weights. This defaults to a linear loss function however can be changed to square or exponential. End of explanation """ # Train model model = clf.fit(X, y) """ Explanation: Train Adaboost Classifer End of explanation """
sysid/nbs
lstm/LTSM_BasicStockMarket.ipynb
mit
dpath = 'data/basic/' #path_to_dataset = dpath + 'household_power_consumption.txt' %mkdir -p dpath !wget -P $dpath https://raw.githubusercontent.com/jaungiers/LSTM-Neural-Network-for-Time-Series-Prediction/master/sinwave.csv !wget -P $dpath https://raw.githubusercontent.com/jaungiers/LSTM-Neural-Network-for-Time-Series-Prediction/master/sp500.csv import matplotlib.pyplot as plt %matplotlib inline import time import warnings import numpy as np from numpy import newaxis from keras.layers.core import Dense, Activation, Dropout from keras.layers.recurrent import LSTM from keras.models import Sequential warnings.filterwarnings("ignore") import pandas as pd def load_ts(filename): df = pd.read_csv(filename, header=None) data = df[0].tolist() return data #filename = 'sinwave.csv' filename = 'sp500.csv' print('> Loading data...: ', dpath+filename) #X_train, y_train, X_test, y_test = load_data(dpath+'sp500.csv', seq_len, True) ts = load_ts(dpath + filename) ts[:10] plt.plot(ts) seq_len = 50 def load_data(ts, seq_len, normalise_window): sequence_length = seq_len + 1 result = [] # create gliding window for index in range(len(ts) - sequence_length): result.append(ts[index: index + sequence_length]) if normalise_window: result = normalise_windows(result) result = np.array(result) print("Data shape: ", result.shape) print(result[:4, :]) row = round(0.9 * result.shape[0]) train = result[:row, :] test = result[row:, :] print("Test shape: ", test.shape) #np.random.shuffle(train) x_train = train[:, :-1] y_train = train[:, -1] x_test = test[:, :-1] print("xtest shape: ", x_test.shape) y_test = test[:, -1] x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1)) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1)) return [x_train, y_train, x_test, y_test] def normalise_windows(window_data): normalised_data = [] for window in window_data: normalised_window = [((float(p) / float(window[0])) - 1) for p in window] normalised_data.append(normalised_window) return 
normalised_data print('> Loading data...') #X_train, y_train, X_test, y_test = load_data(ts, seq_len, False) X_train, y_train, X_test, y_test = load_data(ts, seq_len, True) X_train.shape, y_train.shape, X_test.shape, y_test.shape X_train[0,:seq_len, 0] X_train[1,:seq_len, 0] y_train[:5] #plt.plot(X_train[0,:,0]) #plt.plot(y_train) fig = plt.figure(facecolor='white') ax = fig.add_subplot(211) ax.plot(X_train[0, :, 0], label='X') ax.legend() ax = fig.add_subplot(212) ax.plot(y_train, label='y') ax.legend() def build_model(layers): model = Sequential() model.add(LSTM( input_dim=layers[0], output_dim=layers[1], return_sequences=True)) model.add(Dropout(0.2)) model.add(LSTM( layers[2], return_sequences=False)) model.add(Dropout(0.2)) model.add(Dense( output_dim=layers[3])) model.add(Activation("linear")) start = time.time() model.compile(loss="mse", optimizer="rmsprop") print("Compilation Time : ", time.time() - start) return model print('> Data Loaded. Compiling...') model = build_model([1, 50, 100, 1]) model.summary() start_time = time.time() epochs = 50 model.fit( X_train, y_train, batch_size=512, nb_epoch=epochs, validation_split=0.05) print("Training time: ", time.time() - start_time) def plot_results(predicted_data, true_data, figsize=(12,6)): fig = plt.figure(facecolor='white', figsize=figsize) ax = fig.add_subplot(111) ax.plot(true_data, label='True Data') plt.plot(predicted_data, label='Prediction') plt.legend() plt.show() """ Explanation: LTSM Basic http://www.jakob-aungiers.com/articles/a/LSTM-Neural-Network-for-Time-Series-Prediction End of explanation """ def predict_point_by_point(model, data): #Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time predicted = model.predict(data) predicted = np.reshape(predicted, (predicted.size,)) return predicted start_time = time.time() predicted = predict_point_by_point(model, X_test) predicted[:5] print("Prediction time: ", time.time() - start_time) 
plot_results(predicted, y_test) """ Explanation: If you’re observant you’ll have noticed in our load_data() function above we split the data in to train/test sets as is standard practice for machine learning problems. However what we need to watch out for here is what we actually want to achieve in the prediction of the time series. If we were to use the test set as it is, we would be running each window full of the true data to predict the next time step. This is fine if we are only looking to predict one time step ahead, however if we’re looking to predict more than one time step ahead, maybe looking to predict any emergent trends or functions (e.g. the sin function in this case) using the full test set would mean we would be predicting the next time step but then disregarding that prediction when it comes to subsequent time steps and using only the true data for each time step. s You can see below the graph of using this approach to predict only one time step ahead at each step in time: End of explanation """ def predict_sequence_full(model, data, window_size): #Shift the window by 1 new prediction each time, re-run predictions on new window curr_frame = data[0] predicted = [] # loop over entire testdata for i in range(len(data)): predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0]) #get element from shape(1,1) curr_frame = curr_frame[1:] #move window curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0) #fill frame with prediction return predicted start_time = time.time() predicted = predict_sequence_full(model, X_test, seq_len) predicted[:5] print("Prediction time: ", time.time() - start_time) plot_results(predicted, y_test) """ Explanation: If however we want to do real magic and predict many time steps ahead we only use the first window from the testing data as an initiation window. 
At each time step we then pop the oldest entry out of the rear of the window and append the prediction for the next time step to the front of the window, in essence shifting the window along so it slowly builds itself with predictions, until the window is full of only predicted values (in our case, as our window is of size 50 this would occur after 50 time steps). We then keep this up indefinitely, predicting the next time step on the predictions of the previous future time steps, to hopefully see an emerging trend. End of explanation """ start_time = time.time() predicted = predict_point_by_point(model, X_test) predicted[:5] print("Prediction time: ", time.time() - start_time) plot_results(predicted, y_test, figsize=(20,10)) """ Explanation: Overlaid with the true data we can see that with just 1 epoch and a reasonably small training set of data the LSTM has already done a pretty damn good job of predicting the sin function. You can see that as we predict more and more into the future the error margin increases as errors in the prior predictions are amplified more and more when they are used for future predictions. As such we see that the LSTM hasn’t got the frequency quite right and it drifts the more we try to predict it. However as the sin function is a very easy oscillating function with zero noise it can predict it to a good degree. A NOT-SO-SIMPLE STOCK MARKET We predicted a several hundred time steps of a sin wave on an accurate point-by-point basis. So we can now just do the same on a stock market time series and make a shit load of money right? Well, no. A stock time series is unfortunately not a function that can be mapped. It can best described more as a random walk, which makes the whole prediction thing considerably harder. But what about the LSTM identifying any underlying hidden trends? Well, let’s take a look. Here is a CSV file where I have taken the adjusted daily closing price of the S&P 500 equity index from January 2000 – August 2016. 
I’ve stripped out everything to make it in the exact same format as our sin wave data and we will now run it through the same model we used on the sin wave with the same train/test split. There is one slight change we need to make to our data however, because a sin wave is already a nicely normalized repeating pattern it works well running the raw data points through the network. However running the adjusted returns of a stock index through a network would make the optimization process shit itself and not converge to any sort of optimums for such large numbers. So to combat this we will take each n-sized window of training/testing data and normalize each one to reflect percentage changes from the start of that window (so the data at point i=0 will always be 0). We’ll use the following equations to normalise and subsequently de-normalise at the end of the prediction process to get a real world number out of the prediction: n = normalised list [window] of price changes p = raw list [window] of adjusted daily return prices End of explanation """ start_time = time.time() predicted = predict_sequence_full(model, X_test, seq_len) predicted[:5] print("Prediction time: ", time.time() - start_time) plot_results(predicted, y_test, figsize=(20,10)) def plot_results_multiple(predicted_data, true_data, prediction_len, figsize=(12,6)): fig = plt.figure(facecolor='white', figsize=figsize) ax = fig.add_subplot(111) ax.plot(true_data, label='True Data') #Pad the list of predictions to shift it in the graph to it's correct start for i, data in enumerate(predicted_data): padding = [None for p in range(i * prediction_len)] plt.plot(padding + data, label='Prediction'+str(i)) plt.legend() plt.show() def predict_sequences_multiple(model, data, window_size, prediction_len): #Predict sequence of 50 steps before shifting prediction run forward by 50 steps prediction_seqs = [] for i in range(len(data)//prediction_len): curr_frame = data[i*prediction_len] predicted = [] for j in 
range(prediction_len): predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0]) curr_frame = curr_frame[1:] curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0) prediction_seqs.append(predicted) return prediction_seqs start_time = time.time() predictions = predict_sequences_multiple(model, X_test, seq_len, 50) #predicted = predict_sequence_full(model, X_test, seq_len) #predicted = predict_point_by_point(model, X_test) print("Prediction time: ", time.time() - start_time) plot_results_multiple(predictions, y_test, 50, figsize=(20,10)) """ Explanation: Running the data on a single point-by-point prediction as mentioned above gives something that matches the returns pretty closely. But this is deceptive! Why? Well if you look more closely, the prediction line is made up of singular prediction points that have had the whole prior true history window behind them. Because of that, the network doesn’t need to know much about the time series itself other than that each next point most likely won’t be too far from the last point. So even if it gets the prediction for the point wrong, the next prediction will then factor in the true history and disregard the incorrect prediction, yet again allowing for an error to be made. We can’t see what is happening in the brain of the LSTM, but I would make a strong case that for this prediction of what is essentially a random walk (and as a matter of point, I have made a completely random walk of data that mimics the look of a stock index, and the exact same thing holds true there as well!) is “predicting” the next point with essentially a Gaussian distribution, allowing the essentially random prediction to not stray too wildly from the true data. So what would we look at if we wanted to see whether there truly was some underlying pattern discernable in just the price movements? Well we would do the same as for the sin wave problem and let the network predict a sequence of points rather than just the next one. 
Doing that we can now see that unlike the sin wave which carried on as a sin wave sequence that was almost identical to the true data, our stock data predictions converge very quickly into some sort of equilibrium. End of explanation """
rsignell-usgs/notebook
ROMS/sandy_sgrid.ipynb
mit
from netCDF4 import Dataset url = ('http://geoport.whoi.edu/thredds/dodsC/clay/usgs/users/' 'jcwarner/Projects/Sandy/triple_nest/00_dir_NYB05.ncml') nc = Dataset(url) """ Explanation: pysgrid only works with raw netCDF4 (for now!) End of explanation """ import pysgrid # The object creation is a little bit slow. Can we defer some of the loading/computations? sgrid = pysgrid.from_nc_dataset(nc) sgrid # We need a better __repr__ and __str__ !!! """ Explanation: The sgrid object End of explanation """ sgrid.edge1_coordinates, sgrid.edge1_dimensions, sgrid.edge1_padding u_var = sgrid.u u_var.center_axis, u_var.node_axis v_var = sgrid.v v_var.center_axis, v_var.node_axis """ Explanation: The object knows about sgrid conventions End of explanation """ u_var.center_slicing v_var.center_slicing """ Explanation: Being generic is nice! This is an improvement up on my first design ;-) ... End of explanation """ u_velocity = nc.variables[u_var.variable] v_velocity = nc.variables[v_var.variable] """ Explanation: (Don't be scared, you do not need the sgrid object to get the variables. This just shows that there is a one-to-one mapping from the sgrid object to the netCDF4 object.) End of explanation """ from datetime import datetime, timedelta from netCDF4 import date2index t_var = nc.variables['ocean_time'] start = datetime(2012, 10, 30, 0, 0) time_idx = date2index(start, t_var, select='nearest') v_idx = 0 # Slice of the slice! u_data = u_velocity[time_idx, v_idx, u_var.center_slicing[-2], u_var.center_slicing[-1]] v_data = v_velocity[time_idx, v_idx, v_var.center_slicing[-2], v_var.center_slicing[-1]] """ Explanation: ... but we need a better way to deal with the slice of the slice! 
End of explanation """ angle = sgrid.angle angles = nc.variables[angle.variable][angle.center_slicing] """ Explanation: Some thing for the angle information End of explanation """ from pysgrid.processing_2d import avg_to_cell_center u_avg = avg_to_cell_center(u_data, u_var.center_axis) v_avg = avg_to_cell_center(v_data, v_var.center_axis) """ Explanation: Average velocity vectors to cell centers End of explanation """ from pysgrid.processing_2d import rotate_vectors u_rot, v_rot = rotate_vectors(u_avg, v_avg, angles) """ Explanation: Rotate vectors by angles End of explanation """ from pysgrid.processing_2d import vector_sum uv_vector_sum = vector_sum(u_rot, v_rot) """ Explanation: Speed End of explanation """ grid_cell_centers = sgrid.centers # Array of lon, lat pairs. lon_var_name, lat_var_name = sgrid.face_coordinates sg_lon = getattr(sgrid, lon_var_name) sg_lat = getattr(sgrid, lat_var_name) lon_data = grid_cell_centers[..., 0][sg_lon.center_slicing] lat_data = grid_cell_centers[..., 1][sg_lat.center_slicing] """ Explanation: Lon, lat of the center grid (This is kind of clunky... or maybe I just do not get the sgrid concept beyond the ROMS world.) 
End of explanation """ %matplotlib inline import numpy as np import matplotlib.pyplot as plt import cartopy.crs as ccrs from cartopy.io import shapereader from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER def make_map(projection=ccrs.PlateCarree(), figsize=(9, 9)): fig, ax = plt.subplots(figsize=figsize, subplot_kw=dict(projection=projection)) gl = ax.gridlines(draw_labels=True) gl.xlabels_top = gl.ylabels_right = False gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER return fig, ax sub = 5 scale = 0.06 fig, ax = make_map() kw = dict(scale=1.0/scale, pivot='middle', width=0.003, color='black') q = plt.quiver(lon_data[::sub, ::sub], lat_data[::sub, ::sub], u_rot[::sub, ::sub], v_rot[::sub, ::sub], zorder=2, **kw) cs = plt.pcolormesh(lon_data[::sub, ::sub], lat_data[::sub, ::sub], uv_vector_sum[::sub, ::sub], zorder=1, cmap=plt.cm.rainbow) _ = ax.coastlines('10m') """ Explanation: Plotting End of explanation """
philmui/datascience
lecture06.stats/lecture06.eu.data.ipynb
mit
import pandas as pd df = pd.DataFrame() df """ Explanation: Data Import, Merge, Wrangle We will be using the real dataset for extra-EU trade percentages for a few different years to illutrate the real-world usage of data import, cleanse, merge and wrangle. End of explanation """ for chunk in pd.read_csv('data/ext_lt_invcur.tsv', sep='\t', chunksize=100): df = pd.concat([df, chunk]) df.head(3) """ Explanation: Concatenating datasets Let's incrementally read in the EU trade dataset End of explanation """ df = pd.DataFrame() # let's figure out how to split out the 1st column's values for chunk in pd.read_csv('data/ext_lt_invcur.tsv', sep='\t', chunksize=100): data_rows = [row for row in chunk.ix[:,0].str.split(',')] data_cols = chunk.columns[0].split(',') print(data_rows[:2], data_cols) break; df = pd.concat([df, chunk]) """ Explanation: Transforming a column Let's preprocess this dataset to split out the 1st column into more manageable pieces. End of explanation """ df = pd.DataFrame() for chunk in pd.read_csv('data/ext_lt_invcur.tsv', sep='\t', chunksize=100): data_rows = [row for row in chunk.ix[:,0].str.split(',')] # create a column split list generator data_cols = [col.split('\\')[0] for col in chunk.columns[0].split(',')] print(data_rows[:2], data_cols) break; df = pd.concat([df, chunk]) """ Explanation: Looks like we are getting the right value split. However, the last column name split looks odd. End of explanation """ df = pd.DataFrame() for chunk in pd.read_csv('data/ext_lt_invcur.tsv', sep='\t', chunksize=100): data_rows = [row for row in chunk.ix[:,0].str.split(',')] # create a column split list generator data_cols = [col.split('\\')[0] for col in chunk.columns[0].split(',')] clean_df = pd.DataFrame(data_rows, columns=data_cols) # now we can concat by "column" which means axis=1 new_df = pd.concat([clean_df, chunk], axis=1) print(new_df) break; df = pd.concat([df, chunk]) """ Explanation: The last column name is now called "geo" -- simple and elegant. 
Now, we need to merge the list of lists to create the cleaned dataframe: End of explanation """ df = pd.DataFrame() for chunk in pd.read_csv('data/ext_lt_invcur.tsv', sep='\t', chunksize=100): data_rows = [row for row in chunk.ix[:,0].str.split(',')] # create a column split list generator data_cols = [col.split('\\')[0] for col in chunk.columns[0].split(',')] clean_df = pd.DataFrame(data_rows, columns=data_cols) # now we can concat by "column" which means axis=1 new_df = pd.concat([clean_df, chunk.drop(chunk.columns[0], axis=1)], axis=1) print(new_df) break; df = pd.concat([df, chunk]) """ Explanation: We notice that we still have our own column[0] around -- let's drop it to clean up the DF. End of explanation """ df = pd.DataFrame() for chunk in pd.read_csv('data/ext_lt_invcur.tsv', sep='\t', chunksize=100): data_rows = [row for row in chunk.ix[:,0].str.split(',')] # create a column split list generator data_cols = [col.split('\\')[0] for col in chunk.columns[0].split(',')] clean_df = pd.DataFrame(data_rows, columns=data_cols) # now we can concat by "column" which means axis=1 new_df = pd.concat([clean_df, chunk.drop(chunk.columns[0], axis=1)], axis=1) df = pd.concat([df, new_df]) df.head(3) """ Explanation: This looks a lot cleaner! We are finally ready to run this for all input data: End of explanation """ df.shape df.describe(include='all') df.loc[df['sitc06']=='TOTAL'] """ Explanation: We have concat data both horizontally and vertically, and able to clean up the column name as well as transforming a messy column by splitting down its components. End of explanation """
ubcgif/gpgLabs
notebooks/mag/Mag_Induced2D.ipynb
mit
import numpy as np from geoscilabs.mag import Mag, Simulator %matplotlib inline """ Explanation: This is the <a href="https://jupyter.org/">Jupyter Notebook</a>, an interactive coding and computation environment. For this lab, you do not have to write any code, you will only be running it. To use the notebook: - "Shift + Enter" runs the code within the cell (so does the forward arrow button near the top of the document) - You can alter variables and re-run cells - If you want to start with a clean slate, restart the Kernel either by going to the top, clicking on Kernel: Restart, or by "esc + 00" (if you do this, you will need to re-run the following block of code before running any other cells in the notebook) This notebook uses code adapted from SimPEG - Cockett, R., S. Kang, L.J. Heagy, A. Pidlisecky, D.W. Oldenburg (2015, in review), SimPEG: An open source framework for simulation and gradient based parameter estimation in geophysical applications. Computers and Geosciences End of explanation """ #Input parameters fileName = 'https://github.com/geoscixyz/geosci-labs/raw/main/assets/mag/data/DO27_TMI.dat' xyzd = np.genfromtxt(fileName, skip_header=3) B = np.r_[60308, 83.8, 25.4] survey, dobj = Mag.createMagSurvey(xyzd, B) # View the data and chose a profile param = Simulator.ViewMagSurvey2D(survey, dobj) display(param) # Define the parametric model interactively model = Simulator.ViewPrism(param.result) display(model) """ Explanation: How do we define direction of an earth magnetic field? Earth magnetic field is a vector. To define a vector we need to choose a coordinate system. We use right-handed system: - X (Easting), - Y (Northing), and - Z (Up). Here we consider an earth magnetic field ($\vec{B_0}$), of which intensity is one. 
To define this unit vector, we use inclinatino and declination: - Declination: An angle from geographic North (Ng) (positive clockwise) - Inclination: Vertical angle from the N-E plane (positive down) <img src="https://github.com/geoscixyz/geosci-labs/raw/main/images/mag/earthfield.png?raw=true" style="width: 60%; height: 60%"> </img> What's data: total field anomaly We consider a typical form of magnetic data. To illustrate this we consider an suceptible object embedded in the earth. Based upon the earth magnetic field ($\vec{B}_0$), this object will generate anomalous magnetic field ($\vec{B}_A$). We define an unit vector $\hat{B}_0$ for the earth field as $$ \hat{B}_0 = \frac{\vec{B}_0}{|\vec{B}_0|}$$ We measure both earth and anomalous magnetic field such that $$ \vec{B} = \vec{B}_0 + \vec{B}_A$$ Total field anomaly, $\triangle \vec{B}$ can be defined as $$ |\triangle \vec{B}| = |\vec{B}|-|\vec{B}_E| $$ If $|\vec{B}|\ll|\vec{B}_E|$, then that is total field anomaly $\triangle \vec{B}$ is the projection of the anomalous field onto the direction of the earth field: $$ |\triangle \vec{B}| \simeq \vec{B}_A \cdot \hat{B}_0=|\vec{B}_A|cos\theta$$ <img src="https://github.com/geoscixyz/geosci-labs/raw/main/images/mag/totalfieldanomaly.png?raw=true" style="width: 50%; height: 50%"> Define a 3D prism Our model is a rectangular prism. Parameters to define this prism are given below: dx: length in Easting (x) direction (meter) dy: length in Northing (y) direction (meter) dz: length in Depth (z) direction (meter) below the receiver depth: top boundary of the prism (meter) pinc: inclination of the prism (reference is a unit northing vector; degree) pdec: declination of the prism (reference is a unit northing vector; degree) You can also change the height of the survey grid above the ground - rx_h: height of the grid (meter) Green dots show a plane where we measure data. 
End of explanation """ plotwidget = Simulator.PFSimulator(model, param) display(plotwidget) """ Explanation: Magnetic applet Based on the prism that you made above, below Magnetic applet computes magnetic field at receiver locations, and provide both 2D map (left) and profile line (right). For the prism, you can alter: - sus: susceptibility of the prism Parameters for the earth field are: - Einc: inclination of the earth field (degree) - Edec: declination of the earth field (degree) - Bigrf: intensity of the earth field (nT) For data, you can view: - tf: total field anomaly, - bx :x-component, - by :y-component, - bz :z-component You can simulate and view remanent magnetization effect with parameters: - irt: "induced", "remanent", or "total" - Q: Koenigsberger ratio ($\frac{M_{rem}}{M_{ind}}$) - rinc: inclination of the remanent magnetization (degree) - rdec: declination of the remanent magnetization (degree) End of explanation """
massimo-nocentini/on-python
vigenere/vigenere-cryptoanalysis.ipynb
mit
import itertools from itertools import * from copy import copy, deepcopy from heapq import * from random import * import matplotlib.pyplot as plt from collections import Counter from sympy import * init_printing() %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 10.0) """ Explanation: Vigenere cipher cryptoanalysis This notebook studies the cryptoanalysis of the Vigenere cipher, which is polyalphabetic, namely two occurrences $a_{1}, a_{2}$ of character $a$ belonging to the plaintext are associated with occurrences $c_{1}, c_{2}$ in the ciphertext, such that $c_{1}\neq c_{2}$ with very high probability. Our implementation closely follows a class lecture given by Prof. Orazio Puglisi within Cryptography course at the University of Florence. In order to fully understand the last part, where the concept of mutual coincidence index is crucial, we rest on the explanation at pag. 20 of this notes. End of explanation """ def make_alphabet_entry(i): alpha = i + ord('a') return chr(alpha),i A = dict(map(make_alphabet_entry, range(26))) A.update({' ':26}) inverse_A = {v:k for k,v in A.items()} A """ Explanation: alphabet Let $\mathcal{A}$ be our alphabet composed of standard English characters plus the space character. It will be sufficient to encode simple and (not so quite) short messages. To define it as a group $\frac{\mathbb{Z}}{n\mathbb{Z}}$ in the Python language we use a dict object, which can be reversed because it is a bijection. End of explanation """ def encode(s, alphabet=A): return list(map(lambda i: alphabet[i], s)) def decode(e, inverse_alphabet=inverse_A): return "".join(map(lambda i: inverse_alphabet[i], e)) """ Explanation: string encoding and the plaintext We define a function encode that consumes a string and produces a list of integer in our field. In parallel, function decode goes backwards, it consumes a list of integers and return a string. 
Such functions are useful in order to use the cipher and analyze it using str objects instead of the coded version using lists of integers. End of explanation """ def clean_text(text): remove_chars = [',', '.', ';', ':', '-', '(', ')', "'", '"'] for rc in remove_chars: text = text.replace(rc, '') text = text.replace('\n', ' ') return "".join(filter(lambda c: not c.isdigit(), text)) with open('rest_plain_text.txt', 'r') as f: plain_text = clean_text(f.read().lower()) encoded_plain_text = encode(plain_text) chunk = 500 "{}...".format(plain_text[:chunk]) """ Explanation: The following plaintext is a prose taken from here; before using it we have to swipe out punctuation marks: End of explanation """ assert decode(encode(plain_text)) == plain_text """ Explanation: With the following assert we ensure that function decode is the inverse of function encode: End of explanation """ def generate_random_key(given_key=None, required_length=None, max_length=None, alphabet=A): if given_key is not None: return given_key if required_length is None and max_length is None: max_length = len(alphabet) # the minimum length of the key is 3 to build interesting cases length = required_length if required_length else randint(3, max_length) key = [0] * length # -1 in the following max limit because it is inclusive in the sense of `randint`. for i in range(length): key[i] = randint(0, len(alphabet)-1) return key #key = encode("ericsmullyan") secret_key = generate_random_key(required_length=17) """ Explanation: key Let $\textbf{k}=(k_{0},\ldots,k_{m-1}) \in \left(\frac{\mathbb{Z}}{n\mathbb{Z}}\right)^{m}$ be a key of length $m\in\mathbb{N}$. 
In order to have a meaningful analysis we build a function generate_random_key which generate a random key of random length, keeping it safe...we will uncover it only at the end to check our work: End of explanation """ def encrypt(message, key, alphabet=A): n = len(alphabet) return [(p+v)%n for p,v in zip(message, cycle(key))] def decrypt(cipher, key, alphabet=A): n = len(alphabet) return [(c-v)%n for c,v in zip(cipher, cycle(key))] """ Explanation: encryption and the ciphertext Now we are in the position to define encrypt and decrypt functions, both of them consumes an encoded message, namely a list of integers, and a key. The encryption work by repeating the key as long as necessary to match the same length of the input $\textbf{x}=(x_{0},\ldots,x_{h})$, where $h > m-1$ otherwise the cipher is a OneTimePad which is unbreakable, $$(\underbrace{x_{0},\ldots,x_{m-1}}{k{0},\ldots,k_{m-1}} \underbrace{x_{m},\ldots,x_{m-1}}{k{0},\ldots,k_{m-1}} \ldots \underbrace{x_{lm},\ldots,x_{h}}{k{0},\ldots,k_{h \mod m}}) = (y_{0},\ldots,y_{h}) = \textbf{y}$$ truncating the very right block to match plaintext suffix. At last, the ciphertext $\textbf{y}$ is obtained by addition modulo $n$ of corresponding symbols, where $n$ is the length of the alphabet. Decryption follows the same scheme, using modular subtraction. End of explanation """ cipher_text = encrypt(encoded_plain_text, secret_key) '{}...'.format(decode(cipher_text)[:chunk]) """ Explanation: the following is the ciphertext produced by the application of function encrypt to the plaintext: End of explanation """ assert decode(decrypt(cipher_text, secret_key)) == plain_text """ Explanation: Assuming to know the secret_key, we ensure that function decrypt is the inverse of function encrypt: End of explanation """ def frequencies(lst, alphabet=A, inverse_alphabet=inverse_A): """ Produces a `dict` counting occcurrences of each object of the alphabet within the iterable. 
`frequencies` consumes an iterable $lst$ and an alphabet $A$, produces a dictionary of entries $(k,v)$ where $k$ is a character in $A$ and $v$ is the number of occurrences of $k$ in $lst$ """ counter = Counter(lst) return {k:counter[v] for k,v in alphabet.items()} # Counter handles the case of missing key returning 0 def length_from_frequencies(freqs): """ Returns the length of the original sequence by summation of symbols frequencies. """ return sum(freqs.values()) def coincidence_index(freqs, alphabet=A): """ Produces the I_{c} relative to frequencies matched against an alphabet. """ denom = length_from_frequencies(freqs) if denom in range(2): return None def mapper(a): v = freqs[a] if a in freqs else 0 return v*(v-1) return sum(map(mapper, alphabet.keys()))/(denom*(denom-1)) def draw_frequencies_histogram(seq, alphabet=A, y_maxlimit=None, normed=None): #plaintext_length = len(plain_text) #freqs = [plaintext_frequencies[inverse_A[ia]] for ia in sorted(inverse_A.keys())] n, bins, patches = plt.hist(seq, len(alphabet),facecolor='green', alpha=0.5) plt.xlabel('alphabet symbols') plt.ylabel('frequencies' + (', normed respect: {}'.format(str(normed)) if normed else '')) #plt.xticks(range(-1, len(alphabet)), sorted(alphabet.keys())) if y_maxlimit: plt.ylim([0, y_maxlimit]) plt.grid(True) plt.show() return None """ Explanation: coincidence index Let $I_{c}$ be the coincidence index of a sequence $\alpha$, over an alphabet $A$, defined as $$ I_{c}(\alpha) = \sum_{i=0}^{n}{\frac{f_{a_{i}}^{(\alpha)}(f_{a_{i}}^{(\alpha)}-1)}{m(m-1)}} $$ where $n$ is the length of the alphabet, $m$ is the length of $\alpha$ and $f_{a_{i}}^{^{(\alpha)}}$ is the frequency of symbol $a_{i}\in A$, namely the number of occurrences of $a_{i}$, in $\alpha$. In other words, $I_{c}$ is the probability to sample two occurrences of the same symbol $a$, forall $a\in A$, from the sequence $\alpha$. Index $I_{c}$ is invariant to shifting by the same constant $v$. 
Let $\alpha,\beta$ be two sequences of integers of length $l$ such that $\beta_{i} \equiv_{n} \alpha_{i} + v$, forall $i\in\lbrace0,\ldots,l\rbrace$; moreover, let $q_{\gamma_{i}}^{(\gamma)}$ be the probability to sample two occurrences of $\gamma_{i}$ from a sequence $\gamma$, we can state the relations $$ I_{c}(\beta) = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}}^{(\beta)}} = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}-v}^{(\alpha)}} = \sum_{\hat{a}{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q{\hat{a}{i}}^{(\alpha)}} = I{c}(\alpha) $$ where $\hat{a}{i}\equiv{n}a_{i}-v$, proving the invariance of $I_{c}$ when a sequence is produced by another one shifted by a constant $v\in\frac{\mathbb{Z}}{n\mathbb{Z}}$. End of explanation """ plaintext_frequencies = frequencies(encoded_plain_text) draw_frequencies_histogram(encoded_plain_text, y_maxlimit=1800) plaintext_frequencies """ Explanation: frequencies comparison The following are frequencies of alphabet symbols in the plaintext: in the analysis they are used as a dict produced by function frequencies, moreover it is possible to draw an histogram with the relative function. End of explanation """ draw_frequencies_histogram(cipher_text, y_maxlimit=1800) """ Explanation: The following histogram shows frequencies in the ciphertext: using the same y_maxlimit value, we see that they are spread "uniformly" over symbols. 
End of explanation """ print("coincidence index of *plaintext*: {}\ncoincidence index of *ciphertext*: {}".format( coincidence_index(plaintext_frequencies), coincidence_index(frequencies(cipher_text)))) """ Explanation: A first approach to encryption finishes computing the coincidence indexes of both plaintext and ciphertext: End of explanation """ def spread(message, block_length): return [message[i:i+block_length] for i in range(0, len(message), block_length)] def col(spreaded, c, join=False, joiner=lambda c: ''.join(decode(c))): column = [lst[c] if c < len(lst) else None for lst in spreaded] ready = list(filter(lambda i: i is not None, column)) return joiner(ready) if join else ready def decode_spreaded(spreaded, join_as_str=False): decoded_spread = list(map(decode, spreaded)) return '\n'.join(decoded_spread) if join_as_str else decoded_spread def analyze(cipher_text, max_key_length=None): res = {} # we discard the case where the key length equals the # length of the cipher text, since it is the case of # OneTimePad cipher, which is unbreakable! for d in range(2, len(cipher_text) if max_key_length is None else max_key_length + 1): spreaded = spread(cipher_text, d) res[d] = [] for c in range(d): ci = coincidence_index(frequencies(col(spreaded, c))) if ci: res[d].append(ci) return res def guess_key_length(analysis, threshold=0.06): candidates = {} for k,v in analysis.items(): cs = list(filter(lambda i: i > threshold, v)) if cs and len(cs) > ceiling(k/2): candidates[k] = cs return candidates """ Explanation: finding the key length by spreading The following set of functions allows us to probe the key length by repeatedly fixing a candidate length $l$, then spreading the ciphertext in a matrix with $l$ columns, then computing $I_{c}$ of each column and, finally, report $l$ if the majority of $I_{c}$ scores is greater than a threshold (.06 for English). 
End of explanation """ analysis = analyze(cipher_text, max_key_length=20) guess = guess_key_length(analysis) guess probing_key_length = 17 """ Explanation: here are $I_{c}$ scores witnesses for the candidate key length after spreading the ciphertext by columns: End of explanation """ spreaded = decode_spreaded(spread(cipher_text, probing_key_length)) print("{}\n...\n{}".format("\n".join(spreaded[:10]), "\n".join(spreaded[-3:]))) """ Explanation: The following is the ciphertext spread over a matrix with a number of column equals the candidate length found in the previous cell. End of explanation """ def mutual_coincidence_index(fst_freqs, snd_freqs, offset=0, alphabet=A, inverse_alphabet=inverse_A): fst_len = length_from_frequencies(fst_freqs) snd_len = length_from_frequencies(snd_freqs) n = len(alphabet) return sum(fst_freqs[k] * snd_freqs[inverse_alphabet[(v+offset) % n]] for k,v in alphabet.items())/(fst_len * snd_len) """ Explanation: mutual coincidence index Once the length $m$ of key $\textbf{k}$ has been "established", we're left with finding actual key symbols $k_{i}$, for $i\in{0,\ldots,m-1}$. In order to fullfil this step, we need another object, which resembles the index of mutual coincidence $I_{c}$ but it is more general, in the sense that sampling occurs on two given sequences instead of the same one. Formally, let $I_{mc}(\alpha, \beta)$ be the index of mutual coincidence of sequences $\alpha$ and $\beta$, defined as $$ I_{mc}(\alpha,\beta) = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}}^{(\alpha)}q_{a_{i}}^{(\beta)}} $$ where $q_{a_{i}}^{(\nu)}$ is the probability to draw an occurrence of symbol $a_{i}$ in sequence $\nu$. Let $\eta$ and $\gamma$ be two sequences of length $l$, produced by adding shifts $v_{\alpha}$ and $v_{\beta}$ to sequences $\alpha$ and $\beta$, respectively (formally, $\eta_{i} \equiv_{n} \alpha_{i} + v_{\alpha}$ and $\gamma_{i} \equiv_{n} \beta_{i} + v_{\beta}$, forall $i\in\lbrace0,\ldots,l-1\rbrace$). 
The inequality $$ I_{mc}(\eta,\gamma) = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}}^{(\eta)}q_{a_{i}}^{(\gamma)}} = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}-v_{\alpha}}^{(\alpha)}q_{a_{i}-v_{\beta}}^{(\beta)}} = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}}^{(\alpha)}q_{a_{i}+v_{\alpha}-v_{\beta}}^{(\beta)}} \neq \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}}^{(\alpha)}q_{a_{i}}^{(\beta)}} = I_{mc}(\alpha,\beta) $$ holds unless factor $v_{\alpha}-v_{\beta}$ in each subscript. We can define an even more general version of $I_{mc}$ as $$ I_{mc}(\eta,\gamma,g) = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}-g}^{(\eta)}q_{a_{i}}^{(\gamma)}} = \sum_{a_{i}\in\frac{\mathbb{Z}}{n\mathbb{Z}}}{q_{a_{i}}^{(\alpha)}q_{a_{i}+v_{\alpha}-v_{\beta}+g}^{(\beta)}} $$ where sequence $\eta$ is shifted back by $g\in\mathbb{N}$. Therefore we can state the equality with the usual definition according to $$ I_{mc}(\eta,\gamma,g) = I_{mc}(\alpha,\beta) \leftrightarrow v_{\beta}-v_{\alpha}=g $$ proving the invariance of $I_{mc}$ when two sequences are produced by shifting two other ones by constant values $q,w$ respectively, providing that $g = w-q$. End of explanation """ def build_offsets_eqs(cipher_text, key_length, indexed_sym, threshold=.06, alphabet=A): n = len(alphabet) eqs = {c:{} for c in range(key_length)} spreaded = spread(cipher_text, key_length) for c,a in itertools.product(range(key_length), repeat=2): if a == c: continue eqs[c][a]=[] for g in range(1,n): column_freqs = frequencies(col(spreaded, c)) another_freqs = frequencies(col(spreaded, a)) mci = mutual_coincidence_index(column_freqs, another_freqs, g) if mci > threshold: eqs[c][a].append(tuple([Eq(indexed_sym[a]-indexed_sym[c],g,evaluate=True), mci])) return eqs k_sym=IndexedBase('k') eqs_dict = build_offsets_eqs(cipher_text, key_length=probing_key_length, indexed_sym=k_sym) """ Explanation: Previous generalization allows us to set the basis to break the cipher. 
It works as follows: for each pair $(\eta,\gamma)$ of different columns in the spread-matrix form of the ciphertext, fix a $g\in\frac{\mathbb{Z}}{n\mathbb{Z}}$ to compute $I_{mc}(\eta,\gamma,g)$: if such index value is close to $I_{c}(\nu)$, where $\nu$ is a structured sequence using the English language, namely close to $0.065$, then collect equation $k_{i(\beta)}-k_{i(\alpha)}=g$, where function $i(\nu)$ return the zero-based index of column $\nu$ in the matrix spread. Such equations are important since state difference relations over symbols of the key. End of explanation """ eqs_dict[0] """ Explanation: The following cell report the set of difference equation collected respect the first column of the spread matrix: we observe that such set fails to instantiate $k_{6}$ because no likely equation gives a relation for it. On the contrary, it can be the case that collecting equations for a pair of columns with indexes $(c,a)$ yields one or more equations, namely ${k_{a}-k_{c}=g_{u_{0}},\ldots,k_{a}-k_{c}=g_{u_{v}}}$, for some $v\in\mathbb{N}$: to properly explore all keys space, we have to consider the product of each list of "equally-likely" equations. End of explanation """ def explode_key_space(eqs_dict): res = {} for c, eq_dict in eqs_dict.items(): eqs_list = [] for a, eqs in eq_dict.items(): if not eqs: res[c] = [] # no equations in `eqs` causes the product to be empty as well break eqs_list.append([[a] + list(eq_pair) for eq_pair in eqs]) else: # if no empty eqs was found then it is meaningful to cross product them res[c] = list(itertools.product(*eqs_list)) return res eqs_dict_pure = explode_key_space(eqs_dict) eqs_dict_pure print(eqs_dict_pure) """ Explanation: The following function implements the last observation, namely it produces the complete keys space where we've to look for the good one. 
End of explanation """ def candidate_keys(eqs_dict, indexed_sym, alphabet=A): key_length = len(eqs_dict) n = len(alphabet) candidates=set() for c, eqs_tuples in eqs_dict.items(): for d in range(len(alphabet)): for eq_tuple in eqs_tuples: key = [indexed_sym[i] for i in range(key_length)] key[c] = d for a, eq, mci in eq_tuple: subs_eq = eq.subs(indexed_sym[c],d) key[a] = solve(subs_eq, indexed_sym[a])[0] key[a] = key[a] % n for k in key: if isinstance(k, Indexed): break else: candidates.add(tuple(key)) # `tuple` application to make `key` hashable return candidates possible_keys = candidate_keys(eqs_dict_pure, k_sym) """ Explanation: In order to instantiate candidates keys, for each index column $c$ we use the set of equations ${k_{a}-k_{c}=g_{u}}$, for some $u\in\mathbb{N}$ and $a\in{0,\ldots,m-1}\setminus{c}$ as follows: for each equation $k_{a}-k_{c}=g_{u}$, instantiate $k_{c} = s$, for $s\in\frac{\mathbb{Z}}{n\mathbb{Z}}$, and solve it respect $k_{a}$. Therefore for each column index $c$ we have a candidate key if each symbol $k_{i}$ has been instantiated. 
End of explanation """ def arbitrary_frequencies(length, filename='rest_plain_text.txt'): with open(filename, 'r') as f: text = clean_text(f.read().lower()) text = text[:length] return frequencies(encode(text)) def attempt_keys(candidate_keys, cipher_text, threshold=.06, arbitrary_freqs=None): sols = set() for key in candidate_keys: decrypted = decrypt(cipher_text, key) freqs = frequencies(decrypted) if arbitrary_freqs: good = True for k,v in arbitrary_freqs.items(): if 1-abs((freqs[k]-v)/v) < .6: good = False if good: sols.add(decode(key)) else: ci = coincidence_index(freqs) if ci > threshold: sols.add(decode(key))#sols.add((ci, decode(key))) return sols sols = attempt_keys(possible_keys, cipher_text, arbitrary_freqs=arbitrary_frequencies(len(cipher_text))) len(sols), sols for sol in sols: key = sol print("key:({})\nplaintext:\n{}\n\n".format( key, decode(decrypt(cipher_text, encode(key)))[:chunk])) decode(secret_key) """ Explanation: The last step to filter out mistaken keys, we use each candidate key to perform a decryption, checking it against frequencies of an arbitrary English prose: End of explanation """
umutkarahan/Project100
sample/Tuples.ipynb
bsd-2-clause
mancoloji = "Barış Manço", "Mançoloji", 1999 print(mancoloji) """ Explanation: Tuples Kullanımı Tuples'lar değişmeyen sıralardır. Bir kere tanımlandığı zaman değiştirmenin imkanı yoktur. Listelere göre tubles farklı tipteki elemanlara sahip olabilir. Örneğin; End of explanation """ benbilirim = ("Barış Manço", "Ben bilirim",1993) """ Explanation: Tuples'lar parantez kullanılarak da tanımlanır End of explanation """ print(benbilirim[0]) """ Explanation: Tuples içindeki elemanlara [ ] ile ulaşmak mümkün. Örneğin, End of explanation """ #benbilirim[0]="Erol Evgin" """ Explanation: Eğer tubles içindeki bir elemanı tekrar tanımlamak istersek hata alırız. End of explanation """ benbilirim = "Erol Evgin", benbilirim[1] , benbilirim[2] print(benbilirim) """ Explanation: Eğer tuples'in içeriği değiştirilmek istenirse, onun çözümü tublesi yeniden tanımlamaktır. End of explanation """ artist, album, year = benbilirim print(artist) print(year) print(album) """ Explanation: Eğer tuples'in içindeki değerleri çıkarıp değişken olarak atamak istersek, End of explanation """ mancoloji = "Barış Manço","Mançoloji","1999",( (1,"Dağlar Dağlar"),(2,"Gülpembe"),(3,"İşte hendek işte deve")) artist, album, year, tracks = mancoloji print(artist) print(year) print(album) print(tracks) for song in tracks: track, title = song print("\t Track Number: {}, Title: {}".format(track, title)) """ Explanation: Tuples içine tuples tanımlanması mümkün. Örneğin End of explanation """ mancoloji = "Barış Manço","Mançoloji","1999",([ (1,"Dağlar Dağlar"),(2,"Gülpembe"),(3,"İşte hendek işte deve")]) mancoloji[3].append((4,"Sarı Çizmeli Mehmet Ağa")) mancoloji[3].append((5,"Kara sevda")) artist, album, year, tracks = mancoloji for song in tracks: track, title = song print("\t Track Number: {}, Title: {}".format(track, title)) """ Explanation: Eğer tuple bir list içeriğiyorsa list değiştirilebilir. Örneğin; End of explanation """
decisionstats/pythonfordatascience
Web+Scraping.ipynb
apache-2.0
# Fetch the raw HTML of the Rotten Tomatoes "Batman" franchise page.
r = urllib.request.urlopen('https://www.rottentomatoes.com/franchise/batman_movies').read()

# Using the Beautiful Soup library to parse the data.
soup = BeautifulSoup(r, "lxml")
type(soup)
len(str(soup.prettify()))
soup
soup.prettify()

# We convert the data to a string format using str.
# Note in R we use str for structure, but in Python we use str to convert
# to character (like the as.character or paste command would do in R).
a=str(soup.prettify())
a[1000:20000]

# We try and find the location of a particular tag we are interested in.
# Note we are using triple quotes to escape special characters.
a.find('''class="snippet"''')

'''to find a particular class, open the page using chrome, select the particular subpart of page and click inspect'''
""" Explanation: Data Pulling End of explanation """
a.find('''class="title"''')
a[33075:33200]
# Exploratory probes of the parsed tree for the movie title elements.
titles = soup.find_all("div", class_="title")
titles
titles[1]
titlesnew=soup.find_all("div",class_="media franchiseItem")
titlesnew
titlesnew[0]
len(titlesnew)
titlesnew[0].a
type(titlesnew)
dir(titlesnew)
titlesnew[0].strong
titlesnew[0].strong.a
titlesnew[0].strong.a.text
titlesnew[0].span
titlesnew[0].span.text
first_rtcore = titlesnew[0].find('span', class_ = 'meter-value')
first_rtcore.text
len(titlesnew)
years = soup.find_all("span", class_="subtle")
years
""" Explanation: to find a particular class, open the page using chrome, select the particular subpart of page and click inspect Name of Movie End of explanation """
a[79900:80000]
a.find('''class="scoreRow"''')
a[97600:97900]
b= soup.find('span', {'class' : 'meter-value'})
print(b)
years = soup.find_all("span", class_="meter-value")
years

# Print each movie's title and Rotten Tomatoes score.
# BUG FIX: the original loop used range(1, 11), which both skipped the
# first result (index 0) and raised IndexError on titlesnew[10], since
# len(titlesnew) was shown above to be 10 (valid indices 0-9).  Iterating
# over the actual length covers every result and cannot go out of bounds.
name=[]
rating=[]
for i in range(len(titlesnew)):
    name=titlesnew[i].strong.a.text
    rating=titlesnew[i].find('span', class_ = 'meter-value').text
    print(name,rating)
""" Explanation: Ratings from Rotten Tomatoes End of explanation """
catalyst-cooperative/pudl
test/validate/notebooks/validate_bf_eia923.ipynb
mit
%load_ext autoreload
%autoreload 2
import sys
import pandas as pd
import sqlalchemy as sa
import pudl
import warnings
import logging

# Route INFO-level log messages to stdout so they appear in the notebook.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.handlers = [handler]

import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
plt.style.use('ggplot')
mpl.rcParams['figure.figsize'] = (10,4)
mpl.rcParams['figure.dpi'] = 150
pd.options.display.max_columns = 56

# Open SQLAlchemy engines for the FERC Form 1 and PUDL databases using the
# default PUDL workspace settings.
pudl_settings = pudl.workspace.setup.get_defaults()
ferc1_engine = sa.create_engine(pudl_settings['ferc1_db'])
pudl_engine = sa.create_engine(pudl_settings['pudl_db'])
pudl_settings
""" Explanation: Validation of bf_eia923 This notebook runs sanity checks on the Boiler Fuel data that are reported in EIA Form 923. These are the same tests which are run by the bf_eia923 validation tests by PyTest. The notebook and visualizations are meant to be used as a diagnostic tool, to help understand what's wrong when the PyTest based data validations fail for some reason. End of explanation """
# Pull the un-aggregated (freq=None) boiler fuel table as the baseline.
pudl_out_orig = pudl.output.pudltabl.PudlTabl(pudl_engine, freq=None)
bf_eia923_orig = pudl_out_orig.bf_eia923()
""" Explanation: Get the original EIA 923 data First we pull the original (post-ETL) EIA 923 data out of the database. We will use the values in this dataset as a baseline for checking that latter aggregated data and derived values remain valid. We will also eyeball these values here to make sure they are within the expected range. This may take a minute or two depending on the speed of your machine. End of explanation """
bf_eia923_orig.sample(10)
""" Explanation: Validation Against Fixed Bounds Some of the variables reported in this table have a fixed range of reasonable values, like the heat content per unit of a given fuel type. These varaibles can be tested for validity against external standards directly.
In general we have two kinds of tests in this section: * Tails: are the exteme values too extreme? Typically, this is at the 5% and 95% level, but depending on the distribution, sometimes other thresholds are used. * Middle: Is the central value of the distribution where it should be? Fields that need checking: These are all contained in the frc_eia923 table data validations, and those should just be re-used if possible. Ugh, names not all the same though. Annoying. * ash_content_pct (BIT, SUB, LIG, coal) * fuel_mmbtu_per_unit (BIT, SUB, LIG, coal, DFO, oil, gas) * sulfur_content_pct (BIT, SUB, LIG, coal) End of explanation """
# Fixed-bound checks: each call plots the data against known-good ranges.
pudl.validate.plot_vs_bounds(bf_eia923_orig, pudl.validate.bf_eia923_coal_heat_content)
""" Explanation: Coal Heat Content End of explanation """
pudl.validate.plot_vs_bounds(bf_eia923_orig, pudl.validate.bf_eia923_oil_heat_content)
""" Explanation: Oil Heat Content End of explanation """
pudl.validate.plot_vs_bounds(bf_eia923_orig, pudl.validate.bf_eia923_gas_heat_content)
""" Explanation: Gas Heat Content End of explanation """
pudl.validate.plot_vs_bounds(bf_eia923_orig, pudl.validate.bf_eia923_coal_ash_content)
""" Explanation: Coal Ash Content (%) End of explanation """
pudl.validate.plot_vs_bounds(bf_eia923_orig, pudl.validate.bf_eia923_coal_sulfur_content)
""" Explanation: Coal Sulfur Content (%) End of explanation """
# Self-consistency check of the whole historical distribution vs. annual subsamples.
pudl.validate.plot_vs_self(bf_eia923_orig, pudl.validate.bf_eia923_self)
""" Explanation: Validating Historical Distributions As a sanity check of the testing process itself, we can check to see whether the entire historical distribution has attributes that place it within the extremes of a historical subsampling of the distribution.
In this case, we sample each historical year, and look at the range of values taken on by some quantile, and see whether the same quantile for the whole of the dataset fits within that range End of explanation """
# Compare the original data against a monthly ("MS") aggregation.
pudl_out_month = pudl.output.pudltabl.PudlTabl(pudl_engine, freq="MS")
bf_eia923_month = pudl_out_month.bf_eia923()
pudl.validate.plot_vs_agg(bf_eia923_orig, bf_eia923_month, pudl.validate.bf_eia923_agg)
""" Explanation: Validate Monthly Aggregation It's possible that the distribution will change as a function of aggregation, or we might make an error in the aggregation process. These tests check that a collection of quantiles for the original and the data aggregated by month have internally consistent values. End of explanation """
# Compare the original data against an annual ("AS") aggregation.
pudl_out_year = pudl.output.pudltabl.PudlTabl(pudl_engine, freq="AS")
bf_eia923_year = pudl_out_year.bf_eia923()
pudl.validate.plot_vs_agg(bf_eia923_orig, bf_eia923_year, pudl.validate.bf_eia923_agg)
""" Explanation: Validate Annual Aggregation It's possible that the distribution will change as a function of aggregation, or we might make an error in the aggregation process. These tests check that a collection of quantiles for the original and the data aggregated by year have internally consistent values. End of explanation """
tylere/docker-tmpnb-ee
notebooks/2 - Earth Engine API Examples/2 - EE 101.ipynb
apache-2.0
from IPython.display import Image
""" Explanation: Earth Engine 101 This workbook is an introdution to Earth Engine analysis in an IPython Notebook, using the Python API. The content is similar to what is covered in the Introduction to the Earth Engine API workshop using the Earth Engine Javascript "Playground". Let's get started by importing a few moduled used in this tutorial. End of explanation """
print("Hello, world!")
""" Explanation: Hello, World To get used to using IPython Notebooks, let's print some simple output back to the notebook. Click on the box below, and then press the play (run) button from the toolbar above. End of explanation """
string = "Hello, world!"
print(string)
""" Explanation: That works, but we can also first store the content in a variable, and then print out the variable. End of explanation """
# Initialize the Earth Engine API (requires server-side EE credentials).
import ee
ee.Initialize()
""" Explanation: Hello, Images Let's work with something more interesting... a dataset provided by Earth Engine. Assuming that this server has been setup with access to the Earth Engine Python API, we should be able to import and initialise the Earth Engine Python module (named 'ee'). If the module loads successfully, nothing will be returned when you run the following code. End of explanation """
Image('http://www.google.com/earth/outreach/images/tutorials_eeintro_05_data_catalog.png')
""" Explanation: Next, let's locate a dataset to display. Start by going to the Earth Engine Public Data Catalog (https://earthengine.google.org/#index). End of explanation """
# Reference the SRTM 90m digital elevation dataset by its catalog Image ID.
srtm = ee.Image("CGIAR/SRTM90_V4")
""" Explanation: Type in the term SRTM in the search box, click the search button, and then select the dataset SRTM Digital Elevation Data Version 4 from the list of results. This will bring up a data description page for the SRTM Digital Elevation Data 30m dataset.
The data description page provide a short description of the dataset and links to the data provider, but the key piece of information that we need for working with the dataset in Earth Engine is the Image ID, which for this dataset is CGIAR/SRTM90_V4. Let's use the Image ID to store a reference to this image dataset: End of explanation """
info = srtm.getInfo()
print(info)
""" Explanation: And now, we can print out information about the dataset, using the .getInfo() method. End of explanation """
from IPython.display import Image
Image(url=srtm.getThumbUrl())
""" Explanation: What is returned by the .getInfo() command is a Python dictionary. If needed, we could parse out this information and make use of it in our analysis. Add an Image to the Map IPython Notebooks can be used to display an image, using the Image module: End of explanation """
# Add visualization parameters to stretch the elevation range 0-3000 m.
Image(url=srtm.getThumbUrl({'min':0, 'max':3000}))
""" Explanation: Ok, we can see the outlines of the continents, but there is not a lot of contrast between different elevation areas. So let's improve upon that, but adding some visualization parameters. End of explanation """
# Restrict the thumbnail to a 50 km buffer around the SF Bay Area.
point = ee.Geometry.Point(-122.0918, 37.422)
region_bay_area = point.buffer(50000).bounds().getInfo()['coordinates']
Image(url=srtm.getThumbUrl({'min':0, 'max':1000, 'region':region_bay_area}))
""" Explanation: By default, the .getThumbUrl() method returns the entire extent of the image, which in this case is global. We can also specify a region, to show a smaller area.
End of explanation """
# Create a reference to the image collection
l8 = ee.ImageCollection('LANDSAT/LC8_L1T_TOA')
# Filter the collection down to a two week period
filtered = l8.filterDate('2013-05-01', '2013-05-15');
# Use the mosaic reducer, to select the most recent pixel in areas of overlap
l8_image = filtered.mosaic()
# Define a region roughly covering California
point = ee.Geometry.Point(-118, 37)
region_california = point.buffer(500000).bounds().getInfo()['coordinates']
# And finally display the image.
Image(url=l8_image.getThumbUrl({'region':region_california}))
""" Explanation: Load and Filter an Image Collection So far we have been working with a single image, but there are also interesting datasets that are distributed as a series of images (such as images collected by satellite). Head back to the Earth Engine Public Data Catalog, search for landsat 8 toa, and load up the data description page for the USGS Landsat 8 TOA Reflectance (Orthorectified) dataset. The ID for this Image Collection is LANDSAT/LC8_L1T_TOA. End of explanation """
# True-color composite: red/green/blue bands (B4, B3, B2).
Image(url=l8_image.getThumbUrl({ 'region':region_california, 'bands':'B4,B3,B2', 'min':0, 'max':0.3 }))
""" Explanation: Playing with Image Bands Using the default image visualization parameters, that doesn't look like much. So we add some visualization data, to display a true color image. End of explanation """
# False-color composite using the near-infrared band (B5) as red.
Image(url=l8_image.getThumbUrl({ 'region':region_california, 'bands':'B5,B4,B3', 'min':0, 'max':0.3 }))
""" Explanation: And by changing the bands displayed, we can also display a false color image. End of explanation """
# Widen the date filter to a full year so many scenes overlap.
filtered = l8.filterDate('2013-01-01', '2014-01-01')
""" Explanation: Play with Reducing Image Collections Next expand the date range to cover an entire year, so that there are many overlapping images. We will continue to use the .mosaic() reducer, which retains the last (most recent) pixels in areas of image overlap. Clouds are readily apparent.
End of explanation """
l8_image = filtered.mosaic()
Image(url=l8_image.getThumbUrl({ 'region':region_california, 'bands':'B4,B3,B2', 'min':0, 'max':0.3 }))
""" Explanation: ImageCollection.mosaic Reducer End of explanation """
l8_image = filtered.median()
Image(url=l8_image.getThumbUrl({ 'region':region_california, 'bands':'B4,B3,B2', 'min':0, 'max':0.3 }))
""" Explanation: ImageCollection.median Reducer End of explanation """
l8_image = filtered.min()
Image(url=l8_image.getThumbUrl({ 'region':region_california, 'bands':'B4,B3,B2', 'min':0, 'max':0.3 }))
""" Explanation: ImageCollection.min Reducer End of explanation """
l8_image = filtered.max()
Image(url=l8_image.getThumbUrl({ 'region':region_california, 'bands':'B4,B3,B2', 'min':0, 'max':0.3 }))
""" Explanation: ImageCollection.max Reducer End of explanation """
sotirisnik/dqn
dqn.ipynb
mit
import gym
import time
import random
import numpy as np
from collections import deque
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
""" Explanation: Import essential libraries End of explanation """
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF

def get_session(gpu_fraction=0.3):
    # Create a TF session that grabs only a fraction of the GPU memory
    # (TF otherwise allocates nearly the whole GPU up front).
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

def run_on_cpu():
    # Create a TF session with the GPU disabled (CPU-only execution).
    config = tf.ConfigProto( device_count = {'GPU': 0} )
    return tf.Session(config=config)

KTF.set_session(get_session())
""" Explanation: We declare two function get_session and run_on_cpu. The first one uses cuda and we should allocate one fraction of memory otherwise it will allocate almost all available gpu. The second one is for running the operations in cpu only.
End of explanation """
env = gym.make( 'BreakoutDeterministic-v4')
""" Explanation: Load Breakout Atari Game - BreakoutDeterministic-v4 use the skip frame that DeepMind used End of explanation """
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

def to_grayscale( observation ):
    # Weighted RGB-to-grayscale conversion (0.299/0.587/0.114 luma
    # weights); returns a uint8 array.
    r, g, b = observation[:,:,0], observation[:,:,1], observation[:,:,2]
    ret = 0.299 * r + 0.587 * g + 0.114 * b
    return ( np.array( ret, dtype = np.uint8 ) )

def preprocess_observation( observation ):
    # Resize to 84x110, crop 18 rows off the top (score area) and 8 off
    # the bottom to get an 84x84 frame, then convert to grayscale.
    res = cv2.resize( observation, (84,110) )
    crop = res[18:110-8:,:,:]
    grayscale = to_grayscale( crop )
    return ( grayscale )

tmp = preprocess_observation( env.reset() )
plt.imshow( tmp, cmap='gray' )
plt.show()
""" Explanation: Preprocess observation <p>1) Resize image</p> <p>2) Crop center image - do not keep score </p> <p>3) Grayscale image - reduce the rgb space to grayscale, save space </p> End of explanation """
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation, Lambda
from keras.layers.convolutional import Conv2D
from keras.optimizers import RMSprop, Adam
from keras import initializers

# Build model
model = Sequential()
init_distr = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
#kernel_initializer=init_distr
#32 filters of kernel(3,3), stride=4, input shape must be in format row, col, channels
#init='uniform',
# Input is a stack of 4 grayscale 84x84 frames; the Lambda layer rescales
# pixel values from [0, 255] to [0, 1].
model.add( Lambda(lambda x: x / 255.0, dtype='float32', input_shape=(84,84,4)) )
model.add( Conv2D(32, (8,8), strides=(4,4), padding='same' ) )
model.add( Activation( 'relu' ) )
model.add(Conv2D(64, (4,4), strides=(2,2), padding='same' ) )
model.add( Activation( 'relu' ) )
model.add(Conv2D(64, (3,3), strides=(1,1), padding='same' ) )
model.add( Activation( 'relu' ) )
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
# One linear output per action: predicted Q-values.
model.add( Dense( env.action_space.n, activation='linear' ) )
#model.compile(RMSprop(), 'MSE')
#model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
learning_rate = 1e-6#025
model.compile(loss='mse', optimizer=Adam(lr=learning_rate),
              metrics=['accuracy'] )
model.summary()
model.summary()
""" Explanation: Build model Our model consists of: 1 normalization layer - divide images by 255 which is the maximum value in grayscale images 3 convolutional layers - relu activation function 3 fully connected layers - relu activation function 1 output layer - linear activation function End of explanation """
#build fixed
from keras.models import model_from_json
json_string = model.to_json()
fixed_model = model_from_json(json_string)#does not load weights
fixed_model.compile(loss='mse', optimizer=Adam(lr=learning_rate),
                    metrics=['accuracy'] )

import time
gamma = 0.99
alpha = 1#0.999999#00025
max_reward = 0.0
epoch = 0
start_episode = 1
epsilon = 1
epsilon_min = 0.1
exploration_steps = 1000000
#amount that we will reduce from epsilon each time we pick an action using our policy
epsilon_discount = ( epsilon - epsilon_min ) / exploration_steps
MAX_SIZE = 40000#capacity of deque
MIN_MIN_SIZE = 20000#min size for replay in order to start replay which trains our model
D = deque( maxlen=MAX_SIZE )#stores states that are used for during training
frames = 0#keeps total frames so far

def load_deque():
    # Restore the replay memory from disk.
    global D
    pkl_file = open( 'mydeque.pkl', 'rb')
    D = pickle.load( pkl_file )
    pkl_file.close()

def save_deque():
    # Persist the replay memory to disk.
    output = open( 'mydeque.pkl', 'wb' )
    pickle.dump( D, output )
    output.close()

def load_dqn_model():
    # Reload both networks (architecture + weights) from disk.
    global model, fixed_model
    from keras.models import model_from_json
    # load json and create model
    json_file = open('model_background.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights("model_background.h5")
    print("Loaded model from disk")
    #model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.compile(loss='mse', optimizer=Adam(lr=learning_rate),
                  metrics=['accuracy'] )
    # load json and create model
    json_file = open('model_background.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    fixed_model = model_from_json(loaded_model_json)
    # load weights into new model
    fixed_model.load_weights("model_background.h5")
    print("Loaded model from disk - loaded to fixed")
    #model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    fixed_model.compile(loss='mse', optimizer=Adam(lr=learning_rate),
                        metrics=['accuracy'] )

import pandas as pd
import pickle
episodes = []
rewards = []
epsilons = []
total_frames = []

def save_train():
    # Append training progress to CSV, reset the in-memory logs, and
    # write the current online-model weights to disk.
    global episodes, rewards, epsilons, total_frames
    #save [episodes, rewards, epsilons ] to csv file
    d = {'episode': episodes, 'reward': rewards, 'epsilon': epsilons, 'total_frames': total_frames}
    df = pd.DataFrame(data=d, index=None)
    if not os.path.isfile('filename.csv'):
        df.to_csv('filename.csv',header ='column_names', index=None)
    else: # else it exists so append without writing the header
        df.to_csv('filename.csv',mode = 'a',header=False, index=None)
    episodes = []
    rewards = []
    epsilons = []
    total_frames = []
    #save model to disk
    # serialize model to JSON
    model_json = model.to_json()
    with open("model_background.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("model_background.h5")
    print("Saved model to disk")
    #save deque to disk
    #save_deque()

def load_train():
    # Resume a previous run: restore episode counter, epsilon and frame
    # count from the progress CSV, then reload model weights if present.
    global start_episode, epsilon, frames
    #get last episode and epsilon
    if not os.path.isfile('filename.csv'):
        start_episode, epsilon = 1, 1
    else: # else it exists so append without writing the header
        df = pd.read_csv( 'filename.csv')
        if len(df) == 0:
            start_episode, epsilon, frames = 1, 1, 0
        else:
            epsilon = list( df['epsilon'].tail(1) )[0]
            start_episode = list( df['episode'].tail(1) )[0] + 1
            frames = list( df['total_frames'].tail(1) )[0]
    if os.path.isfile('model_background.json'):
        load_dqn_model()
    #if os.path.isfile('mydeque.pkl'):
    #load_deque()

load_train()
#print( start_episode, epsilon )
#print( type( start_episode ) )
#print( type( epsilon ) )
#print( model.summary() )
#print( D )
total_observe = 12000#total_episodes - total games to play
MIN_SIZE = 32#size of the minibatch - sample 32 samples
observe_frame = 0
#before starting training, we have to collect some samples - this stage is called "observe"
def must_observe():
    return ( observe_frame < MIN_MIN_SIZE )

#replay trains our model
def replay( ):
    # Sample a minibatch from the replay memory and do one Q-learning
    # gradient step.  NOTE(review): targets (y and Q_next) are predicted
    # with `model` while the gradient update goes to `fixed_model`; the
    # two are synced every 10 episodes below — the naming is inverted
    # relative to the usual DQN online/target convention.  Confirm this
    # is intentional.
    if len( D ) < MIN_MIN_SIZE:
        return
    #print( "sample" )
    samples = random.sample( D, MIN_SIZE )
    all_x = []
    all_y = []
    for sample in samples:
        observation, reward, done, new_observation, action = sample
        y = model.predict( observation.reshape( ( 1, 84, 84, 4) ) )
        Q_next = model.predict( new_observation.reshape( ( 1, 84, 84, 4) ) )
        # Clip rewards to [-1, 1] as in the DeepMind DQN setup.
        reward = np.clip( reward, -1, 1 )
        if done:
            y[0,action] = reward
        else:
            y[0,action] = reward + gamma * ( np.max( Q_next[0] ) )
        #print( y )
        neural_network_observation = observation.reshape( ( 1, 84, 84, 4) )
        all_x.append( neural_network_observation )
        all_y.append( y )
        #model.fit( neural_network_observation, y, epochs=1, verbose=0 )
        #model.train_on_batch( neural_network_observation, y )
    all_x = np.array( all_x ).reshape( (MIN_SIZE,84,84,4) )
    all_y = np.array( all_y ).reshape( (MIN_SIZE,4) )
    #model.train_on_batch( all_x, all_y )
    fixed_model.fit(all_x, all_y, epochs=1, batch_size=MIN_SIZE, verbose=0)
    del all_x, all_y

start = time.time()
episode = start_episode
while episode <= total_observe:#3600*5):
    #start new game
    observation = env.reset()
    observation = preprocess_observation( observation )
    #build first state
    recent_frames = deque(maxlen=4)
    for i in range( 4 ):
        recent_frames.append( observation )
    total_reward = 0
    #print( episode )
    cur_lives = 5
    step = 0
    action = 0
    steps = 0
    #play untill the game ends
    while True:
        #env.render()
        # The agent's state is the stack of the last 4 preprocessed frames.
        stack_observation = np.stack(recent_frames,axis=0)
        if must_observe():
            observe_frame += 1
        if must_observe() == False:
            steps += 1
        #pick an random action or use our policy
        if random.uniform(0,1) < epsilon:
            action = env.action_space.sample()
        else:
            Q = model.predict( stack_observation.reshape( ( 1, 84, 84, 4) ) )[0]
            action = np.argmax( Q )
        #step = 0
        #execute selected action
        new_observation, reward, done, info = env.step( action )
        #build next stacked state
        new_observation = preprocess_observation( new_observation )#apply preprocess
        next_recent_frames = recent_frames.copy()
        next_recent_frames.append( new_observation )
        next_new_observation = np.stack(next_recent_frames,axis=0)
        memory_reward = reward
        # Losing a life is stored as a -1 reward in replay memory.
        if info['ale.lives'] < cur_lives:
            cur_lives = info['ale.lives']
            memory_reward = -1
        #insert into deque - those samples are later used for training
        D.append( ( stack_observation, memory_reward, done, next_new_observation, action ) )
        total_reward += reward
        replay()#train model
        #if game is over
        if done:
            if must_observe() == False:
                episodes.append( episode )
                rewards.append( total_reward )
                epsilons.append( epsilon )
            D.append( ( stack_observation, -1, done, next_new_observation, action ) )
            break
        observation = new_observation
        recent_frames.append( observation )
        #we reduce the epsilon only when we have collected enough samples
        if must_observe() == False:
            epsilon = max( epsilon_min, epsilon - epsilon_discount )
    if must_observe() == False:
        frames += steps
        total_frames.append( frames )
        print( "Episode " + str(episode) + " | total reward := " + str(total_reward) + " | steps := " + str(steps) + " total frames := " + str(frames) + " | epsilon := " + str(epsilon) )
    else:
        print( "Observe total frames := " + str(observe_frame) )
    #weights are saved per 10 games, also progress is saved on disk
    if episode % 10 == 0 and episode > 1:
        if must_observe() == False:
            model.set_weights( fixed_model.get_weights() )
            save_train()
    if must_observe() == False:
        episode += 1

end = time.time()
print("total time is " + str( end - start ) )
""" Explanation: Build fixed model - that we will update during training.
Afterwards we will update the weights when we save them on disk End of explanation """
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/machine_learning_in_the_enterprise/solutions/distributed-hyperparameter-tuning.ipynb
apache-2.0
import os # The Google Cloud Notebook product has specific requirements IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") # Google Cloud Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_GOOGLE_CLOUD_NOTEBOOK: USER_FLAG = "--user" # Install necessary dependencies ! pip3 install {USER_FLAG} --upgrade google-cloud-aiplatform """ Explanation: Running a Hyperparameter Tuning Job with Vertex Training Learning objectives In this notebook, you learn how to: Create a Vertex AI custom job for training a model. Launch hyperparameter tuning job with the Python SDK. Cleanup resources. Overview This notebook demonstrates how to run a hyperparameter tuning job with Vertex Training to discover optimal hyperparameter values for an ML model. To speed up the training process, MirroredStrategy from the tf.distribute module is used to distribute training across multiple GPUs on a single machine. In this notebook, you create a custom-trained model from a Python script in a Docker container. You learn how to modify training application code for hyperparameter tuning and submit a Vertex Training hyperparameter tuning job with the Python SDK. Dataset The dataset used for this tutorial is the horses or humans dataset from TensorFlow Datasets. The trained model predicts if an image is of a horse or a human. Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook. Install additional packages Install the latest version of Vertex SDK for Python. End of explanation """ # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages. 
End of explanation """ import os PROJECT_ID = "qwiklabs-gcp-00-b9e7121a76ba" # Replace your Project ID here # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) """ Explanation: Set up your Google Cloud project Enable the Vertex AI API and Compute Engine API. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands. Set your project ID If you don't know your project ID, you may be able to get your project ID using gcloud. End of explanation """ if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "qwiklabs-gcp-00-b9e7121a76ba" # Replace your Project ID here """ Explanation: Otherwise, set your project ID here. End of explanation """ ! gcloud config set project $PROJECT_ID """ Explanation: Set project ID End of explanation """ # Import necessary librarary from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") """ Explanation: Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. 
End of explanation """ BUCKET_URI = "gs://qwiklabs-gcp-00-b9e7121a76ba" # Replace your Bucket name here REGION = "us-central1" # @param {type:"string"} if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://qwiklabs-gcp-00-b9e7121a76ba": # Replace your Bucket name here BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP print(BUCKET_URI) """ Explanation: Create a Cloud Storage bucket The following steps are required, regardless of your notebook environment. When you submit a custom training job using the Cloud SDK, you will need to provide a staging bucket. Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. You may also change the REGION variable, which is used for operations throughout the rest of this notebook. Make sure to choose a region where Vertex AI services are available. You may not use a Multi-Regional Storage bucket for training with Vertex AI. End of explanation """ # Create your bucket ! gsutil mb -l $REGION $BUCKET_URI """ Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket. End of explanation """ # Give access to your Cloud Storage bucket ! gsutil ls -al $BUCKET_URI """ Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents: End of explanation """ # Import necessary libraries import os import sys from google.cloud import aiplatform from google.cloud.aiplatform import hyperparameter_tuning as hpt """ Explanation: Import libraries and define constants End of explanation """ %%writefile Dockerfile FROM gcr.io/deeplearning-platform-release/tf2-gpu.2-5 WORKDIR / # Installs hypertune library RUN pip install cloudml-hypertune # Copies the trainer code to the docker image. COPY trainer /trainer # Sets up the entry point to invoke the trainer. ENTRYPOINT ["python", "-m", "trainer.task"] """ Explanation: Write Dockerfile The first step in containerizing your code is to create a Dockerfile. 
In the Dockerfile, you'll include all the commands needed to run the image such as installing the necessary libraries and setting up the entry point for the training code. This Dockerfile uses the Deep Learning Container TensorFlow Enterprise 2.5 GPU Docker image. The Deep Learning Containers on Google Cloud come with many common ML and data science frameworks pre-installed. After downloading that image, this Dockerfile installs the CloudML Hypertune library and sets up the entrypoint for the training code. End of explanation """ # Create trainer directory ! mkdir trainer """ Explanation: Create training application code Next, you create a trainer directory with a task.py script that contains the code for your training application. End of explanation """ %%writefile trainer/task.py import argparse import hypertune import tensorflow as tf import tensorflow_datasets as tfds def get_args(): """Parses args. Must include all hyperparameters you want to tune.""" parser = argparse.ArgumentParser() parser.add_argument( '--learning_rate', required=True, type=float, help='learning rate') parser.add_argument( '--momentum', required=True, type=float, help='SGD momentum value') parser.add_argument( '--units', required=True, type=int, help='number of units in last hidden layer') parser.add_argument( '--epochs', required=False, type=int, default=10, help='number of training epochs') args = parser.parse_args() return args def preprocess_data(image, label): """Resizes and scales images.""" image = tf.image.resize(image, (150, 150)) return tf.cast(image, tf.float32) / 255., label def create_dataset(batch_size): """Loads Horses Or Humans dataset and preprocesses data.""" data, info = tfds.load( name='horses_or_humans', as_supervised=True, with_info=True) # Create train dataset train_data = data['train'].map(preprocess_data) train_data = train_data.shuffle(1000) train_data = train_data.batch(batch_size) # Create validation dataset validation_data = data['test'].map(preprocess_data) 
validation_data = validation_data.batch(64) return train_data, validation_data def create_model(units, learning_rate, momentum): """Defines and compiles model.""" inputs = tf.keras.Input(shape=(150, 150, 3)) x = tf.keras.layers.Conv2D(16, (3, 3), activation='relu')(inputs) x = tf.keras.layers.MaxPooling2D((2, 2))(x) x = tf.keras.layers.Conv2D(32, (3, 3), activation='relu')(x) x = tf.keras.layers.MaxPooling2D((2, 2))(x) x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')(x) x = tf.keras.layers.MaxPooling2D((2, 2))(x) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense(units, activation='relu')(x) outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x) model = tf.keras.Model(inputs, outputs) model.compile( loss='binary_crossentropy', optimizer=tf.keras.optimizers.SGD( learning_rate=learning_rate, momentum=momentum), metrics=['accuracy']) return model def main(): args = get_args() # Create Strategy strategy = tf.distribute.MirroredStrategy() # Scale batch size GLOBAL_BATCH_SIZE = 64 * strategy.num_replicas_in_sync train_data, validation_data = create_dataset(GLOBAL_BATCH_SIZE) # Wrap model variables within scope with strategy.scope(): model = create_model(args.units, args.learning_rate, args.momentum) # Train model history = model.fit( train_data, epochs=args.epochs, validation_data=validation_data) # Define Metric hp_metric = history.history['val_accuracy'][-1] hpt = hypertune.HyperTune() hpt.report_hyperparameter_tuning_metric( hyperparameter_metric_tag='accuracy', metric_value=hp_metric, global_step=args.epochs) if __name__ == '__main__': main() """ Explanation: In the next cell, you write the contents of the training script, task.py. This file downloads the horses or humans dataset from TensorFlow datasets and trains a tf.keras functional model using MirroredStrategy from the tf.distribute module. There are a few components that are specific to using the hyperparameter tuning service: The script imports the hypertune library. 
Note that the Dockerfile included instructions to pip install the hypertune library. The function get_args() defines a command-line argument for each hyperparameter you want to tune. In this example, the hyperparameters that will be tuned are the learning rate, the momentum value in the optimizer, and the number of units in the last hidden layer of the model. The value passed in those arguments is then used to set the corresponding hyperparameter in the code. At the end of the main() function, the hypertune library is used to define the metric to optimize. In this example, the metric that will be optimized is the the validation accuracy. This metric is passed to an instance of HyperTune. End of explanation """ # Set the IMAGE_URI IMAGE_URI = f"gcr.io/{PROJECT_ID}/horse-human:hypertune" # Build the docker image ! docker build -f Dockerfile -t $IMAGE_URI ./ # Push it to Google Container Registry: ! docker push $IMAGE_URI """ Explanation: Build the Container In the next cells, you build the container and push it to Google Container Registry. End of explanation """ # Define required specifications worker_pool_specs = [ { "machine_spec": { "machine_type": "n1-standard-4", "accelerator_type": "NVIDIA_TESLA_T4", "accelerator_count": 2, }, "replica_count": 1, "container_spec": {"image_uri": IMAGE_URI}, } ] metric_spec = {"accuracy": "maximize"} parameter_spec = { "learning_rate": hpt.DoubleParameterSpec(min=0.001, max=1, scale="log"), "momentum": hpt.DoubleParameterSpec(min=0, max=1, scale="linear"), "units": hpt.DiscreteParameterSpec(values=[64, 128, 512], scale=None), } """ Explanation: Create and run hyperparameter tuning job on Vertex AI Once your container is pushed to Google Container Registry, you use the Vertex SDK to create and run the hyperparameter tuning job. You define the following specifications: * worker_pool_specs: Dictionary specifying the machine type and Docker image. 
This example defines a single node cluster with one n1-standard-4 machine with two NVIDIA_TESLA_T4 GPUs. * parameter_spec: Dictionary specifying the parameters to optimize. The dictionary key is the string assigned to the command line argument for each hyperparameter in your training application code, and the dictionary value is the parameter specification. The parameter specification includes the type, min/max values, and scale for the hyperparameter. * metric_spec: Dictionary specifying the metric to optimize. The dictionary key is the hyperparameter_metric_tag that you set in your training application code, and the value is the optimization goal. End of explanation """ print(BUCKET_URI) # Create a CustomJob JOB_NAME = "horses-humans-hyperparam-job" + TIMESTAMP # TODO 1 my_custom_job = aiplatform.CustomJob( display_name=JOB_NAME, project=PROJECT_ID, worker_pool_specs=worker_pool_specs, staging_bucket=BUCKET_URI, ) """ Explanation: Create a CustomJob. End of explanation """ # Create and run HyperparameterTuningJob # TODO 2 hp_job = aiplatform.HyperparameterTuningJob( display_name=JOB_NAME, custom_job=my_custom_job, metric_spec=metric_spec, parameter_spec=parameter_spec, max_trial_count=15, parallel_trial_count=3, project=PROJECT_ID, search_algorithm=None, ) hp_job.run() """ Explanation: Then, create and run a HyperparameterTuningJob. There are a few arguments to note: max_trial_count: Sets an upper bound on the number of trials the service will run. The recommended practice is to start with a smaller number of trials and get a sense of how impactful your chosen hyperparameters are before scaling up. parallel_trial_count: If you use parallel trials, the service provisions multiple training processing clusters. The worker pool spec that you specify when creating the job is used for each individual training cluster. 
Increasing the number of parallel trials reduces the amount of time the hyperparameter tuning job takes to run; however, it can reduce the effectiveness of the job overall. This is because the default tuning strategy uses results of previous trials to inform the assignment of values in subsequent trials. search_algorithm: The available search algorithms are grid, random, or default (None). The default option applies Bayesian optimization to search the space of possible hyperparameter values and is the recommended algorithm. End of explanation """ # Set this to true only if you'd like to delete your bucket # TODO 3 delete_bucket = False if delete_bucket or os.getenv("IS_TESTING"): ! gsutil rm -r $BUCKET_URI """ Explanation: It will nearly take 50 mintues to complete the job successfully. Click on the generated link in the output to see your run in the Cloud Console. When the job completes, you will see the results of the tuning trials. Cleaning up To clean up all Google Cloud resources used in this project, you can delete the Google Cloud project you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: End of explanation """
owenjhwilliams/ASIIT
FindSwirlLocs-AstroScriptV1.ipynb
mit
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import h5py
from importlib import reload
import sep

# Load the PIV dataset; the .mat file is an HDF5 container, so fields come
# back in MATLAB's (column-major) axis order and are transposed below.
f = h5py.File('/Users/Owen/Dropbox/Data/ABL/SBL PIV data/RNV45-RI2.mat')
#list(f.keys())
Swirl = np.asarray(f['Swirl'])
X = np.asarray(f['X'])
Y = np.asarray(f['Y'])
# Reorder axes so the arrays are indexed (y, x[, frame]).
X = np.transpose(X,(1,0))
Y = np.transpose(Y,(1,0))
Swirl = np.transpose(Swirl,(2,1,0))
NanLocs = np.isnan(Swirl)  # remember NaN positions so they can be zeroed later
uSize = Swirl.shape

# Quick look at one frame of the swirl field.
plt.figure(figsize = [8,3])
plt.pcolor(X,Y,Swirl[:,:,1], cmap='RdBu');
plt.clim([-50, 50])
plt.axis('scaled')
plt.xlim([X.min(), X.max()])
plt.ylim([Y.min(), Y.max()])
plt.colorbar()

# Wall-normal profile of swirl rms.
# NOTE(review): this is the std over x of the time-averaged field, not the
# rms over time -- confirm this is the intended statistic.
SwirlStd = np.std(np.nanmean(Swirl,axis=2),axis = 1)
plt.plot(SwirlStd,Y[:,1])
plt.ylabel('y(m)')
plt.xlabel('Swirl rms')

# Exploratory shape checks (notebook cell outputs; no effect on the data).
Y[1].shape
SwirlStd.shape

# Normalize the field by the std profile of Swirl.
Swirl = Swirl/SwirlStd.reshape(uSize[0],1,1) #match the SwirlStd length (123) with the correct index in Swirl (also 123)
plt.figure(figsize = [8,3])
plt.pcolor(X,Y,Swirl[:,:,1], cmap='RdBu');
plt.clim([-200, 200])
plt.axis('scaled')
plt.xlim([X.min(), X.max()])
plt.ylim([Y.min(), Y.max()])
plt.colorbar()

# Replace NaNs so SEP can operate on the array.
Swirl[NanLocs] = 0 #Get rid of nans for now
""" Explanation: Test script to find all locations with large swirl
Aim is to take a velocity field, find all locations with large swirl, and then identify distinct blobs of swirl.
This script makes use of the Source Extraction and Photometry (SEP) library
End of explanation
"""
# Estimate the smooth background of one frame; SEP requires C-contiguous input.
bkg = sep.Background(np.ascontiguousarray(Swirl[:,:,1]))
bkg_image = bkg.back()
plt.imshow(bkg_image, interpolation='nearest', cmap='gray', origin='lower')
plt.colorbar();
bkg_rms = bkg.rms()
plt.imshow(bkg_rms, interpolation='nearest', cmap='gray', origin='lower')
plt.colorbar();
""" Explanation: Estimate background
End of explanation
"""
# Create a filter kernel for sep.extract.
kern = np.array([[1,2,1], [2,4,2], [1,2,1]]) # basic default kernel (3x3)
kern = np.array([[1,2,4,2,1],[2,3,5,3,2],[3,6,8,6,3],[2,3,5,3,2],[1,2,4,2,1]]) # basic default kernel (5x5)
from scipy.stats import multivariate_normal as mvnorm
x = np.linspace(-5, 5, 100)
y = mvnorm.pdf(x, mean=0, cov=1)
#plt.plot(x,y)
#mvnorm.pdf(
# Attempt at building a Gaussian kernel on a grid.
# NOTE(review): np.mgrid[-1:1:.01] is 1-D here, so y is just an alias of x and
# r = sqrt(2)*|x|; the commented-out loop suggests a 2-D grid was intended.
x = np.mgrid[-1:1:.01]
y = x;
r = (x**2+y**2)**0.5
# NOTE(review): this overwrites the 5x5 kernel above with an UNINITIALIZED
# 1-D np.empty array, and that is what gets passed to sep.extract below --
# almost certainly a bug. Either fill kern with the Gaussian values or keep
# the 5x5 array defined above.
kern = np.empty(x.shape)
#for i in kern.shape[0]
#    kern[i,:] = mvnorm.pdf(r[i,:], mean=0, cov=1)
#plt.imshow(kern)
#y = mvnorm.pdf(x, mean=0, cov=1)
#pos = np.empty(x.shape + (2,))l
#pos[:, :, 0] = x; pos[:, :, 1] = y
x = np.mgrid[-10:10:1]
x.shape
# Extract connected "objects" (swirl blobs) above 1.5 * global background rms.
objects = sep.extract(np.ascontiguousarray(Swirl[:,:,1]), 1.5, err=bkg.globalrms,filter_kernel=kern)
""" Explanation: Now extract objects
End of explanation
"""
len(objects)
from matplotlib.patches import Ellipse
#fig, ax = plt.subplots()
# Plot the frame again and overlay a 6-sigma ellipse for each detected blob.
plt.figure(figsize = [8,3])
plt.pcolor(X,Y,Swirl[:,:,1], cmap='RdBu_r');
ax = plt.gca()
plt.clim([-50, 50])
plt.axis('scaled')
plt.xlim([X.min(), X.max()])
plt.ylim([Y.min(), Y.max()])
plt.colorbar()
# Conversion from pixel (index) units to physical units along x.
# NOTE(review): assumes a uniform grid; the same scale is reused for y and for
# the ellipse axes -- confirm the grid spacing is isotropic.
scale = (X[1,-1]-X[1,1])/uSize[1]
#plt.plot(objects['x']*scale,objects['y']*scale,'go')
for i in range(len(objects)):
    e = Ellipse(xy=(objects['x'][i]*scale, objects['y'][i]*scale),
                width=6*objects['a'][i]*scale,
                height=6*objects['b'][i]*scale,
                angle=objects['theta'][i] * 180. / np.pi)
    e.set_facecolor('none')
    e.set_edgecolor('red')
    ax.add_artist(e)
#objects['x']
# Exploratory inspection of the detected object coordinates.
# NOTE(review): objects['x'] is float-valued; indexing X with it below will
# raise on modern numpy -- cast to int if this cell is needed.
scale = (X[1,-1]-X[1,1])/uSize[1]
objects['x']*scale
X[objects['x'],objects['y']]
objects['x']
""" Explanation: np.ascontiguousarray(Swirl[:,:,1]).flags
how to make array C contiguous
End of explanation
"""
antoniomezzacapo/qiskit-tutorial
community/algorithms/iterative_phase_estimation_algorithm.ipynb
apache-2.0
from math import pi import numpy as np import scipy as sp import matplotlib.pyplot as plt %matplotlib inline # importing Qiskit from qiskit import Aer, IBMQ from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister from qiskit import execute from qiskit.tools.visualization import plot_histogram from qiskit.wrapper.jupyter import * # Load saved IBMQ accounts IBMQ.load_accounts() """ Explanation: <img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> Iterative Phase Estimation Algorithm The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial. For more information about how to use the IBM Q Experience (QX), consult the tutorials, or check out the community. Contributors Antonio Córcoles, Jay Gambetta, Rudy Raymond Quantum Phase Estimation (QPE) The Quantum Phase Estimation (QPE) algorithm solves the problem of finding unknown eigenvalues of a unitary operator. The attractiveness of the QPE algorithm is due to the fact that it is a key ingredient of some other very powerful algorithms, like order-finding and Shor's. In a standard textbook, such as Nielsen & Chuang <i>Quantum Computation and Quantum Information</i>, in the QPE, each bit of the phase is encoded in a different qubit on a register using the phase kickback property of controlled-unitary operations. This is followed by an inverse Quantum Fourier Transform operation, which yields an n-bit approximation to the phase by reading the n-qubit register. Iterative Phase Estimation Algorithm (IPEA) The QPE algorithm can, however, be realized in a much smaller qubit system, by iterating the steps on a system of just two qubits. This is called the Iterative Phase Estimation Algorithm (IPEA). Consider the problem of finding $\varphi$ given $|\Psi\rangle$ and $U$ in $U |\Psi\rangle = e^{i \phi} | \Psi \rangle$, with $\phi = 2 \pi \varphi$. 
Let's assume for now that $\varphi$ can be written as $\varphi = \varphi_1/2 + \varphi_2/4 + ... + \varphi_m/2^m = 0.\varphi_1 \varphi_2 ... \varphi_m$, where we have defined the notation $0.\varphi_1 \varphi_2 ... \varphi_m$. Now, if we have two qubits, $q_0$ and $q_1$, and we initialize them as $q_0 \rightarrow |+\rangle$ and $q_1 \rightarrow |\Psi \rangle$, then, after applying a control-U between $q_0$ and $q_1$ $2^t$ times, the state of $q_0$ can be written as $|0\rangle + e^{i 2 \pi 2^{t} \varphi} | 1 \rangle$. That is, the phase of $U$ has been kicked back into $q_0$ as many times as the control operation has been performed. For $t=0$, we have a total phase in $q_0$ of $e^{i 2 \pi 2^{0} \varphi} = e^{i 2 \pi \varphi} = e^{i 2 \pi 0.\varphi_1 \varphi_2 ... \varphi_m}$ For $t=1$, the phase would be $e^{i 2 \pi 2^{1} \varphi} = e^{i 2 \pi \varphi_1} e^{i 2 \pi 0.\varphi_2 \varphi_3 ... \varphi_m}$ For $t=2$, $e^{i 2 \pi 2^{2} \varphi} = e^{i 2 \pi 2 \varphi_1} e^{i 2 \pi \varphi_2} e^{i 2 \pi 0.\varphi_3 \varphi_4 ... \varphi_m}$ And for $t=m-1$, $e^{i 2 \pi 2^{m-1} \varphi} = e^{i 2 \pi 2^{m-2} \varphi_1} e^{i 2 \pi 2^{m-3} \varphi_2} ... e^{i 2 \pi 2^{-1} \varphi_m} = e^{i 2 \pi 0.\varphi_m}$. Note that if we perform a Hadamard operation on the state $|0\rangle + e^{i 2 \pi 0.\varphi_m}|1\rangle$ and perform a measurement in the standard basis, we obtain $|0\rangle$ if $\varphi_m = 0$ and $|1\rangle$ if $\varphi_m = 1$. In the first step of the IPEA, we directly measure the least significant bit of the phase $\varphi$, $\varphi_m$, by initializing the 2-qubit register as described above, performing $2^{m-1}$ control-$U$ operations between the qubits, and measuring $q_0$ in the diagonal basis. For the second step, we initialize the register in the same way and apply $2^{m-2}$ control-$U$ operations. The phase in $q_0$ after these operations is now $e^{i 2 \pi 0.\varphi_{m-1}\varphi_{m}}= e^{i 2 \pi 0.\varphi_{m-1}} e^{i 2 \pi \varphi_m/4}$. 
We see that prior to extracting the phase bit $\varphi_{m-1}$, we must perform a phase correction of $\varphi_m /2$. This is equivalent to a rotation around the $Z-$axis of angle $-\varphi_m /4$. Therefore, the $k$th step of the IPEA, giving $\varphi_{m-k+1}$, consists of the register initialization ($q_0$ in $|+\rangle$, $q_1$ in $|\Psi\rangle$), the application of control-$U$ $2^{m-k}$ times, a rotation around $Z$ of angle $\omega_k = -2 \pi 0.0\varphi_{k+1} ... \varphi_m$, a Hadamard transform to $q_0$, and a measurement of $q_0$ in the standard basis. Note that $q_1$ remains in the state $|\Psi\rangle$ throughout the algorithm. IPEA circuit Let's first initialize the API and import the necessary packages End of explanation """ # We first define controlled gates used in the IPEA def cu1fixed(qProg, c, t, a): qProg.u1(-a, t) qProg.cx(c, t) qProg.u1(a, t) qProg.cx(c, t) def cu5pi8(qProg, c, t): cu1fixed(qProg, c, t, -5.0*pi/8.0) # We then prepare quantum and classical registers and the circuit qr = QuantumRegister(2) cr = ClassicalRegister(4) circuitName="IPEAonSimulator" ipeaCircuit = QuantumCircuit(qr, cr) # Apply IPEA ipeaCircuit.h(qr[0]) for i in range(8): cu5pi8(ipeaCircuit, qr[0], qr[1]) ipeaCircuit.h(qr[0]) ipeaCircuit.measure(qr[0], cr[0]) ipeaCircuit.reset(qr[0]) ipeaCircuit.h(qr[0]) for i in range(4): cu5pi8(ipeaCircuit, qr[0], qr[1]) ipeaCircuit.u1(-pi/2, qr[0]).c_if(cr, 1) ipeaCircuit.h(qr[0]) ipeaCircuit.measure(qr[0], cr[1]) ipeaCircuit.reset(qr[0]) ipeaCircuit.h(qr[0]) for i in range(2): cu5pi8(ipeaCircuit, qr[0], qr[1]) ipeaCircuit.u1(-pi/4, qr[0]).c_if(cr, 1) ipeaCircuit.u1(-pi/2, qr[0]).c_if(cr, 2) ipeaCircuit.u1(-3*pi/4, qr[0]).c_if(cr, 3) ipeaCircuit.h(qr[0]) ipeaCircuit.measure(qr[0], cr[2]) ipeaCircuit.reset(qr[0]) ipeaCircuit.h(qr[0]) cu5pi8(ipeaCircuit, qr[0], qr[1]) ipeaCircuit.u1(-pi/8, qr[0]).c_if(cr, 1) ipeaCircuit.u1(-2*pi/8, qr[0]).c_if(cr, 2) ipeaCircuit.u1(-3*pi/8, qr[0]).c_if(cr, 3) ipeaCircuit.u1(-4*pi/8, qr[0]).c_if(cr, 4) 
ipeaCircuit.u1(-5*pi/8, qr[0]).c_if(cr, 5) ipeaCircuit.u1(-6*pi/8, qr[0]).c_if(cr, 6) ipeaCircuit.u1(-7*pi/8, qr[0]).c_if(cr, 7) ipeaCircuit.h(qr[0]) ipeaCircuit.measure(qr[0], cr[3]) backend = Aer.get_backend('qasm_simulator') shots = 1000 results = execute(ipeaCircuit, backend=backend, shots=shots).result() plot_histogram(results.get_counts()) """ Explanation: Now you can try the following circuit in the quantum simulator for a phase of $-5\pi/8 = 2 \pi \varphi$ and $m=4$. Note that the IPEA cannot be run in the real device in this form, due to the current lack of feedback capability. End of explanation """ %%qiskit_job_status # We then prepare quantum and classical registers and the circuit qr = QuantumRegister(5) cr = ClassicalRegister(5) realStep1Circuit = QuantumCircuit(qr, cr) # Apply IPEA realStep1Circuit.h(qr[0]) for i in range(8): cu5pi8(realStep1Circuit, qr[0], qr[1]) realStep1Circuit.h(qr[0]) realStep1Circuit.measure(qr[0], cr[0]) #connect to remote API to be able to use remote simulators and real devices print("Available backends:", [Aer.backends(), IBMQ.backends()]) backend = IBMQ.get_backend("ibmq_5_tenerife") shots = 1000 job_exp1 = execute(realStep1Circuit, backend=backend, shots=shots) results1 = job_exp1.result() plot_histogram(results1.get_counts()) """ Explanation: The results are given in terms of $\varphi = 0.\varphi_1 \varphi_2 \varphi_3 \varphi_4$, with the least significant digit ($\varphi_4$) as the leftmost bit in the classical register. The result is $\varphi = 11/16$, from which $\phi = 2\pi \varphi = 11 \pi/8 = 2 \pi - 5\pi/8$, as encoded in the circuit. IPEA in the real device As we have mentioned before, we currently lack the ability to use measurement feedback or feedforward, along with qubit resetting, on the real device in the Quantum Experience. However, we still can implement a segmentized version of the IPEA by extracting the information about the phase one bit at a time. Try the following four circuits in the real device. 
They estimate the same phase as in the previous example (-5$\pi/8$), one bit at a time, from least ($\varphi_4$) to most ($\varphi_1$) significant bit. End of explanation """ %%qiskit_job_status realStep2Circuit = QuantumCircuit(qr, cr) # Apply IPEA realStep2Circuit.h(qr[0]) for i in range(4): cu5pi8(realStep2Circuit, qr[0], qr[1]) realStep2Circuit.u1(-pi/2, qr[0]) # Assuming the value of the measurement on Step 1 realStep2Circuit.h(qr[0]) realStep2Circuit.measure(qr[0], cr[0]) job_exp2 = execute(realStep2Circuit, backend=backend, shots=shots) results2 = job_exp2.result() plot_histogram(results2.get_counts()) """ Explanation: In the first step of IPEA as above, we obtain the bit "1" with probability close to one. We then proceed to the second step of IPEA, assuming that we have identified the result of the first step correctly, as below. End of explanation """ %%qiskit_job_status realStep3Circuit = QuantumCircuit(qr, cr) # Apply IPEA realStep3Circuit.h(qr[0]) for i in range(2): cu5pi8(realStep3Circuit, qr[0], qr[1]) realStep3Circuit.u1(-3*pi/4, qr[0]) # Assuming the value of the measurement on Step 1 and Step 2 realStep3Circuit.h(qr[0]) realStep3Circuit.measure(qr[0], cr[0]) job_exp3 = execute(realStep3Circuit, backend=backend, shots=shots) results3 = job_exp3.result() plot_histogram(results3.get_counts()) """ Explanation: In the second step of IPEA as above, we obtain the bit "1" with probability close to one. We then proceed to the third step of IPEA, assuming that we have identified the result of the first and second steps correctly, as below. 
End of explanation """ %%qiskit_job_status realStep4Circuit = QuantumCircuit(qr, cr) # Apply IPEA realStep4Circuit.h(qr[0]) cu5pi8(realStep4Circuit, qr[0], qr[1]) realStep4Circuit.u1(-3*pi/8, qr[0]) # Assuming the value of the measurement on Step 1, 2, and 3 realStep4Circuit.h(qr[0]) realStep4Circuit.measure(qr[0], cr[0]) job_exp4 = execute(realStep4Circuit, backend=backend, shots=shots) results4 = job_exp4.result() plot_histogram(results4.get_counts()) """ Explanation: In the third step of IPEA as above, we obtain the bit "0" with probability close to one. We then proceed to the fourth step of IPEA, assuming that we have identified the result of the first, second, and third steps correctly, as below. End of explanation """
nceder/nceder.github.io
course_materials/data-cleaning/data_cleaning.ipynb
gpl-3.0
#Using dir() and help() import pandas """ Explanation: # Extracting & Cleaning Data with Python $~$ “I Have a Data File, Now What?” $~$ Naomi Ceder naomi@naomiceder.tech @NaomiCeder projects.naomiceder.tech My main qualification? I'm quite old... <img src="/notebooks/old.jpg"> Who am I? Python since 2001 Author of Quick Python Book, 2nd Edition (3rd edition late 2017) Python Software Foundation, Vice Chair of Board of Directors Python Training (ask your company to hire me) Long time data grunt - student records, product info, transactions, etc. About you... Do you have data cleaning issues? What problems are you trying to solve? Workshop Philosophy “All happy families are alike; each unhappy family is unhappy in its own way.” - Tolstoy “What could possibly go wrong?” “If anything can go wrong, it will” - Murphy Doing things the hardway - can't we just use pandas? If things work you can use high level tools and never care... but what about when they don't work? Pandas objects do take additional memory overhead. I need you to... Ask questions Try the samples Raise issues Stop me if I'm being boring or irrelevant Listening to me drone on will be boring... for all of us. Workshop plan Introduction Tools Getting Help Debugging Sample problems - code and discussion (depending on interest) plain text - word counting Moby Dick Fixed Width CSV JSON XML Excel HTML/Scraping binary Tools Python 3.6 pip conda if you're using the Anaconda distribution Jupyter notebook (handy, not required) https://notebooks.azure.com/naomiceder/libraries/data-cleaning virtual environments virtual environments docker pandas This notebook https://notebooks.azure.com/naomiceder/libraries/data-cleaning Github... http://projects.naomiceder.tech/data-cleaning/ Getting help Python - docs.python.org Library documentation dir() and help() from Python shell Python.org documentation Python Tutorial (sleep with this under your pillow!) 
- https://docs.python.org/3/tutorial/index.html Standard Library - https://docs.python.org/3/library/index.html pandas - Tutorials, cookbook, etc - http://pandas.pydata.org/pandas-docs/stable/tutorials.html More Python libraries - PyPI, soon to be known as "the warehouse" PyPI - https://pypi.python.org/pypi “The warehouse” - https://warehouse.python.org, https://pypi.org/ End of explanation """ for x in range(10): # do various things here... y = x*x print("y: ", y) """ Explanation: Debugging Use print a lot simple fast “Did the code get here, and what was x when it did?” Not so good for edge cases in loops, large structures, etc. a bit awkward to clean up afterwards End of explanation """ import logging # create the logger logger = logging.getLogger('my_process') logger.setLevel(logging.DEBUG) # set up file for debug level messages file_handler = logging.FileHandler('process.log') file_handler.setLevel(logging.DEBUG) logger.addHandler(file_handler) # setup console for errors only console_handler = logging.StreamHandler() console_handler.setLevel(logging.ERROR) logger.addHandler(console_handler) logger.debug("This goes only to the file") logger.error("This only goes to the console and the file") print(open('process.log').read()) """ Explanation: logging https://docs.python.org/3/howto/logging-cookbook.html, https://docs.python.org/3/howto/logging.html useful in production configurable levels multiple handlers file record more work to set up less clean-up, just set the debug levels End of explanation """ import pdb for x in range(10): # do various things here... 
y = x*x pdb.set_trace() """ Explanation: Python (or IDE) debugger https://docs.python.org/3/library/pdb.html good for complex situations conditional break points stepping through a process more work and time than print needs some clean-up End of explanation """ import sys sys.byteorder # example of binary files import struct answer = 42 month = 5 day = 6 # pack 3 ints into binary buffer = struct.pack("III", answer, month, day) print("Buffer as bytes:", buffer) # write to file in binary mode open("test_binary", "wb").write(buffer) print("Decoded to string :", buffer.decode()) print("Unpacked to tuple:", struct.unpack("III", buffer)) # read from file in binary mode buffer2 = open("test_binary", "rb").read() print("Read from file:", buffer2) print(int(buffer[0])) print(buffer[0]) """ Explanation: Opening and reading files Opening encoding and errors binary vs text universal newline "with" context handler Binary files open as binary bytes, not strings struct module "endianness" or byte order Format specifier to unpack must match the one used to pack End of explanation """ b = [0, 9, 32, 48, 65, 66, 67] b_string = bytes(b) print(b_string.decode()) """ Explanation: strings vs. bytes no difference in Python 2 series of bytes, but treated as strings conversion needed for unicode big difference in Python 3 bytes - series of bytes, but really treated as bytes (0-256 integers) string - Unicode by default conversion needed between the two, bytes are not a string! 
End of explanation """ # text file example open("test", "w", newline='').write("this is\nä\x80\ff\r\ntest\xc3\x28") print("this is\nä\x80\ff\r\ntest\xc3\x28") text = open("test", "r", newline='\r\n').read() text2 = open("test2", "r").read() print(text2) text2 """ Explanation: Text files open as text treat as series of lines need encoding (UTF-8 by default) Unicode - https://docs.python.org/3/howto/unicode.html errors settings Reading series of lines - no need to read explicitly newlines still present context handlers - "with" End of explanation """ moby = [] with open("moby_dick_01.txt") as ch01: for line in ch01: moby.append(line) print(moby[:10]) """ Explanation: Pitfalls and solutions unknown encoding use binary set errors to ignore, replace, or something else newline issues strip(), rstrip(), then add back newline option Plain text NLTK data cleaning/normalization Problem - word count of Moby Dick * dictionary * collections counter * NLTK tokens End of explanation """ with open("heathrowdata.txt") as LHR_input: for row in LHR_input: print(row) lines = LHR.split("\n") header_1 = lines[0].split() header_2 = [""] * 2 + lines[1].split() print(header_2) header = ["\n".join(x) for x in zip(header_1, header_2)] print(header) #df = pandas.read_fwf(LHR) records = [dict(zip(header, [y for y in line.split() ])) for line in lines[2:]] print(records[2]) df = pandas.DataFrame([x.split() for x in lines]) df2 = pandas.DataFrame(records[2:]) print(df.shape) print(df2) help(pandas.read_fwf) """ Explanation: Data Cleaning - String Fu string methods - upper(), lower(), replace(), find(), index(), in, startswith(), endswith(), etc str.translate() regular expressions - https://docs.python.org/3/howto/regex.html ‘Some people, when confronted with a problem, think "I know, I'll use regular expressions." 
Now they have two problems’ - Jamie Zawiski Fixed Width Records Problem - London weather * plain python * FixedWidth library * pandas http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_fwf.html https://pypi.python.org/pypi/FixedWidth/0.99.3 http://docs.astropy.org/en/stable/io/ascii/ End of explanation """ with open("temp_data_01.txt") as input_file: for row in input_file: print(row) with open("temp_data_01.csv") as input_file: for row in input_file: print(row) with open("temp_data_pipes_01.txt") as input_file: for row in input_file: print(row) with open("Meteorite_Landings.tsv") as input_file: for row in input_file: print(row) with open("london.json") as input_file: for row in input_file: print(row) with open("landslide.json") as input_file: for row in input_file: print(row) open("test2", "wb").write(b"this,is,a\ntest\x00,null,file") import csv for x in csv.reader(open("test2", "r")): print(x) # Cleaning NULL (\x00) bytes from a data file fi = open('my.csv', 'rb') data = fi.read() fi.close() fo = open('mynew.csv', 'wb') fo.write(data.replace('\x00', '')) fo.close() # alternative reader = csv.reader(x.replace('\0', '') for x in mycsv) with open("chicago.json") as input_file: for row in input_file: print(row) with open("mars.json") as input_file: for row in input_file: print(row) with open("landslide.xml") as input_file: for row in input_file: print(row) with open("observations.xml") as input_file: for row in input_file: print(row) with open("weather_01.xml") as input_file: for row in input_file: print(row) """ Explanation: Delimited (CSV) files https://docs.python.org/3/library/csv.html End of explanation """ import sys import struct print(get_size(LHR)) print(get_size(lines)) print(get_size(records)) print(get_size(df2)) df2.info(memory_usage='deep') def get_size(obj, seen=None): """Recursively finds size of objects""" size = sys.getsizeof(obj) if seen is None: seen = set() obj_id = id(obj) if obj_id in seen: return 0 # Important mark as seen 
*before* entering recursion to gracefully handle # self-referential objects seen.add(obj_id) if isinstance(obj, dict): size += sum([get_size(v, seen) for v in obj.values()]) size += sum([get_size(k, seen) for k in obj.keys()]) elif hasattr(obj, '__dict__'): size += get_size(obj.__dict__, seen) elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)): size += sum([get_size(i, seen) for i in obj]) return size """ Explanation: JSON files one giant object, vs list of objects XML files xmltodict HTML and Scraping Excel files End of explanation """ import sqlalchemy # SQLAlchemy example """ Explanation: Tips and tricks Use commandline (unix) tools sets for uniqueness dictionaries for matching list and dictionary comprehensions Optimizations avoid premature optimizations processor time is generally cheaper than human time beware of loops - move things out avoid repeated string operations (concatenation, etc) parallelization and concurrency avoid reading and especially avoid writing to disk (or DB, or virtual memory) divide and conquer (map/reduce) sorting can help (binary search) Very large files (Example: based on Grainger product feed -> MongoDB, combining 4 files) * 4 delimited flat files, unsorted * items - ~2 million rows; sku, description, categories, dimensions, compliance, brand, MPN, etc. * attributes - 20 million rows; sku, attr_id, attr_name, attr_value * alternates/accessories - sku, type, alt_sku * cross reference - sku, competitor, alt part number Testing? problems with testing over very large data sets Small sample for sanity check Loud errors Be sparing with exceptions Getting Data Where is the data? 
Database Online Files Data from a database Control over what fields are extracted and how Control over output format less likely to need cleaning/normalization Python tool of choice - SQLAlchemy and/or low level drivers ORM Standardized interface for queries Results as objects End of explanation """ #FTP example #SFTP example # API example """ Explanation: Data from online sources FTP - Python module - ftplib SFTP - FTP (sort of) over SSH - pysftp API - REST interfaces over http/https - requests Scraping - http/https - requests/scrapy/beautiful soup/ect End of explanation """
olinguyen/shogun
doc/ipython-notebooks/pca/pca_notebook.ipynb
gpl-3.0
%pylab inline %matplotlib inline import os SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data') # import all shogun classes from shogun import * """ Explanation: Principal Component Analysis in Shogun By Abhijeet Kislay (GitHub ID: <a href='https://github.com/kislayabhi'>kislayabhi</a>) This notebook is about finding Principal Components (<a href="http://en.wikipedia.org/wiki/Principal_component_analysis">PCA</a>) of data (<a href="http://en.wikipedia.org/wiki/Unsupervised_learning">unsupervised</a>) in Shogun. Its <a href="http://en.wikipedia.org/wiki/Dimensionality_reduction">dimensional reduction</a> capabilities are further utilised to show its application in <a href="http://en.wikipedia.org/wiki/Data_compression">data compression</a>, image processing and <a href="http://en.wikipedia.org/wiki/Facial_recognition_system">face recognition</a>. End of explanation """ #number of data points. n=100 #generate a random 2d line(y1 = mx1 + c) m = random.randint(1,10) c = random.randint(1,10) x1 = random.random_integers(-20,20,n) y1=m*x1+c #generate the noise. noise=random.random_sample([n]) * random.random_integers(-35,35,n) #make the noise orthogonal to the line y=mx+c and add it. x=x1 + noise*m/sqrt(1+square(m)) y=y1 + noise/sqrt(1+square(m)) twoD_obsmatrix=array([x,y]) #to visualise the data we must plot it. rcParams['figure.figsize'] = 7, 7 figure,axis=subplots(1,1) xlim(-50,50) ylim(-50,50) axis.plot(twoD_obsmatrix[0,:],twoD_obsmatrix[1,:],'o',color='green',markersize=6) #the line from which we generated the data is plotted in red axis.plot(x1[:],y1[:],linewidth=0.3,color='red') title('One-Dimensional sub-space with noise') xlabel("x axis") _=ylabel("y axis") """ Explanation: Some Formal Background (Skip if you just want code examples) PCA is a useful statistical technique that has found application in fields such as face recognition and image compression, and is a common technique for finding patterns in data of high dimension. 
In machine learning problems data is often high dimensional - images, bag-of-word descriptions etc. In such cases we cannot expect the training data to densely populate the space, meaning that there will be large parts in which little is known about the data. Hence it is expected that only a small number of directions are relevant for describing the data to a reasonable accuracy. The data vectors may be very high dimensional, they will therefore typically lie closer to a much lower dimensional 'manifold'. Here we concentrate on linear dimensional reduction techniques. In this approach a high dimensional datapoint $\mathbf{x}$ is 'projected down' to a lower dimensional vector $\mathbf{y}$ by: $$\mathbf{y}=\mathbf{F}\mathbf{x}+\text{const}.$$ where the matrix $\mathbf{F}\in\mathbb{R}^{\text{M}\times \text{D}}$, with $\text{M}<\text{D}$. Here $\text{M}=\dim(\mathbf{y})$ and $\text{D}=\dim(\mathbf{x})$. From the above scenario, we assume that The number of principal components to use is $\text{M}$. The dimension of each data point is $\text{D}$. The number of data points is $\text{N}$. We express the approximation for datapoint $\mathbf{x}^n$ as:$$\mathbf{x}^n \approx \mathbf{c} + \sum\limits_{i=1}^{\text{M}}y_i^n \mathbf{b}^i \equiv \tilde{\mathbf{x}}^n.$$ * Here the vector $\mathbf{c}$ is a constant and defines a point in the lower dimensional space. * The $\mathbf{b}^i$ define vectors in the lower dimensional space (also known as 'principal component coefficients' or 'loadings'). * The $y_i^n$ are the low dimensional co-ordinates of the data. Our motive is to find the reconstruction $\tilde{\mathbf{x}}^n$ given the lower dimensional representation $\mathbf{y}^n$(which has components $y_i^n,i = 1,...,\text{M})$. For a data space of dimension $\dim(\mathbf{x})=\text{D}$, we hope to accurately describe the data using only a small number $(\text{M}\ll \text{D})$ of coordinates of $\mathbf{y}$. 
To determine the best lower dimensional representation it is convenient to use the square distance error between $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:$$\text{E}(\mathbf{B},\mathbf{Y},\mathbf{c})=\sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}[x_i^n - \tilde{x}i^n]^2.$$ * Here the basis vectors are defined as $\mathbf{B} = [\mathbf{b}^1,...,\mathbf{b}^\text{M}]$ (defining $[\text{B}]{i,j} = b_i^j$). * Corresponding low dimensional coordinates are defined as $\mathbf{Y} = [\mathbf{y}^1,...,\mathbf{y}^\text{N}].$ * Also, $x_i^n$ and $\tilde{x}_i^n$ represents the coordinates of the data points for the original and the reconstructed data respectively. * The bias $\mathbf{c}$ is given by the mean of the data $\sum_n\mathbf{x}^n/\text{N}$. Therefore, for simplification purposes we centre our data, so as to set $\mathbf{c}$ to zero. Now we concentrate on finding the optimal basis $\mathbf{B}$( which has the components $\mathbf{b}^i, i=1,...,\text{M} $). Deriving the optimal linear reconstruction To find the best basis vectors $\mathbf{B}$ and corresponding low dimensional coordinates $\mathbf{Y}$, we may minimize the sum of squared differences between each vector $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$: $\text{E}(\mathbf{B},\mathbf{Y}) = \sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}\left[x_i^n - \sum\limits_{j=1}^{\text{M}}y_j^nb_i^j\right]^2 = \text{trace} \left( (\mathbf{X}-\mathbf{B}\mathbf{Y})^T(\mathbf{X}-\mathbf{B}\mathbf{Y}) \right)$ where $\mathbf{X} = [\mathbf{x}^1,...,\mathbf{x}^\text{N}].$ Considering the above equation under the orthonormality constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$ (i.e the basis vectors are mutually orthogonal and of unit length), we differentiate it w.r.t $y_k^n$. 
The squared error $\text{E}(\mathbf{B},\mathbf{Y})$ therefore has zero derivative when: $y_k^n = \sum_i b_i^kx_i^n$ By substituting this solution in the above equation, the objective becomes $\text{E}(\mathbf{B}) = (\text{N}-1)\left[\text{trace}(\mathbf{S}) - \text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)\right],$ where $\mathbf{S}$ is the sample covariance matrix of the data. To minimise equation under the constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$, we use a set of Lagrange Multipliers $\mathbf{L}$, so that the objective is to minimize: $-\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)+\text{trace}\left(\mathbf{L}\left(\mathbf{B}^T\mathbf{B} - \mathbf{I}\right)\right).$ Since the constraint is symmetric, we can assume that $\mathbf{L}$ is also symmetric. Differentiating with respect to $\mathbf{B}$ and equating to zero we obtain that at the optimum $\mathbf{S}\mathbf{B} = \mathbf{B}\mathbf{L}$. This is a form of eigen-equation so that a solution is given by taking $\mathbf{L}$ to be diagonal and $\mathbf{B}$ as the matrix whose columns are the corresponding eigenvectors of $\mathbf{S}$. In this case, $\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right) =\text{trace}(\mathbf{L}),$ which is the sum of the eigenvalues corresponding to the eigenvectors forming $\mathbf{B}$. Since we wish to minimise $\text{E}(\mathbf{B})$, we take the eigenvectors with the largest corresponding eigenvalues. Whilst the solution to this eigen-problem is unique, this only serves to define the solution subspace since one may rotate and scale $\mathbf{B}$ and $\mathbf{Y}$ such that the value of the squared loss is exactly the same. The justification for choosing the non-rotated eigen solution is given by the additional requirement that the principal components corresponds to directions of maximal variance. 
Maximum variance criterion We aim to find that single direction $\mathbf{b}$ such that, when the data is projected onto this direction, the variance of this projection is maximal amongst all possible such projections. The projection of a datapoint onto a direction $\mathbf{b}$ is $\mathbf{b}^T\mathbf{x}^n$ for a unit length vector $\mathbf{b}$. Hence the sum of squared projections is: $$\sum\limits_{n}\left(\mathbf{b}^T\mathbf{x}^n\right)^2 = \mathbf{b}^T\left[\sum\limits_{n}\mathbf{x}^n(\mathbf{x}^n)^T\right]\mathbf{b} = (\text{N}-1)\mathbf{b}^T\mathbf{S}\mathbf{b} = \lambda(\text{N} - 1)$$ which ignoring constants, is simply the negative of the equation for a single retained eigenvector $\mathbf{b}$(with $\mathbf{S}\mathbf{b} = \lambda\mathbf{b}$). Hence the optimal single $\text{b}$ which maximises the projection variance is given by the eigenvector corresponding to the largest eigenvalues of $\mathbf{S}.$ The second largest eigenvector corresponds to the next orthogonal optimal direction and so on. This explains why, despite the squared loss equation being invariant with respect to arbitrary rotation of the basis vectors, the ones given by the eigen-decomposition have the additional property that they correspond to directions of maximal variance. These maximal variance directions found by PCA are called the $\text{principal} $ $\text{directions}.$ There are two eigenvalue methods through which shogun can perform PCA namely * Eigenvalue Decomposition Method. * Singular Value Decomposition. EVD vs SVD The EVD viewpoint requires that one compute the eigenvalues and eigenvectors of the covariance matrix, which is the product of $\mathbf{X}\mathbf{X}^\text{T}$, where $\mathbf{X}$ is the data matrix. 
Since the covariance matrix is symmetric, the matrix is diagonalizable, and the eigenvectors can be normalized such that they are orthonormal: $\mathbf{S}=\frac{1}{\text{N}-1}\mathbf{X}\mathbf{X}^\text{T},$ where the $\text{D}\times\text{N}$ matrix $\mathbf{X}$ contains all the data vectors: $\mathbf{X}=[\mathbf{x}^1,...,\mathbf{x}^\text{N}].$ Writing the $\text{D}\times\text{N}$ matrix of eigenvectors as $\mathbf{E}$ and the eigenvalues as an $\text{N}\times\text{N}$ diagonal matrix $\mathbf{\Lambda}$, the eigen-decomposition of the covariance $\mathbf{S}$ is $\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{X}^\text{T}\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\tilde{\mathbf{E}}=\tilde{\mathbf{E}}\mathbf{\Lambda},$ where we defined $\tilde{\mathbf{E}}=\mathbf{X}^\text{T}\mathbf{E}$. The final expression above represents the eigenvector equation for $\mathbf{X}^\text{T}\mathbf{X}.$ This is a matrix of dimensions $\text{N}\times\text{N}$ so that calculating the eigen-decomposition takes $\mathcal{O}(\text{N}^3)$ operations, compared with $\mathcal{O}(\text{D}^3)$ operations in the original high-dimensional space. We then can therefore calculate the eigenvectors $\tilde{\mathbf{E}}$ and eigenvalues $\mathbf{\Lambda}$ of this matrix more easily. Once found, we use the fact that the eigenvalues of $\mathbf{S}$ are given by the diagonal entries of $\mathbf{\Lambda}$ and the eigenvectors by $\mathbf{E}=\mathbf{X}\tilde{\mathbf{E}}\mathbf{\Lambda}^{-1}$ On the other hand, applying SVD to the data matrix $\mathbf{X}$ follows like: $\mathbf{X}=\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}$ where $\mathbf{U}^\text{T}\mathbf{U}=\mathbf{I}\text{D}$ and $\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}\text{N}$ and $\mathbf{\Sigma}$ is a diagonal matrix of the (positive) singular values. 
We assume that the decomposition has ordered the singular values so that the upper left diagonal element of $\mathbf{\Sigma}$ contains the largest singular value. Attempting to construct the covariance matrix $(\mathbf{X}\mathbf{X}^\text{T})$from this decomposition gives: $\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)^\text{T}$ $\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{V}\mathbf{\Sigma}\mathbf{U}^\text{T}\right)$ and since $\mathbf{V}$ is an orthogonal matrix $\left(\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}\right),$ $\mathbf{X}\mathbf{X}^\text{T}=\left(\mathbf{U}\mathbf{\Sigma}^\mathbf{2}\mathbf{U}^\text{T}\right)$ Since it is in the form of an eigen-decomposition, the PCA solution given by performing the SVD decomposition of $\mathbf{X}$, for which the eigenvectors are then given by $\mathbf{U}$, and corresponding eigenvalues by the square of the singular values. CPCA Class Reference (Shogun) CPCA class of Shogun inherits from the CPreprocessor class. Preprocessors are transformation functions that doesn't change the domain of the input features. Specifically, CPCA performs principal component analysis on the input vectors and keeps only the specified number of eigenvectors. On preprocessing, the stored covariance matrix is used to project vectors into eigenspace. Performance of PCA depends on the algorithm used according to the situation in hand. Our PCA preprocessor class provides 3 method options to compute the transformation matrix: $\text{PCA(EVD)}$ sets $\text{PCAmethod == EVD}$ : Eigen Value Decomposition of Covariance Matrix $(\mathbf{XX^T}).$ The covariance matrix $\mathbf{XX^T}$ is first formed internally and then its eigenvectors and eigenvalues are computed using QR decomposition of the matrix. 
The time complexity of this method is $\mathcal{O}(D^3)$ and should be used when $\text{N > D.}$ $\text{PCA(SVD)}$ sets $\text{PCAmethod == SVD}$ : Singular Value Decomposition of feature matrix $\mathbf{X}$. The transpose of feature matrix, $\mathbf{X^T}$, is decomposed using SVD. $\mathbf{X^T = UDV^T}.$ The matrix V in this decomposition contains the required eigenvectors and the diagonal entries of the diagonal matrix D correspond to the non-negative eigenvalues.The time complexity of this method is $\mathcal{O}(DN^2)$ and should be used when $\text{N < D.}$ $\text{PCA(AUTO)}$ sets $\text{PCAmethod == AUTO}$ : This mode automagically chooses one of the above modes for the user based on whether $\text{N>D}$ (chooses $\text{EVD}$) or $\text{N<D}$ (chooses $\text{SVD}$) PCA on 2D data Step 1: Get some data We will generate the toy data by adding orthogonal noise to a set of points lying on an arbitrary 2d line. We expect PCA to recover this line, which is a one-dimensional linear sub-space. End of explanation """ #convert the observation matrix into dense feature matrix. train_features = RealFeatures(twoD_obsmatrix) #PCA(EVD) is choosen since N=100 and D=2 (N>D). #However we can also use PCA(AUTO) as it will automagically choose the appropriate method. preprocessor = PCA(EVD) #since we are projecting down the 2d data, the target dim is 1. But here the exhaustive method is detailed by #setting the target dimension to 2 to visualize both the eigen vectors. #However, in future examples we will get rid of this step by implementing it directly. preprocessor.set_target_dim(2) #Centralise the data by subtracting its mean from it. preprocessor.init(train_features) #get the mean for the respective dimensions. mean_datapoints=preprocessor.get_mean() mean_x=mean_datapoints[0] mean_y=mean_datapoints[1] """ Explanation: Step 2: Subtract the mean. For PCA to work properly, we must subtract the mean from each of the data dimensions. 
The mean subtracted is the average across each dimension. So, all the $x$ values have $\bar{x}$ subtracted, and all the $y$ values have $\bar{y}$ subtracted from them, where:$$\bar{\mathbf{x}} = \frac{\sum\limits_{i=1}^{n}x_i}{n}$$ $\bar{\mathbf{x}}$ denotes the mean of the $x_i^{'s}$ Shogun's way of doing things : Preprocessor PCA performs principial component analysis on input feature vectors/matrices. It provides an interface to set the target dimension by $\text{set_target_dim method}.$ When the $\text{init()}$ method in $\text{PCA}$ is called with proper feature matrix $\text{X}$ (with say $\text{N}$ number of vectors and $\text{D}$ feature dimension), a transformation matrix is computed and stored internally.It inherenty also centralizes the data by subtracting the mean from it. End of explanation """ #Get the eigenvectors(We will get two of these since we set the target to 2). E = preprocessor.get_transformation_matrix() #Get all the eigenvalues returned by PCA. eig_value=preprocessor.get_eigenvalues() e1 = E[:,0] e2 = E[:,1] eig_value1 = eig_value[0] eig_value2 = eig_value[1] """ Explanation: Step 3: Calculate the covariance matrix To understand the relationship between 2 dimension we define $\text{covariance}$. It is a measure to find out how much the dimensions vary from the mean $with$ $respect$ $to$ $each$ $other.$$$cov(X,Y)=\frac{\sum\limits_{i=1}^{n}(X_i-\bar{X})(Y_i-\bar{Y})}{n-1}$$ A useful way to get all the possible covariance values between all the different dimensions is to calculate them all and put them in a matrix. Example: For a 3d dataset with usual dimensions of $x,y$ and $z$, the covariance matrix has 3 rows and 3 columns, and the values are this: $$\mathbf{S} = \quad\begin{pmatrix}cov(x,x)&cov(x,y)&cov(x,z)\cov(y,x)&cov(y,y)&cov(y,z)\cov(z,x)&cov(z,y)&cov(z,z)\end{pmatrix}$$ Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix Find the eigenvectors $e^1,....e^M$ of the covariance matrix $\mathbf{S}$. 
Shogun's way of doing things : Step 3 and Step 4 are directly implemented by the PCA preprocessor of Shogun toolbar. The transformation matrix is essentially a $\text{D}$$\times$$\text{M}$ matrix, the columns of which correspond to the eigenvectors of the covariance matrix $(\text{X}\text{X}^\text{T})$ having top $\text{M}$ eigenvalues. End of explanation """ #find out the M eigenvectors corresponding to top M number of eigenvalues and store it in E #Here M=1 #slope of e1 & e2 m1=e1[1]/e1[0] m2=e2[1]/e2[0] #generate the two lines x1=range(-50,50) x2=x1 y1=multiply(m1,x1) y2=multiply(m2,x2) #plot the data along with those two eigenvectors figure, axis = subplots(1,1) xlim(-50, 50) ylim(-50, 50) axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green") axis.plot(x1[:], y1[:], linewidth=0.7, color='black') axis.plot(x2[:], y2[:], linewidth=0.7, color='blue') p1 = Rectangle((0, 0), 1, 1, fc="black") p2 = Rectangle((0, 0), 1, 1, fc="blue") legend([p1,p2],["1st eigenvector","2nd eigenvector"],loc='center left', bbox_to_anchor=(1, 0.5)) title('Eigenvectors selection') xlabel("x axis") _=ylabel("y axis") """ Explanation: Step 5: Choosing components and forming a feature vector. Lets visualize the eigenvectors and decide upon which to choose as the $principle$ $component$ of the data set. End of explanation """ #The eigenvector corresponding to higher eigenvalue(i.e eig_value2) is choosen (i.e e2). #E is the feature vector. E=e2 """ Explanation: In the above figure, the blue line is a good fit of the data. It shows the most significant relationship between the data dimensions. It turns out that the eigenvector with the $highest$ eigenvalue is the $principle$ $component$ of the data set. Form the matrix $\mathbf{E}=[\mathbf{e}^1,...,\mathbf{e}^M].$ Here $\text{M}$ represents the target dimension of our final projection End of explanation """ #transform all 2-dimensional feature matrices to target-dimensional approximations. 
yn=preprocessor.apply_to_feature_matrix(train_features) #Since, here we are manually trying to find the eigenvector corresponding to the top eigenvalue. #The 2nd row of yn is choosen as it corresponds to the required eigenvector e2. yn1=yn[1,:] """ Explanation: Step 6: Projecting the data to its Principal Components. This is the final step in PCA. Once we have choosen the components(eigenvectors) that we wish to keep in our data and formed a feature vector, we simply take the vector and multiply it on the left of the original dataset. The lower dimensional representation of each data point $\mathbf{x}^n$ is given by $\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$ Here the $\mathbf{E}^T$ is the matrix with the eigenvectors in rows, with the most significant eigenvector at the top. The mean adjusted data, with data items in each column, with each row holding a seperate dimension is multiplied to it. Shogun's way of doing things : Step 6 can be performed by shogun's PCA preprocessor as follows: The transformation matrix that we got after $\text{init()}$ is used to transform all $\text{D-dim}$ feature matrices (with $\text{D}$ feature dimensions) supplied, via $\text{apply_to_feature_matrix methods}$.This transformation outputs the $\text{M-Dim}$ approximation of all these input vectors and matrices (where $\text{M}$ $\leq$ $\text{min(D,N)}$). End of explanation """ x_new=(yn1 * E[0]) + tile(mean_x,[n,1]).T[0] y_new=(yn1 * E[1]) + tile(mean_y,[n,1]).T[0] """ Explanation: Step 5 and Step 6 can be applied directly with Shogun's PCA preprocessor (from next example). It has been done manually here to show the exhaustive nature of Principal Component Analysis. 
Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$ The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$ End of explanation """ figure, axis = subplots(1,1) xlim(-50, 50) ylim(-50, 50) axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green") axis.plot(x_new, y_new, 'o', color='blue', markersize=5, label="red") title('PCA Projection of 2D data into 1D subspace') xlabel("x axis") ylabel("y axis") #add some legend for information p1 = Rectangle((0, 0), 1, 1, fc="r") p2 = Rectangle((0, 0), 1, 1, fc="g") p3 = Rectangle((0, 0), 1, 1, fc="b") legend([p1,p2,p3],["normal projection","2d data","1d projection"],loc='center left', bbox_to_anchor=(1, 0.5)) #plot the projections in red: for i in range(n): axis.plot([x[i],x_new[i]],[y[i],y_new[i]] , color='red') """ Explanation: The new data is plotted below End of explanation """ rcParams['figure.figsize'] = 8,8 #number of points n=100 #generate the data a=random.randint(1,20) b=random.randint(1,20) c=random.randint(1,20) d=random.randint(1,20) x1=random.random_integers(-20,20,n) y1=random.random_integers(-20,20,n) z1=-(a*x1+b*y1+d)/c #generate the noise noise=random.random_sample([n])*random.random_integers(-30,30,n) #the normal unit vector is [a,b,c]/magnitude magnitude=sqrt(square(a)+square(b)+square(c)) normal_vec=array([a,b,c]/magnitude) #add the noise orthogonally x=x1+noise*normal_vec[0] y=y1+noise*normal_vec[1] z=z1+noise*normal_vec[2] threeD_obsmatrix=array([x,y,z]) #to visualize the data, we must plot it. 
from mpl_toolkits.mplot3d import Axes3D fig = pyplot.figure() ax=fig.add_subplot(111, projection='3d') #plot the noisy data generated by distorting a plane ax.scatter(x, y, z,marker='o', color='g') ax.set_xlabel('x label') ax.set_ylabel('y label') ax.set_zlabel('z label') legend([p2],["3d data"],loc='center left', bbox_to_anchor=(1, 0.5)) title('Two dimensional subspace with noise') xx, yy = meshgrid(range(-30,30), range(-30,30)) zz=-(a * xx + b * yy + d) / c """ Explanation: PCA on a 3d data. Step1: Get some data We generate points from a plane and then add random noise orthogonal to it. The general equation of a plane is: $$\text{a}\mathbf{x}+\text{b}\mathbf{y}+\text{c}\mathbf{z}+\text{d}=0$$ End of explanation """ #convert the observation matrix into dense feature matrix. train_features = RealFeatures(threeD_obsmatrix) #PCA(EVD) is choosen since N=100 and D=3 (N>D). #However we can also use PCA(AUTO) as it will automagically choose the appropriate method. preprocessor = PCA(EVD) #If we set the target dimension to 2, Shogun would automagically preserve the required 2 eigenvectors(out of 3) according to their #eigenvalues. preprocessor.set_target_dim(2) preprocessor.init(train_features) #get the mean for the respective dimensions. mean_datapoints=preprocessor.get_mean() mean_x=mean_datapoints[0] mean_y=mean_datapoints[1] mean_z=mean_datapoints[2] """ Explanation: Step 2: Subtract the mean. End of explanation """ #get the required eigenvectors corresponding to top 2 eigenvalues. E = preprocessor.get_transformation_matrix() """ Explanation: Step 3 & Step 4: Calculate the eigenvectors of the covariance matrix End of explanation """ #This can be performed by shogun's PCA preprocessor as follows: yn=preprocessor.apply_to_feature_matrix(train_features) """ Explanation: Steps 5: Choosing components and forming a feature vector. 
Since we performed PCA for a target $\dim = 2$ for the $3 \dim$ data, we are directly given the two required eigenvectors in $\mathbf{E}$ E is automagically filled by setting target dimension = M. This is different from the 2d data example where we implemented this step manually. Step 6: Projecting the data to its Principal Components. End of explanation """ new_data=dot(E,yn) x_new=new_data[0,:]+tile(mean_x,[n,1]).T[0] y_new=new_data[1,:]+tile(mean_y,[n,1]).T[0] z_new=new_data[2,:]+tile(mean_z,[n,1]).T[0] #all the above points lie on the same plane. To make it more clear we will plot the projection also. fig=pyplot.figure() ax=fig.add_subplot(111, projection='3d') ax.scatter(x, y, z,marker='o', color='g') ax.set_xlabel('x label') ax.set_ylabel('y label') ax.set_zlabel('z label') legend([p1,p2,p3],["normal projection","3d data","2d projection"],loc='center left', bbox_to_anchor=(1, 0.5)) title('PCA Projection of 3D data into 2D subspace') for i in range(100): ax.scatter(x_new[i], y_new[i], z_new[i],marker='o', color='b') ax.plot([x[i],x_new[i]],[y[i],y_new[i]],[z[i],z_new[i]],color='r') """ Explanation: Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$ The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$ End of explanation """ rcParams['figure.figsize'] = 10, 10 import os def get_imlist(path): """ Returns a list of filenames for all jpg images in a directory""" return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.pgm')] #set path of the training images path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/training/') #set no. of rows that the images will be resized. k1=100 #set no. of columns that the images will be resized. k2=100 filenames = get_imlist(path_train) filenames = array(filenames) #n is total number of images that has to be analysed. 
n=len(filenames) """ Explanation: PCA Performance Uptill now, we were using the EigenValue Decomposition method to compute the transformation matrix$\text{(N>D)}$ but for the next example $\text{(N<D)}$ we will be using Singular Value Decomposition. Practical Example : Eigenfaces The problem with the image representation we are given is its high dimensionality. Two-dimensional $\text{p} \times \text{q}$ grayscale images span a $\text{m=pq}$ dimensional vector space, so an image with $\text{100}\times\text{100}$ pixels lies in a $\text{10,000}$ dimensional image space already. The question is, are all dimensions really useful for us? $\text{Eigenfaces}$ are based on the dimensional reduction approach of $\text{Principal Component Analysis(PCA)}$. The basic idea is to treat each image as a vector in a high dimensional space. Then, $\text{PCA}$ is applied to the set of images to produce a new reduced subspace that captures most of the variability between the input images. The $\text{Pricipal Component Vectors}$(eigenvectors of the sample covariance matrix) are called the $\text{Eigenfaces}$. Every input image can be represented as a linear combination of these eigenfaces by projecting the image onto the new eigenfaces space. Thus, we can perform the identfication process by matching in this reduced space. An input image is transformed into the $\text{eigenspace,}$ and the nearest face is identified using a $\text{Nearest Neighbour approach.}$ Step 1: Get some data. Here data means those Images which will be used for training purposes. End of explanation """ # we will be using this often to visualize the images out there. def showfig(image): imgplot=imshow(image, cmap='gray') imgplot.axes.get_xaxis().set_visible(False) imgplot.axes.get_yaxis().set_visible(False) from PIL import Image from scipy import misc # to get a hang of the data, lets see some part of the dataset images. 
fig = pyplot.figure() title('The Training Dataset') for i in range(49): fig.add_subplot(7,7,i+1) train_img=array(Image.open(filenames[i]).convert('L')) train_img=misc.imresize(train_img, [k1,k2]) showfig(train_img) """ Explanation: Lets have a look on the data: End of explanation """ #To form the observation matrix obs_matrix. #read the 1st image. train_img = array(Image.open(filenames[0]).convert('L')) #resize it to k1 rows and k2 columns train_img=misc.imresize(train_img, [k1,k2]) #since Realfeatures accepts only data of float64 datatype, we do a type conversion train_img=array(train_img, dtype='double') #flatten it to make it a row vector. train_img=train_img.flatten() # repeat the above for all images and stack all those vectors together in a matrix for i in range(1,n): temp=array(Image.open(filenames[i]).convert('L')) temp=misc.imresize(temp, [k1,k2]) temp=array(temp, dtype='double') temp=temp.flatten() train_img=vstack([train_img,temp]) #form the observation matrix obs_matrix=train_img.T """ Explanation: Represent every image $I_i$ as a vector $\Gamma_i$ End of explanation """ train_features = RealFeatures(obs_matrix) preprocessor=PCA(AUTO) preprocessor.set_target_dim(100) preprocessor.init(train_features) mean=preprocessor.get_mean() """ Explanation: Step 2: Subtract the mean It is very important that the face images $I_1,I_2,...,I_M$ are $centered$ and of the $same$ size We observe here that the no. of $\dim$ for each image is far greater than no. of training images. This calls for the use of $\text{SVD}$. Setting the $\text{PCA}$ in the $\text{AUTO}$ mode does this automagically according to the situation. 
End of explanation """ #get the required eigenvectors corresponding to top 100 eigenvalues E = preprocessor.get_transformation_matrix() #lets see how these eigenfaces/eigenvectors look like: fig1 = pyplot.figure() title('Top 20 Eigenfaces') for i in range(20): a = fig1.add_subplot(5,4,i+1) eigen_faces=E[:,i].reshape([k1,k2]) showfig(eigen_faces) """ Explanation: Step 3 & Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix. End of explanation """ #we perform the required dot product. yn=preprocessor.apply_to_feature_matrix(train_features) """ Explanation: These 20 eigenfaces are not sufficient for a good image reconstruction. Having more eigenvectors gives us the most flexibility in the number of faces we can reconstruct. Though we are adding vectors with low variance, they are in directions of change nonetheless, and an external image that is not in our database could in fact need these eigenvectors to get even relatively close to it. But at the same time we must also keep in mind that adding excessive eigenvectors results in addition of little or no variance, slowing down the process. Clearly a tradeoff is required. We here set for M=100. Step 5: Choosing components and forming a feature vector. Since we set target $\dim = 100$ for this $n \dim$ data, we are directly given the $100$ required eigenvectors in $\mathbf{E}$ E is automagically filled. This is different from the 2d data example where we implemented this step manually. Step 6: Projecting the data to its Principal Components. The lower dimensional representation of each data point $\mathbf{x}^n$ is given by $$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$$ End of explanation """ re=tile(mean,[n,1]).T[0] + dot(E,yn) #lets plot the reconstructed images. 
fig2 = pyplot.figure() title('Reconstructed Images from 100 eigenfaces') for i in range(1,50): re1 = re[:,i].reshape([k1,k2]) fig2.add_subplot(7,7,i) showfig(re1) """ Explanation: Step 7: Form the approximate reconstruction of the original image $I_n$ The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\mathbf{x}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$ End of explanation """ #set path of the training images path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/testing/') test_files=get_imlist(path_train) test_img=array(Image.open(test_files[0]).convert('L')) rcParams.update({'figure.figsize': (3, 3)}) #we plot the test image , for which we have to identify a good match from the training images we already have fig = pyplot.figure() title('The Test Image') showfig(test_img) #We flatten out our test image just the way we have done for the other images test_img=misc.imresize(test_img, [k1,k2]) test_img=array(test_img, dtype='double') test_img=test_img.flatten() #We centralise the test image by subtracting the mean from it. test_f=test_img-mean """ Explanation: Recognition part. In our face recognition process using the Eigenfaces approach, in order to recognize an unseen image, we proceed with the same preprocessing steps as applied to the training images. Test images are represented in terms of eigenface coefficients by projecting them into face space$\text{(eigenspace)}$ calculated during training. Test sample is recognized by measuring the similarity distance between the test sample and all samples in the training. The similarity measure is a metric of distance calculated between two vectors. Traditional Eigenface approach utilizes $\text{Euclidean distance}$. End of explanation """ #We have already projected our training images into pca subspace as yn. 
train_proj = yn #Projecting our test image into pca subspace test_proj = dot(E.T, test_f) """ Explanation: Here we have to project our training image as well as the test image on the PCA subspace. The Eigenfaces method then performs face recognition by: 1. Projecting all training samples into the PCA subspace. 2. Projecting the query image into the PCA subspace. 3. Finding the nearest neighbour between the projected training images and the projected query image. End of explanation """ #To get Eucledian Distance as the distance measure use EuclideanDistance. workfeat = RealFeatures(mat(train_proj)) testfeat = RealFeatures(mat(test_proj).T) RaRb=EuclideanDistance(testfeat, workfeat) #The distance between one test image w.r.t all the training is stacked in matrix d. d=empty([n,1]) for i in range(n): d[i]= RaRb.distance(0,i) #The one having the minimum distance is found out min_distance_index = d.argmin() iden=array(Image.open(filenames[min_distance_index])) title('Identified Image') showfig(iden) """ Explanation: Shogun's way of doing things: Shogun uses CEuclideanDistance class to compute the familiar Euclidean distance for real valued features. It computes the square root of the sum of squared disparity between the corresponding feature dimensions of two data points. $\mathbf{d(x,x')=}$$\sqrt{\mathbf{\sum\limits_{i=0}^{n}}|\mathbf{x_i}-\mathbf{x'_i}|^2}$ End of explanation """
JakeColtman/BayesianSurvivalAnalysis
Full presentation.ipynb
mit
running_id = 0 output = [[0]] with open("E:/output.txt") as file_open: for row in file_open.read().split("\n"): cols = row.split(",") if cols[0] == output[-1][0]: output[-1].append(cols[1]) output[-1].append(True) else: output.append(cols) output = output[1:] for row in output: if len(row) == 6: row += [datetime(2016, 5, 3, 20, 36, 8, 92165), False] output = output[1:-1] def convert_to_days(dt): day_diff = dt / np.timedelta64(1, 'D') if day_diff == 0: return 23.0 else: return day_diff df = pd.DataFrame(output, columns=["id", "advert_time", "male","age","search","brand","conversion_time","event"]) df["lifetime"] = pd.to_datetime(df["conversion_time"]) - pd.to_datetime(df["advert_time"]) df["lifetime"] = df["lifetime"].apply(convert_to_days) df["male"] = df["male"].astype(int) df["search"] = df["search"].astype(int) df["brand"] = df["brand"].astype(int) df["age"] = df["age"].astype(int) df["event"] = df["event"].astype(int) df = df.drop('advert_time', 1) df = df.drop('conversion_time', 1) df = df.set_index("id") df = df.dropna(thresh=2) df.median() df ###Parametric Bayes #Shout out to Cam Davidson-Pilon ## Example fully worked model using toy data ## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html ## Note that we've made some corrections censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist()) alpha = pm.Uniform("alpha", 0,50) beta = pm.Uniform("beta", 0,50) @pm.observed def survival(value=df["lifetime"], alpha = alpha, beta = beta ): return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha)) mcmc = pm.MCMC([alpha, beta, survival ] ) mcmc.sample(10000) pm.Matplot.plot(mcmc) mcmc.trace("alpha")[:] """ Explanation: The first step in any data analysis is acquiring and munging the data An example data set can be found at: https://jakecoltman.gitlab.io/website/post/pydata/ Download the file output.txt and transform it into a format like below where the event column should be 0 if 
there's only one entry for an id, and 1 if there are two entries: End date = datetime.datetime(2016, 5, 3, 20, 36, 8, 92165) id,time_to_convert,age,male,event,search,brand End of explanation """ #Solution to question 4: def weibull_median(alpha, beta): return beta * ((log(2)) ** ( 1 / alpha)) plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]) #Solution to question 4: ### Increasing the burn parameter allows us to discard results before convergence ### Thinning the results removes autocorrelation mcmc = pm.MCMC([alpha, beta, survival ] ) mcmc.sample(10000, burn = 3000, thin = 20) pm.Matplot.plot(mcmc) #Solution to Q5 ## Adjusting the priors impacts the overall result ## If we give a looser, less informative prior then we end up with a broader, shorter distribution ## If we give much more informative priors, then we get a tighter, taller distribution censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist()) ## Note the narrowing of the prior alpha = pm.Normal("alpha", 1.7, 10000) beta = pm.Normal("beta", 18.5, 10000) ####Uncomment this to see the result of looser priors ## Note this ends up pretty much the same as we're already very loose #alpha = pm.Uniform("alpha", 0, 30) #beta = pm.Uniform("beta", 0, 30) @pm.observed def survival(value=df["lifetime"], alpha = alpha, beta = beta ): return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha)) mcmc = pm.MCMC([alpha, beta, survival ] ) mcmc.sample(10000, burn = 5000, thin = 20) pm.Matplot.plot(mcmc) #plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]) ## Solution to bonus ## Super easy to do in the Bayesian framework, all we need to do is look at what % of samples ## meet our criteria medians = [weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))] testing_value = 15.6 number_of_greater_samples = sum([x >= testing_value for x in medians]) 100 * 
(number_of_greater_samples / len(medians)) #Cox model """ Explanation: Problems: 2 - Try to fit your data from section 1 3 - Use the results to plot the distribution of the median -------- 4 - Try adjusting the number of samples, the burn parameter and the amount of thinning to correct get good answers 5 - Try adjusting the prior and see how it affects the estimate -------- 6 - Try to fit a different distribution to the data 7 - Compare answers Bonus - test the hypothesis that the true median is greater than a certain amount For question 2, note that the median of a Weibull is: $$β(log 2)^{1/α}$$ End of explanation """ #Fitting solution cf = lifelines.CoxPHFitter() cf.fit(df, 'lifetime', event_col = 'event') cf.summary """ Explanation: If we want to look at covariates, we need a new approach. We'll use Cox proprtional hazards. More information here. End of explanation """ #Solution to 1 fig, axis = plt.subplots(nrows=1, ncols=1) cf.baseline_survival_.plot(ax = axis, title = "Baseline Survival") # Solution to prediction regressors = np.array([[1,45,0,0]]) survival = cf.predict_survival_function(regressors) survival #Solution to plotting multiple regressors fig, axis = plt.subplots(nrows=1, ncols=1, sharex=True) regressor1 = np.array([[1,45,0,1]]) regressor2 = np.array([[1,23,1,1]]) survival_1 = cf.predict_survival_function(regressor1) survival_2 = cf.predict_survival_function(regressor2) plt.plot(survival_1,label = "32 year old male") plt.plot(survival_2,label = "46 year old female") plt.legend(loc = "lower left") #Difference in survival odds = survival_1 / survival_2 plt.plot(odds, c = "red") """ Explanation: Once we've fit the data, we need to do something useful with it. 
Try to do the following things: 1 - Plot the baseline survival function 2 - Predict the functions for a particular set of features 3 - Plot the survival function for two different set of features 4 - For your results in part 3 caculate how much more likely a death event is for one than the other for a given period of time End of explanation """ ##Solution to 1 from pyBMA import CoxPHFitter bmaCox = pyBMA.CoxPHFitter.CoxPHFitter() bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.5]*4) print(bmaCox.summary) #Low probability for everything favours parsimonious models bmaCox = pyBMA.CoxPHFitter.CoxPHFitter() bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.1]*4) print(bmaCox.summary) #Low probability for everything favours parsimonious models bmaCox = pyBMA.CoxPHFitter.CoxPHFitter() bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.9]*4) print(bmaCox.summary) #Low probability for everything favours parsimonious models bmaCox = pyBMA.CoxPHFitter.CoxPHFitter() bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.3, 0.9, 0.001, 0.3]) print(bmaCox.summary) """ Explanation: Model selection Difficult to do with classic tools (here) Problem: 1 - Calculate the BMA coefficient values 2 - Compare these results to past the lifelines results 3 - Try running with different priors End of explanation """
jpn--/larch
book/example/101_swissmetro_mnl.ipynb
gpl-3.0
# TEST import os import pandas as pd pd.set_option("display.max_columns", 999) pd.set_option('expand_frame_repr', False) pd.set_option('display.precision', 3) import larch larch._doctest_mode_ = True from pytest import approx import larch.numba as lx import larch.numba as lx """ Explanation: 101: Swissmetro MNL Mode Choice End of explanation """ m = lx.Model() """ Explanation: This example is a mode choice model built using the Swissmetro example dataset. First we can create a Model object: End of explanation """ m.title = "swissmetro example 01 (simple logit)" """ Explanation: We can attach a title to the model. The title does not affect the calculations as all; it is merely used in various output report styles. End of explanation """ m.availability_co_vars = { 1: "TRAIN_AV * (SP!=0)", 2: "SM_AV", 3: "CAR_AV * (SP!=0)", } """ Explanation: We need to identify the availability and choice variables. The Swissmetro dataset, as with all Biogeme data, is only in co format, so we must define alternative availability as an expression for each alternative, using a dictionary to map alternative codes and expressions. End of explanation """ m.choice_co_code = 'CHOICE' """ Explanation: In the Swissmetro example dataset, as in many discrete choice modeling applications, there is one and only one chosen alternative for each case, so the choices can be described as a single expression that evaluates to the code of the chosen alternative. End of explanation """ from larch.roles import P,X m.utility_co[1] = P("ASC_TRAIN") m.utility_co[2] = 0 m.utility_co[3] = P("ASC_CAR") m.utility_co[1] += X("TRAIN_TT") * P("B_TIME") m.utility_co[2] += X("SM_TT") * P("B_TIME") m.utility_co[3] += X("CAR_TT") * P("B_TIME") m.utility_co[1] += X("TRAIN_CO*(GA==0)") * P("B_COST") m.utility_co[2] += X("SM_CO*(GA==0)") * P("B_COST") m.utility_co[3] += X("CAR_CO") * P("B_COST") """ Explanation: We will also write utility functions for each alternative. 
Since the data is only in co format, we must use only the utility_co form for the utility functions. End of explanation """ m.ordering = [ ("ASCs", 'ASC.*',), ("LOS", 'B_.*',), ] """ Explanation: Larch will find all the parameters in the model, but we'd like to output them in a rational order. We can use the ordering method to do this: End of explanation """ import pandas as pd raw_data = pd.read_csv(lx.example_file('swissmetro.csv.gz')).rename_axis(index='CASEID') raw_data.head() """ Explanation: Now we can prepare the data, which is available in the data warehouse that comes with Larch. End of explanation """ keep = raw_data.eval("PURPOSE in (1,3) and CHOICE != 0") selected_data = raw_data[keep] """ Explanation: The swissmetro example models exclude some observations. We can use pandas to identify the observations we would like to keep. End of explanation """ ds = lx.Dataset.construct.from_idco(selected_data, alts={1:'Train', 2:'SM', 3:'Car'}) ds """ Explanation: When you've created the data you need, you can pass the dataframe to the larch.DataFrames constructor. Since the swissmetro data is in idco format, we'll need to explicitly identify the alternative codes as well. End of explanation """ m.datatree = ds """ Explanation: You might notice we have not carefully constructed this object to include only the relevant data or the various simple transformations used in the utility definition above. Larch can do this itself, if you assign this DataFrames not as the actual set of data used in model estimation, but rather as the dataservice that can be used as the source to create these computational arrays. 
End of explanation """ m.set_cap(15) m.maximize_loglike(method='SLSQP') # TEST r = _ assert r.loglike == approx(-5331.252006971916) m.calculate_parameter_covariance() m.parameter_summary() # TEST assert m.parameter_summary().data.to_markdown() == """ | | Value | Std Err | t Stat | Signif | Null Value | |:----------------------|--------:|----------:|---------:|:---------|-------------:| | ('ASCs', 'ASC_CAR') | -0.155 | 0.0432 | -3.58 | *** | 0 | | ('ASCs', 'ASC_TRAIN') | -0.701 | 0.0549 | -12.78 | *** | 0 | | ('LOS', 'B_COST') | -0.0108 | 0.000518 | -20.91 | *** | 0 | | ('LOS', 'B_TIME') | -0.0128 | 0.000569 | -22.46 | *** | 0 | """[1:-1] """ Explanation: We can estimate the models and check the results match up with those given by Biogeme: End of explanation """
TurkuNLP/BINF_Programming
supplementary/Sets and exceptions.ipynb
gpl-2.0
s=set() #this is how you create a set s.add(5) #this is how you add items to sets s.add("hi") s.add(5) s.add("ho") s.add("hi") s1=set([1,2,3,4,5]) #and you can also create sets from lists or any other iterables s2=set([4,5,6,7]) #sets allow basic set operations print("s1",s1) print("s2",s2) print("s1&s2",s1&s2) #intersection print("s1-s2",s1-s2) #difference print("s1|s2",s1|s2) #union print("s1 before union update",s1) s1.update(s2)#union in-place print("s1 after union update",s1) """ Explanation: Sets Data structure for storing sets of items. End of explanation """ s1.remove(11) """ Explanation: Set.remove() causes an error in case the item is not there. End of explanation """ import traceback #needed if we want to print the error try: #Code to run s1.remove(11) except KeyError: #react on key errors #Code to run at the moment when an error happens in between try and except print("Error. Happened. I'm inside except!") traceback.print_exc() #print the erorr (note - iPython notebook catches this and turns into the red block seemingly in the wrong place) #error processed now, program continues print("woohoo made it to the end") """ Explanation: Exceptions and try/except The try ... except block can be used to intercept these errors and recover End of explanation """
metpy/MetPy
v0.9/_downloads/53923345d98c487825399f76f4de00e7/Station_Plot_with_Layout.ipynb
bsd-3-clause
import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib.pyplot as plt import pandas as pd from metpy.calc import wind_components from metpy.cbook import get_test_data from metpy.plots import (add_metpy_logo, simple_layout, StationPlot, StationPlotLayout, wx_code_map) from metpy.units import units """ Explanation: Station Plot with Layout Make a station plot, complete with sky cover and weather symbols, using a station plot layout built into MetPy. The station plot itself is straightforward, but there is a bit of code to perform the data-wrangling (hopefully that situation will improve in the future). Certainly, if you have existing point data in a format you can work with trivially, the station plot will be simple. The StationPlotLayout class is used to standardize the plotting various parameters (i.e. temperature), keeping track of the location, formatting, and even the units for use in the station plot. This makes it easy (if using standardized names) to re-use a given layout of a station plot. End of explanation """ with get_test_data('station_data.txt') as f: data_arr = pd.read_csv(f, header=0, usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19), names=['stid', 'lat', 'lon', 'slp', 'air_temperature', 'cloud_fraction', 'dew_point_temperature', 'weather', 'wind_dir', 'wind_speed'], na_values=-99999) data_arr.set_index('stid', inplace=True) """ Explanation: The setup First read in the data. We use numpy.loadtxt to read in the data and use a structured numpy.dtype to allow different types for the various columns. This allows us to handle the columns with string data. 
End of explanation """ # Pull out these specific stations selected = ['OKC', 'ICT', 'GLD', 'MEM', 'BOS', 'MIA', 'MOB', 'ABQ', 'PHX', 'TTF', 'ORD', 'BIL', 'BIS', 'CPR', 'LAX', 'ATL', 'MSP', 'SLC', 'DFW', 'NYC', 'PHL', 'PIT', 'IND', 'OLY', 'SYR', 'LEX', 'CHS', 'TLH', 'HOU', 'GJT', 'LBB', 'LSV', 'GRB', 'CLT', 'LNK', 'DSM', 'BOI', 'FSD', 'RAP', 'RIC', 'JAN', 'HSV', 'CRW', 'SAT', 'BUY', '0CO', 'ZPC', 'VIH'] # Loop over all the whitelisted sites, grab the first data, and concatenate them data_arr = data_arr.loc[selected] # Drop rows with missing winds data_arr = data_arr.dropna(how='any', subset=['wind_dir', 'wind_speed']) # First, look at the names of variables that the layout is expecting: simple_layout.names() """ Explanation: This sample data has way too many stations to plot all of them. Instead, we just select a few from around the U.S. and pull those out of the data file. End of explanation """ # This is our container for the data data = {} # Copy out to stage everything together. In an ideal world, this would happen on # the data reading side of things, but we're not there yet. 
data['longitude'] = data_arr['lon'].values data['latitude'] = data_arr['lat'].values data['air_temperature'] = data_arr['air_temperature'].values * units.degC data['dew_point_temperature'] = data_arr['dew_point_temperature'].values * units.degC data['air_pressure_at_sea_level'] = data_arr['slp'].values * units('mbar') """ Explanation: Next grab the simple variables out of the data we have (attaching correct units), and put them into a dictionary that we will hand the plotting function later: End of explanation """ # Get the wind components, converting from m/s to knots as will be appropriate # for the station plot u, v = wind_components(data_arr['wind_speed'].values * units('m/s'), data_arr['wind_dir'].values * units.degree) data['eastward_wind'], data['northward_wind'] = u, v # Convert the fraction value into a code of 0-8, which can be used to pull out # the appropriate symbol data['cloud_coverage'] = (8 * data_arr['cloud_fraction']).fillna(10).values.astype(int) # Map weather strings to WMO codes, which we can use to convert to symbols # Only use the first symbol if there are multiple wx_text = data_arr['weather'].fillna('') data['present_weather'] = [wx_code_map[s.split()[0] if ' ' in s else s] for s in wx_text] """ Explanation: Notice that the names (the keys) in the dictionary are the same as those that the layout is expecting. Now perform a few conversions: Get wind components from speed and direction Convert cloud fraction values to integer codes [0 - 8] Map METAR weather codes to WMO codes for weather symbols End of explanation """ proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=35, standard_parallels=[35]) """ Explanation: All the data wrangling is finished, just need to set up plotting and go: Set up the map projection and set up a cartopy feature for state borders End of explanation """ # Change the DPI of the resulting figure. 
Higher DPI drastically improves the # look of the text rendering plt.rcParams['savefig.dpi'] = 255 # Create the figure and an axes set to the projection fig = plt.figure(figsize=(20, 10)) add_metpy_logo(fig, 1080, 290, size='large') ax = fig.add_subplot(1, 1, 1, projection=proj) # Add some various map elements to the plot to make it recognizable ax.add_feature(cfeature.LAND) ax.add_feature(cfeature.OCEAN) ax.add_feature(cfeature.LAKES) ax.add_feature(cfeature.COASTLINE) ax.add_feature(cfeature.STATES) ax.add_feature(cfeature.BORDERS, linewidth=2) # Set plot bounds ax.set_extent((-118, -73, 23, 50)) # # Here's the actual station plot # # Start the station plot by specifying the axes to draw on, as well as the # lon/lat of the stations (with transform). We also the fontsize to 12 pt. stationplot = StationPlot(ax, data['longitude'], data['latitude'], transform=ccrs.PlateCarree(), fontsize=12) # The layout knows where everything should go, and things are standardized using # the names of variables. So the layout pulls arrays out of `data` and plots them # using `stationplot`. simple_layout.plot(stationplot, data) plt.show() """ Explanation: The payoff End of explanation """ # Just winds, temps, and dewpoint, with colors. Dewpoint and temp will be plotted # out to Farenheit tenths. Extra data will be ignored custom_layout = StationPlotLayout() custom_layout.add_barb('eastward_wind', 'northward_wind', units='knots') custom_layout.add_value('NW', 'air_temperature', fmt='.1f', units='degF', color='darkred') custom_layout.add_value('SW', 'dew_point_temperature', fmt='.1f', units='degF', color='darkgreen') # Also, we'll add a field that we don't have in our dataset. 
This will be ignored custom_layout.add_value('E', 'precipitation', fmt='0.2f', units='inch', color='blue') # Create the figure and an axes set to the projection fig = plt.figure(figsize=(20, 10)) add_metpy_logo(fig, 1080, 290, size='large') ax = fig.add_subplot(1, 1, 1, projection=proj) # Add some various map elements to the plot to make it recognizable ax.add_feature(cfeature.LAND) ax.add_feature(cfeature.OCEAN) ax.add_feature(cfeature.LAKES) ax.add_feature(cfeature.COASTLINE) ax.add_feature(cfeature.STATES) ax.add_feature(cfeature.BORDERS, linewidth=2) # Set plot bounds ax.set_extent((-118, -73, 23, 50)) # # Here's the actual station plot # # Start the station plot by specifying the axes to draw on, as well as the # lon/lat of the stations (with transform). We also the fontsize to 12 pt. stationplot = StationPlot(ax, data['longitude'], data['latitude'], transform=ccrs.PlateCarree(), fontsize=12) # The layout knows where everything should go, and things are standardized using # the names of variables. So the layout pulls arrays out of `data` and plots them # using `stationplot`. custom_layout.plot(stationplot, data) plt.show() """ Explanation: or instead, a custom layout can be used: End of explanation """
Scripta-Qumranica-Electronica/Data-Processing
Text_Extraction/Retrieving-text-with-the-SQE-API.ipynb
mit
# Standard-library helpers used throughout this tutorial:
#   json  - pretty-printing API responses, copy - deep-copying response objects.
import sys, json, copy
from pprint import pprint

# Third-party dependencies are installed on the fly when missing.
# NOTE: the "!conda install" lines are Jupyter shell magics - this cell only
# works inside a notebook run from a conda environment.
try:
    import requests
except ImportError:
    !conda install --yes --prefix {sys.prefix} requests
    import requests

try:
    from genson import SchemaBuilder
except ImportError:
    !conda install --yes --prefix {sys.prefix} genson
    from genson import SchemaBuilder

# Base URL of the public SQE REST API; every request below is relative to it.
api = "https://api.qumranica.org/v1"

"""
Explanation: Retrieving text with the SQE API
Text retrieval using the SQE API works for both authenticated and unauthenticated requests using a JSON Web Token in the Header of the request. This token is provided in the response to successful login to an activated user account. If protected data is requested without proper authentication, then an access error is returned. This document will describe access to publicly accessible transcriptions, so the issue of authentication is not relevant for this use case.
First pull in the dependencies
End of explanation
"""

# Unauthenticated GET; the response body is a JSON object with an "editions" list.
r = requests.get(f"{api}/editions")
editions = r.json()['editions']

for edition in editions[0:5]: ## Let's only sample a couple entries
    print(json.dumps(edition, indent=2, sort_keys=True, ensure_ascii=False))

"""
Explanation: Making requests
The SQE API accepts standard HTTP requests to defined endpoints and will always return a JSON object as a response. I highly recommend exploring the API using our interactive online SQE API documentation. You can get a birds eye view of all the endpoints there, read descriptions of those endpoints, the possible inputs, and the outputs including full specifications of all the data objects used in the communication.
Finding all available scrolls
Try, for instance, downloading a list of scrolls with the GET /editions endpoint. 
End of explanation
"""

def editionIdByManuscriptName(name):
    """Return every edition id whose version name equals *name*.

    Reads the module-level ``editions`` list fetched above.  Each entry is
    assumed to be an iterable of version records (dicts with ``name``/``id``
    keys) - TODO confirm against the current /editions response shape.
    The first id in the returned list is the parent edition from which all
    other versions were forked.
    """
    eid = []
    for edition in editions:
        for version in edition:
            if name == version['name']:
                eid.append(version['id'])
    return eid

manuscriptName = '4Q51'
selectedEdition = editionIdByManuscriptName(manuscriptName)
if len(selectedEdition) > 0:
    # Keep only the primary (parent) version's id for the rest of the tutorial.
    selectedEdition = selectedEdition[0]
    print(f"The edition id for primary version of {manuscriptName} is {selectedEdition}.")

"""
Explanation: You can also use the little python function editionIdByManuscriptName here to find a edition_id in the API response by its canonical manuscript name. The function returns a list, since there may be more than one version of the edition; the first version of the edition listed is the parent from which all others were forked.
End of explanation
"""

# Detail record for one edition (primary version plus derivatives).
r = requests.get(f"{api}/editions/{selectedEdition}")
edition = r.json()
print(json.dumps(edition, indent=2, sort_keys=True, ensure_ascii=False))

"""
Explanation: Information about a specific edition
The API transaction editions/{editionId} will provide detailed information about the requested edition including its primary version and any derivative versions.
End of explanation
"""

# Ordered list of text fragments for the edition.
r = requests.get(f"{api}/editions/{selectedEdition}/text-fragments")
textFragments = r.json()["textFragments"]
for textFragment in textFragments[0:min(len(textFragments), 10)]: ## Let's just look at the first ten
    pprint(textFragment, indent=2)

# Remember one fragment id so the next cells can fetch its transcription.
selectedTextFragment = textFragments[0]["id"]

"""
Explanation: Information about the transcribed text
Text in the SQE database is divided into sections of (presumably) continuous text called "text fragments". The text fragments are composed of lines, the lines are further composed of signs. Each sign can be part of one or more ordering schemes, can have one or more interpretations, and can be linked to one or more words. The GET editions/{editionId}/text-fragments endpoint returns the list of text fragments for an edition, in the editor's suggested order. 
End of explanation
"""

# Full transcription payload for a single text fragment.
r = requests.get(f"{api}/editions/{selectedEdition}/text-fragments/{selectedTextFragment}")
text = r.json()

# genson infers a JSON Schema from the (deeply nested) response object.
builder = SchemaBuilder()
builder.add_object(text)
print(json.dumps(builder.to_schema(), indent=2, sort_keys=False, ensure_ascii=False))

"""
Explanation: Transcriptions
There are several different ways to work with transcribed text. After downloading it with the GET editions/{editionId}/text-fragments/{textFragmentId} endpoint, you may want to serialize it into something more human freindly or better suited to your computational analysis. The transcriptions in the database are a DAG, but this call provides ordered arrays along with the necessary information to parse the DAG. The object returned is fairly complex, so I will go through it step by step. The returned object has the following schema, which is explained in detail below.
End of explanation
"""

print(json.dumps(text, indent=2, sort_keys=True, ensure_ascii=False))

"""
Explanation: An actual object looks like this.
End of explanation
"""

# Drop the bulky textFragments list from a copy so the top-level keys are visible.
trimmedTextObject = copy.deepcopy(text)
del trimmedTextObject["textFragments"]
pprint(text, depth=3)

"""
Explanation: Structure of the text object
The text object contains several top level properties. It contains a lincense with the copyright holder and collaborators automatically generated from the user information in the database. It provides a list of editors (this serves as a key for all the editorId properties at all levels of the text object). And it provides edition name and a unique manuscriptId.
End of explanation
"""

pprint(text["textFragments"][0], depth=3)

"""
Explanation: Nested objects
The textFragments property contains a list of text fragments. In this case we asked for only one, so there is only one entity in the list. Each text fragment entity has a list of lines, which provides the line name, the line id, and a list of signs in the line (the signs have been removed here to make it more readable). 
End of explanation
"""

# Peek at the first few sign records of the first line of the first fragment.
trimmedSigns = text["textFragments"][0]["lines"][0]["signs"]
for sign in trimmedSigns[0:10]:
    pprint(sign)

"""
Explanation: Lines and Sign interpretation metadata
The line contains a list of signs, each of which will contain a list of interpretations and of possible next interpretations. The next interpretation ids can be used to reconstruct all possible reading orders of the signs. The order of signs in the list is the default ordering, which should match the order of the text on the manuscript itself.
Each element will have one or more sign interpretaions in the "signInterpretations" property. These entities have an id a "signInterpretation" which may be a character or may be empty if the sign interpretation has to do with formatting (like a space, or start of damage, etc.). The formatting metadata associated with the sign interpretation is in the "attributes" entity. Each attribute has an id, a code, and possible a numerical value. The codes are:
attribute_value_id | name | string_value | description
--- | --- | --- | ---
1 | sign_type | LETTER | Type of char
2 | sign_type | SPACE | Type of char
3 | sign_type | POSSIBLE_VACAT | Type of char
4 | sign_type | VACAT | Type of char
5 | sign_type | DAMAGE | Type of char
6 | sign_type | BLANK LINE | Type of char
7 | sign_type | PARAGRAPH_MARKER | Type of char
8 | sign_type | LACUNA | Type of char
9 | sign_type | BREAK | Type of char
10 | break_type | LINE_START | Defines a Metasign as marking of line
11 | break_type | LINE_END | Defines a Metasign as marking of line
12 | break_type | COLUMN_START | Defines a Metasign as marking of line
13 | break_type | COLUMN_END | Defines a Metasign as marking of line
14 | break_type | MANUSCRIPT_START | Defines a Metasign as marking of line
15 | break_type | MANUSCRIPT_END | Defines a Metasign as marking of line
17 | might_be_wider | TRUE | Set to true if the width of the sign mght be wider than the given width
18 | readability | INCOMPLETE_BUT_CLEAR | The 
trad. DJD marking of readability 19 | readability | INCOMPLETE_AND_NOT_CLEAR | The trad. DJD marking of readability 20 | is_reconstructed | TRUE | true if the letter is totally reconstructed (brackets are not part of the sign stream!) 21 | editorial_flag | CONJECTURE | Opinions of the editor like conjecture 22 | editorial_flag | SHOULD_BE_ADDED | Opinions of the editor like conjecture 23 | editorial_flag | SHOULD_BE_DELETED | Opinions of the editor like conjecture 24 | correction | OVERWRITTEN | Correction marks added by a scribe 25 | correction | HORIZONTAL_LINE | Correction marks added by a scribe 26 | correction | DIAGONAL_LEFT_LINE | Correction marks added by a scribe 27 | correction | DIAGONAL_RIGHT_LINE | Correction marks added by a scribe 28 | correction | DOT_BELOW | Correction marks added by a scribe 29 | correction | DOT_ABOVE | Correction marks added by a scribe 30 | correction | LINE_BELOW | Correction marks added by a scribe 31 | correction | LINE_ABOVE | Correction marks added by a scribe 32 | correction | BOXED | Correction marks added by a scribe 33 | correction | ERASED | Correction marks added by a scribe 34 | relative_position | ABOVE_LINE | Position relative to line context 35 | relative_position | BELOW_LINE | Position relative to line context 36 | relative_position | LEFT_MARGIN | Position relative to line context 37 | relative_position | RIGHT_MARGIN | Position relative to line context 38 | relative_position | MARGIN | Position relative to line context 39 | relative_position | UPPER_MARGIN | Position relative to line context 40 | relative_position | LOWER_MARGIN | Position relative to line context End of explanation """ def readFragments(text): formattedString = "" for textFragment in text['textFragments']: formattedString += f"\nText fragment {textFragment['textFragmentName']}:\n" formattedString = readLines(textFragment, formattedString) return formattedString def readLines(textFragment, formattedString): for line in textFragment['lines']: 
formattedString += f"line {line['lineName']}:\n" formattedString = readSigns(line, formattedString) + "\n" return formattedString def readSigns(line, formattedString): for signs in line['signs']: for signInterpretation in signs['signInterpretations']: attributes = list(map(lambda x: x['attributeValueId'], signInterpretation['attributes'])) ## Get a list of attribute ids if 20 not in attributes: ## let's omit reconstructions (attribute id 20) if 1 in attributes: ## id 1 marks a letter formattedString += signInterpretation['signInterpretation'] elif 2 in attributes: ## id 2 marks a space formattedString += " " return formattedString r = requests.get(f"{api}/editions/{selectedEdition}/text-fragments/{selectedTextFragment + 3}") ## Let's grab a bigger text text = r.json() print(readFragments(text)) """ Explanation: Serializing the data to a string Perhaps the most simple output type for this data would be a string representation. This can be achieved by iterating over the data and building a string representation. In this example we will omit reconstructed text (i.e., text with an attribute having the id 20, see line 18 below). 
End of explanation """ r = requests.get(f"{api}/editions/{selectedEdition}/text-fragments/{selectedTextFragment + 3}") ## Let's grab a bigger text text = r.json() simplifiedTextObject = {} for textFragment in text['textFragments']: simplifiedTextObject[textFragment["textFragmentName"]] = [] for line in textFragment['lines']: lineObject = {} lineObject[line['lineName']] = [] for sign in line['signs']: for signInterpretation in sign['signInterpretations']: attributes = list(map(lambda x: x['attributeValueId'], signInterpretation['attributes'])) ## Get a list of attribute ids if 20 not in attributes: ## let's omit reconstructions (attribute id 20) if 1 in attributes: ## id 1 marks a letter lineObject[line['lineName']].append(signInterpretation['signInterpretation']) elif 2 in attributes: ## id 2 marks a space lineObject[line['lineName']].append(" ") simplifiedTextObject[textFragment["textFragmentName"]].append(lineObject) pprint(simplifiedTextObject, indent=2) """ Explanation: Serializing the data to a simpler object We can also serialize the data to a more simple data structure for computational purposes. End of explanation """
mattmcd/PyBayes
scripts/edward_simple.ipynb
apache-2.0
# Generative model mu_x = 10.0 sigma_x = 2.0 x_s = edm.Normal(mu_x, sigma_x) # Sample data produced by model n_samples = 100 samples = np.zeros(n_samples) with tf.Session() as sess: for i in range(n_samples): samples[i] = sess.run(x_s) # Descriptive statistics print('Mean: {}'.format(np.mean(samples))) print('StDev: {}'.format(np.std(samples))) # Tear down model and work off observations only tf.reset_default_graph() # Model for data N = 100 theta_mu = tf.Variable(0.0) theta_sigma = tf.Variable(1.0) x = edm.Normal(loc=tf.ones(N)*theta_mu, scale=tf.ones(N)*theta_sigma) x_train = samples[:N] # Descriptive statistics for observed data print('Mean: {}'.format(np.mean(x_train))) print('StDev: {}'.format(np.std(x_train))) """ Explanation: Probabilistic Programming Probabilistic programming involves constructing generative models of data and using inference to determine the parameters of these models. Simple 'hello world' example: assume our data is noisy measurement of a constant value. How can we infer the value and the uncertainty? End of explanation """ mle = edi.MAP({}, {x: x_train}) mle.run() sess = ed.get_session() sess.run([theta_mu, theta_sigma]) """ Explanation: Point estimate of model parameters End of explanation """ tf.reset_default_graph() theta_mu_d = edm.Normal(0.0, 1.0) theta_sigma_d = edm.InverseGamma(0.01, 0.01) x_d = edm.Normal(loc=tf.ones(N)*theta_mu_d, scale=tf.ones(N)*theta_sigma_d) q_mu = edm.Normal(tf.Variable(0.0), 1.0) q_sigma = edm.InverseGamma(tf.nn.softplus(tf.Variable(0.01)), tf.nn.softplus(tf.Variable(0.01))) infer = edi.KLqp({theta_mu_d: q_mu, theta_sigma_d: q_sigma}, {x_d: x_train}) infer.run() sess = ed.get_session() sess.run([q_mu, q_sigma]) _ = plt.hist([sess.run(q_mu) for _ in range(10000)], bins=20) _ = plt.hist([sess.run(q_sigma) for _ in range(10000)], bins=20) """ Explanation: Posterior estimate of model parameters End of explanation """
pysg/caiq
CAIQ.ipynb
mit
# Density-pressure diagram for the pure-compound saturation curve.
# NOTE(review): VolumenLiqVAP / VolumenVapVAP / PresionVAP and `pyplot` are
# defined in earlier cells not included in this excerpt - confirm upstream.
pyplot.scatter(VolumenLiqVAP,PresionVAP, color = 'red', label = 'Líquido')
pyplot.scatter(VolumenVapVAP,PresionVAP, color = 'blue', label = 'Vapor')
pyplot.title('Diagrama Densidad-Presión')
pyplot.legend(loc="upper right")
pyplot.xlabel('Densidad [=] -')
pyplot.ylabel('Presión [=] bar')

"""
Explanation: PyTher: UNA LIBRERÍA Python DE CÓDIGO ABIERTO PARA MODELADO TERMODINÁMICO
Andrés Salazar*, Martín Cismondi
Instituto de Investigación y Desarrollo de Ingeniería de Procesos y Química Aplicada (Universidad Nacional de Córdoba - CONICET)
Av. Haya de la Torre s/n, Córdoba - Argentina
E-mail: (andres.pyther@gmail.com)
Software Abierto
Evitar muchas versiones de código con unos pocos cambios que dificultan su revisión por parte del mundo y hasta nosotros mismos
Git: Un controlador de versiones para organizar nuestro código y mostrarlo al mundo
El flujo de trabajo en Git es simple. Código en nuestro computador que se "sube" a un repositorio en Internet, para que el mundo (y nosotros mismo) lo puedan "bajar", ver, copiar, distribuir libremente
Termodinámica
Comportamiento del equilibrio de fases con ecuaciones cúbicas de estado
PyTher
A never-ending search for the truth: Thermodynamics in the uncertain era of the internet
Crecimiento exponencial de los datos termofísicos reportados en la lietratura especializada. Difícil de revisar, procesar y obtener valor de los datos. MichaelFrenkel, 2015
Pyther es una librería desarrollada principalmente en Python, Fortran y C, para ser utilizada en el entorno interactivo Jupyter, enfocada en la obtención, procesamiento y visualización de la creciente cantidad de datos relevantes para el analisis de problemas en termodinámica del equilibrio de fases, siguiendo la filosofía open source y open science. 
Arquitectura de PyTher
Capacidades en desarrollo
Implementaciones
Obtención, procesamiento y visualización de datos
Parámetros y ecuaciones de estado (SRK, PR y RKPR)
Correlaciones para propiedades termofísicas de sustancias puras
Digramas de equilibrio de fases de sustancia puras
PyTher utiliza la base de datos DIPPR-2003 para calcular 13 propiedades termofísicas de sustancia pura:
Densidad del sólido y líquido
Presión de vapor y calor de vaporización
Capacidad calorífica del sólido, líquido y gas ideal
Segundo coeficiente virial
Viscosidad de líquido y vapor
Conductividad térmica del líquido y del vapor
Tensión superficial
trabajo futuro...
Digrama para sustancia pura
End of explanation
"""

# Core stack for the examples below (pyther is the library under discussion).
import numpy as np
import pandas as pd
import pyther as pt
import matplotlib.pyplot as plt
%matplotlib inline

"""
Explanation: Pero, los más interesante es el procesamiento de datos, por ejemplo...
End of explanation
"""

thermodynamic_correlations = pt.Thermodynamic_correlations()

# Several pure compounds, one property, several temperatures in one call.
components = ["METHANE", "n-TETRACOSANE", "n-PENTACOSANE", "ETHANE", "ISOBUTANE", "PROPANE", "3-METHYLHEPTANE"]
property_thermodynamics = "Vapour_Pressure"
temperature = [180.4, 181.4, 185.3, 210, 800]

Vapour_Pressure = thermodynamic_correlations.property_cal(components, property_thermodynamics, temperature)
print("Vapour Pressure = {0}". format(Vapour_Pressure))

"""
Explanation: Especificar multiples sustancias puras y varias condiciones
Es común que se requiera analizar multiples sustancias de forma simultanea con la especificación de varias condiciones de un modelo (por ejemplo valores de temperaturas), de tal forma que se obtendran una gran cantidad de valores (por ejemplo una propiedad termodinámica) que conforman un set de datos (en principio desestructurados), que en general demanda mucho tiempo en revisar y (limpiar datos) para encontrar resultados que no interesan o son invalidos (por ejemplo predicciones fuera del rango de aplicación de un modelos termodinámico)
Veamos un caso típico... 
Se obtiene una cierta cantidad de datos desestructurados y sin formato que dificulta diferenciar los resultados "buenos de los malos"
End of explanation
"""

# data_temperature() tabulates the results, flagging temperatures outside the
# correlation's validity range as NaN (structured, exportable output).
temp_enter = thermodynamic_correlations.temperature_enter
thermodynamic_correlations.data_temperature(components, temperature, Vapour_Pressure, temp_enter)

"""
Explanation: Al incrementar el número de sustancias, de ensayos, de experimentos, etc... Se comienza a complicar el procesamiento de los datos, por tanto, conviene integrar PyTher con librerías especializadas para el procesamiento de datos como Pandas para obtener resultados más eficientes.
Función = data_temperature(components, temperature, Vapour_Pressure, temp_enter)
Por ejemplo, la función data_temperature() organiza los resultados obtenidos (Vapour_Pressure), diferenciando las temperaturas fuera del intervalo de aplicación de la correlación termodinámica (NaN), de forma estructurada y poder exportarlo en un formato conveniente: (csv, txt, hoja de cálculo, etc).
End of explanation
"""

# Embed the online documentation inside the notebook (Jupyter only).
from IPython.display import IFrame
documentation_pyther = "http://pyther.readthedocs.io/es/latest/index.html"
IFrame(documentation_pyther, width=950, height=350)

"""
Explanation: Documentación
PyTher cuenta con documentación abierta (incluyendo errores cometidos, porque open science también es publicar los muchos desaciertos que allanaron el camino de los pocos aciertos) y disponible en Internet con una revisión y construcción continua...
End of explanation
"""
besser82/shogun
doc/ipython-notebooks/structure/multilabel_structured_prediction.ipynb
bsd-3-clause
import os
# Root of the shogun example data checkout (overridable via environment).
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# NOTE(review): in a plain .py module `from __future__` must be the first
# statement; this works only because the notebook runs cells independently.
from __future__ import print_function
try:
    from sklearn.datasets import make_classification
except ImportError:
    # NOTE(review): pip.main() is a private API removed in modern pip;
    # kept as-is because replacing it would change the cell's behavior.
    import pip
    pip.main(['install', '--user', 'scikit-learn'])
    from sklearn.datasets import make_classification
import numpy as np

# Synthetic 2-D binary-classification data for the toy experiment.
X, Y = make_classification(n_samples=1000, n_features=2, n_informative=2,
                           n_redundant=0, n_clusters_per_class=2)

# adding some static offset to the data
X = X + 1

"""
Explanation: Multi-Label Classification with Shogun Machine Learning Toolbox
Abinash Panda (github: abinashpanda)
Thanks Thoralf Klein for taking time to help me on this project! ;)
This notebook presents training of multi-label classification using structured SVM presented in shogun. We would be using MultilabelModel for multi-label classfication. We begin with brief introduction to Multi-Label Structured Prediction [1] followed by corresponding API in Shogun. Then we are going to implement a toy example (for illustration) before getting to the real one. Finally, we evaluate the multi-label classification on well-known datasets [2]. We showed that SHOGUNs [3] implementation delivers same accuracy as scikit-learn and same or better training time.
Introduction
Multi-Label Structured Prediction
Multi-Label Structured Prediction combines the aspects of multi-label prediction and structured output. Structured prediction typically involves an input $\mathbf{x}$ (can be structured) and a structured output $\mathbf{y}$. Given a training set ${(x^i, y^i)}_{i=1,...,n} \subset \mathcal{X} \times \mathbb{P}(\mathcal{Y})$ where $\mathcal{Y}$ is a structured output set of potentially very large size (in this case $\mathcal{Y} = {y_1, y_2, ...., y_q}$ where $q$ is total number of possible classes). A joint feature map $\psi(x, y)$ is defined to incorporate structure information into the labels. 
The joint feature map $\psi(x, y)$ for MultilabelModel is defined as $\psi(x, y) \rightarrow x \otimes y$ where $\otimes$ is the tensor product. We formulate the prediction as:
$h(x) = {y \in \mathcal{Y} : f(x, y) > 0}$
The compatibility function, $f(x, y)$, acts on individual inputs and outputs, as in single-label prediction, but the prediction step consists of collecting all outputs of positive scores instead of finding the outputs of maximal score.
Multi-Label Models
In this notebook, we are going to compare the performance of two multi-label models:
* MultilabelModel model : with constant entry term $0$ in joint feature vector to not model bias term.
* MultilabelModel model_with_bias : with constant entry $1$ in the joint feature vector to model bias term.
The joint feature vector are:
* model$\leftrightarrow \psi(x, y) = [x || 0] \otimes y$.
* model_with_bias$\leftrightarrow \psi(x, y) = [x || 1] \otimes y$.
For comparision of the two models, we are going to perform on the datasets with binary labels.
Experiment 1 : Binary Label Data
Generation of some synthetic data
First of all we create some synthetic data for our toy example. We add some static offset to the data to compare the models with/without threshold. 
End of explanation
"""

from shogun import features, MultilabelSOLabels, MultilabelModel

def create_features(X, constant):
    """Build shogun features from X with an extra column of *constant*.

    The appended constant (0 or 1) implements the [x || 0] / [x || 1]
    joint-feature-vector variants described above; shogun expects the
    feature matrix transposed (features x samples).
    """
    feats = features(
        np.c_[X, constant * np.ones(X.shape[0])].T)
    return feats

# (re-import is redundant - MultilabelSOLabels was already imported above)
from shogun import MultilabelSOLabels

def create_labels(Y, n_classes):
    """Convert per-sample label lists (or scalars) to MultilabelSOLabels.

    Each entry of Y may be a single class id or an iterable of class ids;
    shogun requires the sparse labels sorted and as int32.
    """
    try:
        n_samples = Y.shape[0]
    except AttributeError:
        n_samples = len(Y)
    labels = MultilabelSOLabels(n_samples, n_classes)
    for i, sparse_label in enumerate(Y):
        try:
            sparse_label = sorted(sparse_label)
        except TypeError:
            # scalar label -> wrap into a one-element list
            sparse_label = [sparse_label]
        labels.set_sparse_label(i, np.array(sparse_label, dtype=np.int32))
    return labels

def split_data(X, Y, ratio):
    """Split (X, Y) into train/test parts; *ratio* is the train fraction."""
    num_samples = X.shape[0]
    train_samples = int(ratio * num_samples)
    return (X[:train_samples], Y[:train_samples],
            X[train_samples:], Y[train_samples:])

X_train, Y_train, X_test, Y_test = split_data(X, Y, 0.9)

# Same data, two feature maps: without (0) and with (1) the bias column.
feats_0 = create_features(X_train, 0)
feats_1 = create_features(X_train, 1)
labels = create_labels(Y_train, 2)

model = MultilabelModel(feats_0, labels)
model_with_bias = MultilabelModel(feats_1, labels)

"""
Explanation: Preparation of data and model
To create a multi-label model in shogun, we'll first create an instance of MultilabelModel and initialize it by the features and labels. The labels should be MultilabelSOLables. It should be initialized by providing with the n_labels (number of examples) and n_classes (total number of classes) and then individually adding a label using set_sparse_label() method. 
End of explanation
"""

from shogun import StochasticSOSVM, DualLibQPBMSOSVM, StructuredAccuracy
from time import time

# Online structured-output solvers for both model variants.
sgd = StochasticSOSVM(model, labels)
sgd_with_bias = StochasticSOSVM(model_with_bias, labels)

start = time()
sgd.train()
print(">>> Time taken for SGD *without* threshold tuning = %f" % (time() - start))

start = time()
sgd_with_bias.train()
print(">>> Time taken for SGD *with* threshold tuning = %f" % (time() - start))

"""
Explanation: Training and Evaluation of Structured Machines with/without Threshold
In Shogun, several solvers and online solvers have been implemented for SO-Learning. Let's try to train the model using an online solver StochasticSOSVM.
End of explanation
"""

def evaluate_machine(machine, X_test, Y_test, n_classes, bias):
    """Return the Jaccard accuracy of *machine* on the test split.

    *bias* selects the matching feature map (constant column 1 vs 0) so the
    test features agree with how the machine was trained.
    """
    if bias:
        feats_test = create_features(X_test, 1)
    else:
        feats_test = create_features(X_test, 0)
    test_labels = create_labels(Y_test, n_classes)
    out_labels = machine.apply(feats_test)
    evaluator = StructuredAccuracy()
    jaccard_similarity_score = evaluator.evaluate(out_labels, test_labels)
    return jaccard_similarity_score

print(">>> Accuracy of SGD *without* threshold tuning = %f " % evaluate_machine(sgd, X_test, Y_test, 2, False))
print(">>> Accuracy of SGD *with* threshold tuning = %f " %evaluate_machine(sgd_with_bias, X_test, Y_test, 2, True))

"""
Explanation: Accuracy
For measuring accuracy in multi-label classification, Jaccard Similarity Coefficients $\big(J(A, B) = \frac{|A \cap B|}{|A \cup B|}\big)$ is used :
$Accuracy = \frac{1}{p}\sum_{i=1}^{p}\frac{ |Y_i \cap h(x_i)|}{|Y_i \cup h(x_i)|}$
This is available in MultilabelAccuracy for MultilabelLabels and StructuredAccuracy for MultilabelSOLabels. 
End of explanation
"""

import matplotlib.pyplot as plt
%matplotlib inline

def get_parameters(weights):
    """Convert a weight triple (wx, wy, b) into slope/intercept of wx*x+wy*y+b=0."""
    return -weights[0]/weights[1], -weights[2]/weights[1]

def scatter_plot(X, y):
    """Scatter the two classes of the 2-D toy data in blue/red."""
    zeros_class = np.where(y == 0)
    ones_class = np.where(y == 1)
    plt.scatter(X[zeros_class, 0], X[zeros_class, 1], c='b', label="Negative Class")
    plt.scatter(X[ones_class, 0], X[ones_class, 1], c='r', label="Positive Class")

def plot_hyperplane(machine_0, machine_1, label_0, label_1, title, X, y):
    """Overlay the class-0 decision lines of two trained machines on the data."""
    scatter_plot(X, y)
    x_min, x_max = np.min(X[:, 0]) - 0.5, np.max(X[:, 0]) + 0.5
    y_min, y_max = np.min(X[:, 1]) - 0.5, np.max(X[:, 1]) + 0.5
    xx = np.linspace(x_min, x_max, 1000)
    m_0, c_0 = get_parameters(machine_0.get_w())
    m_1, c_1 = get_parameters(machine_1.get_w())
    yy_0 = m_0 * xx + c_0
    yy_1 = m_1 * xx + c_1
    plt.plot(xx, yy_0, "k--", label=label_0)
    plt.plot(xx, yy_1, "g-", label=label_1)
    plt.xlim((x_min, x_max))
    plt.ylim((y_min, y_max))
    plt.grid()
    plt.legend(loc="best")
    plt.title(title)
    plt.show()

fig = plt.figure(figsize=(10, 10))
plot_hyperplane(sgd, sgd_with_bias,
                "Boundary for machine *without* bias for class 0",
                "Boundary for machine *with* bias for class 0",
                "Binary Classification using SO-SVM with/without threshold tuning",
                X, Y)

"""
Explanation: Plotting the Data along with the Boundary
End of explanation
"""

from shogun import SparseMultilabel_obtain_from_generic

def plot_decision_plane(machine, title, X, y, bias):
    # Left panel: predicted label set on a 200x200 grid; right panel: the
    # two per-class hyperplanes (continued below).
    plt.figure(figsize=(24, 8))
    plt.suptitle(title)
    plt.subplot(1, 2, 1)
    x_min, x_max = np.min(X[:, 0]) - 0.5, np.max(X[:, 0]) + 0.5
    y_min, y_max = np.min(X[:, 1]) - 0.5, np.max(X[:, 1]) + 0.5
    xx = np.linspace(x_min, x_max, 200)
    yy = np.linspace(y_min, y_max, 200)
    x_mesh, y_mesh = np.meshgrid(xx, yy)
    if bias:
        feats = create_features(np.c_[x_mesh.ravel(), y_mesh.ravel()], 1)
    else:
        feats = create_features(np.c_[x_mesh.ravel(), y_mesh.ravel()], 0)
    out_labels = machine.apply(feats)
    z = []
    for i in range(out_labels.get_num_labels()):
        label = 
SparseMultilabel_obtain_from_generic(out_labels.get_label(i)).get_data() if label.shape[0] == 1: # predicted a single label z.append(label[0]) elif label.shape[0] == 2: # predicted both the classes z.append(2) elif label.shape[0] == 0: # predicted none of the class z.append(3) z = np.array(z) z = z.reshape(x_mesh.shape) c = plt.pcolor(x_mesh, y_mesh, z, cmap=plt.cm.gist_heat) scatter_plot(X, y) plt.xlim((x_min, x_max)) plt.ylim((y_min, y_max)) plt.colorbar(c) plt.title("Decision Surface") plt.legend(loc="best") plt.subplot(1, 2, 2) weights = machine.get_w() m_0, c_0 = get_parameters(weights[:3]) m_1, c_1 = get_parameters(weights[3:]) yy_0 = m_0 * xx + c_0 yy_1 = m_1 * xx + c_1 plt.plot(xx, yy_0, "r--", label="Boundary for class 0") plt.plot(xx, yy_1, "g-", label="Boundary for class 1") plt.title("Hyper planes for different classes") plt.legend(loc="best") plt.xlim((x_min, x_max)) plt.ylim((y_min, y_max)) plt.show() plot_decision_plane(sgd,"Model *without* Threshold Tuning", X, Y, False) plot_decision_plane(sgd_with_bias,"Model *with* Threshold Tuning", X, Y, True) """ Explanation: As we can see from the above plot that sgd_with_bias can produce better classification boundary. The model without threshold tuning is crossing origin of space, while the one with threshold tuning is crossing $(1,1)$ (the constant we have added earlier). 
End of explanation """ def load_data(file_name): input_file = open(file_name) lines = input_file.readlines() n_samples = len(lines) n_features = len(lines[0].split()) - 1 Y = [] X = [] for line in lines: data = line.split() Y.append(map(int, data[0].split(","))) feats = [] for feat in data[1:]: feats.append(float(feat.split(":")[1])) X.append(feats) X = np.array(X) n_classes = max(max(label) for label in Y) + 1 return X, Y, n_samples, n_features, n_classes """ Explanation: As we can see from the above plots of decision surface, the black region corresponds to the region of negative (label = $0$) class, where as the red region corresponds to the positive (label = $1$). But along with that there are some regions (although very small) of white surface and orange surface. The white surface corresponds to the region not classified to any label, whereas the orange region correspond to the region classified to both the labels. The reason for existence of these type of surface is that the above boundaries for both the class don't overlap exactly with each other (illustrated above). So, there are some regions for which both the compatibility function $f(x, 0) > 0$ as well as $f(x, 1) > 0$ (predicted both the labels) and there are some regions where both the compatibility function $f(x, 0) < 0$ and $f(x, 1) < 0$ (predicted none of the labels). 
Experiment 2 : Multi-Label Data Loading of data from LibSVM File End of explanation """ def test_multilabel_data(train_file, test_file): X_train, Y_train, n_samples, n_features, n_classes = load_data(train_file) X_test, Y_test, n_samples, n_features, n_classes = load_data(test_file) # create features and labels multilabel_feats_0 = create_features(X_train, 0) multilabel_feats_1 = create_features(X_train, 1) multilabel_labels = create_labels(Y_train, n_classes) # create multi-label model multilabel_model = MultilabelModel(multilabel_feats_0, multilabel_labels) multilabel_model_with_bias = MultilabelModel(multilabel_feats_1, multilabel_labels) # initializing machines for SO-learning multilabel_sgd = StochasticSOSVM(multilabel_model, multilabel_labels) multilabel_sgd_with_bias = StochasticSOSVM(multilabel_model_with_bias, multilabel_labels) start = time() multilabel_sgd.train() t1 = time() - start multilabel_sgd_with_bias.train() t2 = time() - start - t1 return (evaluate_machine(multilabel_sgd, X_test, Y_test, n_classes, False), t1, evaluate_machine(multilabel_sgd_with_bias, X_test, Y_test, n_classes, True), t2) """ Explanation: Training and Evaluation of Structured Machines with/without Threshold End of explanation """ from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.metrics import jaccard_similarity_score from sklearn.preprocessing import LabelBinarizer def sklearn_implementation(train_file, test_file): label_binarizer = LabelBinarizer() X_train, Y_train, n_samples, n_features, n_classes = load_data(train_file) X_test, Y_test, n_samples, n_features, n_classes = load_data(test_file) clf = OneVsRestClassifier(SVC(kernel='linear')) start = time() clf.fit(X_train, label_binarizer.fit_transform(Y_train)) t1 = time() - start return (jaccard_similarity_score(label_binarizer.fit_transform(Y_test), clf.predict(X_test)), t1) def print_table(train_file, test_file, caption): acc_0, t1, acc_1, t2 = test_multilabel_data(train_file, 
test_file) sk_acc, sk_t1 = sklearn_implementation(train_file, test_file) result = ''' \t\t%s Machine\t\t\t\tAccuracy\tTrain-time\n SGD *without* threshold tuning \t%f \t%f SGD *with* threshold tuning \t%f \t%f scikit-learn's implementation \t%f \t%f ''' % (caption, acc_0, t1, acc_1, t2, sk_acc, sk_t1) print(result) """ Explanation: Comparision with scikit-learn's implementation End of explanation """ print_table(os.path.join(SHOGUN_DATA_DIR, "multilabel/yeast_train.svm"), os.path.join(SHOGUN_DATA_DIR, "multilabel/yeast_test.svm"), "Yeast dataset") """ Explanation: Yeast Multi-Label Data [2] End of explanation """ print_table(os.path.join(SHOGUN_DATA_DIR, "multilabel/scene_train"), os.path.join(SHOGUN_DATA_DIR, "multilabel/scene_test"), "Scene dataset") """ Explanation: Scene Multi-Label Data [2] End of explanation """
BinRoot/TensorFlow-Book
ch02_basics/Concept01_defining_tensors.ipynb
mit
# TensorFlow 1.x-era demo: three equivalent ways of describing a 2x2 matrix.
import tensorflow as tf
import numpy as np

"""
Explanation: Ch 02: Concept 01
Defining tensors
Import TensorFlow and Numpy:
End of explanation
"""

# Same values, three container types: plain nested list, numpy array, tf constant.
m1 = [[1.0, 2.0], [3.0, 4.0]]
m2 = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
m3 = tf.constant([[1.0, 2.0], [3.0, 4.0]])

"""
Explanation: Now, define a 2x2 matrix in different ways:
End of explanation
"""

print(type(m1))
print(type(m2))
print(type(m3))

"""
Explanation: Let's see what happens when we print them:
End of explanation
"""

# convert_to_tensor normalizes all three representations into tf.Tensor objects.
t1 = tf.convert_to_tensor(m1, dtype=tf.float32)
t2 = tf.convert_to_tensor(m2, dtype=tf.float32)
t3 = tf.convert_to_tensor(m3, dtype=tf.float32)

"""
Explanation: So, that's what we're dealing with. Interesting.
By the way, there's a function called convert_to_tensor(...) that does exactly what you might expect. Let's use it to create tensor objects out of various types:
End of explanation
"""

print(type(t1))
print(type(t2))
print(type(t3))

"""
Explanation: Ok, ok! Time for the reveal:
End of explanation
"""