repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_stockwell.ipynb | bsd-3-clause | # Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.time_frequency import tfr_stockwell
from mne.datasets import somato
print(__doc__)
"""
Explanation: Time frequency with Stockwell transform in sensor space
This script shows how to compute induced power and intertrial coherence
using the Stockwell transform, a.k.a. S-Transform.
End of explanation
"""
# Load the MNE "somato" somatosensory dataset and cut it into epochs
# around the stimulation events.
data_path = somato.data_path()
raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
event_id, tmin, tmax = 1, -1., 3.  # epoch window: 1 s before to 3 s after the event
# Setup for reading the raw data
raw = io.Raw(raw_fname)
baseline = (None, 0)  # baseline-correct using the pre-stimulus interval
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Drop epochs exceeding the gradiometer / EOG peak-to-peak rejection limits.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
                    preload=True)
"""
Explanation: Set parameters
End of explanation
"""
# Restrict to a single channel (index 82) so the Stockwell transform runs
# quickly; tfr_stockwell returns induced power and inter-trial coherence.
epochs = epochs.pick_channels([epochs.ch_names[82]]) # reduce computation
power, itc = tfr_stockwell(epochs, fmin=6., fmax=30., decim=4, n_jobs=1,
                           width=.3, return_itc=True)
# Plot power with baseline correction, and ITC without (ITC is already normalized).
power.plot([0], baseline=(-0.5, 0), mode=None, title='S-transform (power)')
itc.plot([0], baseline=None, mode=None, title='S-transform (ITC)')
"""
Explanation: Calculate power and intertrial coherence
End of explanation
"""
|
dalonlobo/GL-Mini-Projects | TweetAnalysis/Final/Q4/Dalon_4_RTD_MiniPro_Tweepy_Q4_3.ipynb | mit | import logging # python logging module
# basic format for logging
logFormat = "%(asctime)s - [%(levelname)s] (%(funcName)s:%(lineno)d) %(message)s"
# logs will be stored in tweepy.log
logging.basicConfig(filename='tweepyloc.log', level=logging.INFO,
format=logFormat, datefmt="%Y-%m-%d %H:%M:%S")
"""
Explanation: Tweepy streamer
## Geo based analysis:
- Country based tweets count.
- Top countries in terms of Tweets Volume.
- List of top 15 countries in terms of tweet counts.
Since this is streaming application, we will use python logging module to log. Further read.
End of explanation
"""
import tweepy # importing all the modules required
import socket # will be used to create sockets
import json # manipulate json
from httplib import IncompleteRead
# Keep these tokens secret, as anyone can have full access to your
# twitter account, using these tokens
consumerKey = "#"
consumerSecret = "#"
accessToken = "#"
accessTokenSecret = "#"
"""
Explanation: Authentication and Authorisation
Create an app in twitter here. Copy the necessary keys and access tokens, which will be used here in our code.
The authorization is done using Oauth, An open protocol to allow secure authorization in a simple and standard method from web, mobile and desktop applications. Further read.
We will use Tweepy a python module. Tweepy is open-sourced, hosted on GitHub and enables Python to communicate with Twitter platform and use its API. Tweepy supports oauth authentication. Authentication is handled by the tweepy.AuthHandler class.
End of explanation
"""
# Performing the authentication and authorization, post this step
# we will have full access to twitter api's
def connectToTwitter():
    """Connect to twitter.

    Returns:
        (api, auth): an authenticated ``tweepy.API`` instance plus the
        ``tweepy.OAuthHandler`` used to build it.

    Raises:
        Exception: re-raises any OAuth failure after logging it.
    """
    try:
        auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
        auth.set_access_token(accessToken, accessTokenSecret)
        api = tweepy.API(auth)
        logging.info("Successfully logged in to twitter.")
        return api, auth
    except Exception as e:
        logging.info("Something went wrong in oauth, please check your tokens.")
        logging.error(e)
        # Previously this fell through and implicitly returned None, which
        # made the caller fail later with an opaque tuple-unpacking
        # TypeError; re-raise so the failure surfaces where it happens.
        raise
"""
Explanation: Post this step, we will have full access to twitter api's
End of explanation
"""
# Tweet listner class which subclasses from tweepy.StreamListener
class TweetListner(tweepy.StreamListener):
    """Twitter stream listener that forwards tweet coordinates to a socket."""

    def __init__(self, csocket):
        self.clientSocket = csocket  # socket connected to the spark streamer

    def dataProcessing(self, data):
        """Process the data, before sending to spark streaming

        Extracts the (lon, lat) position from the tweet's GeoJSON
        "coordinates" field and writes it to the client socket as one
        JSON object per line.
        """
        try:
            sendData = {} # data that is sent to spark streamer
            coordinates = data["coordinates"]
            # GeoJSON positions are ordered [longitude, latitude]. The
            # original code read index 0 for BOTH values, so every tweet
            # was reported with lat == lon; fixed to use the correct pair.
            lon = coordinates["coordinates"][0]
            lat = coordinates["coordinates"][1]
            sendData["lat"] = lat
            sendData["lon"] = lon
            print(sendData)
            #data_string = "{}:{}".format(name, followersCount)
            self.clientSocket.send(json.dumps(sendData) + u"\n") # append new line character, so that spark recognizes it
            logging.debug(json.dumps(sendData))
        except Exception:
            # Most tweets carry no coordinates (data["coordinates"] is None)
            # and are skipped on purpose. Catching Exception instead of a
            # bare except keeps KeyboardInterrupt/SystemExit propagating.
            pass

    def on_data(self, raw_data):
        """ Called when raw data is received from connection.
        return False to stop stream and close connection.
        """
        try:
            data = json.loads(raw_data)
            self.dataProcessing(data)
            #self.clientSocket.send(json.dumps(sendData) + u"\n") # Because the connection was breaking
            return True
        except Exception as e:
            logging.error("An unhandled exception has occured, check your data processing")
            logging.error(e)
            raise e

    def on_error(self, status_code):
        """Called when a non-200 status code is returned"""
        logging.error("A non-200 status code is returned: {}".format(status_code))
        return True
# Creating a proxy socket
def createProxySocket(host, port):
    """ Returns a socket which can be used to connect
    to spark.

    Binds to (host, port) and blocks until one client connects. If the
    port is already taken it retries on port + 1; any other socket error
    is re-raised (previously it was swallowed and None was returned,
    crashing the caller later).
    """
    try:
        s = socket.socket() # initialize socket instance
        s.bind((host, port)) # bind to the given host and port
        s.listen(5) # Enable a server to accept connections.
        logging.info("Listening on the port {}".format(port))
        cSocket, address = s.accept() # waiting for a connection
        logging.info("Received Request from: {}".format(address))
        return cSocket
    except socket.error as e:
        if e.errno == socket.errno.EADDRINUSE: # Address in use
            logging.error("The given host:port {}:{} is already in use"\
                          .format(host, port))
            logging.info("Trying on port: {}".format(port + 1))
            return createProxySocket(host, port + 1)
        # Surface every other socket failure instead of silently
        # returning None.
        raise
"""
Explanation: Streaming with tweepy
The Twitter streaming API is used to download twitter messages in real time. We use streaming api instead of rest api because, the REST api is used to pull data from twitter but the streaming api pushes messages to a persistent session. This allows the streaming api to download more data in real time than could be done using the REST API.
In Tweepy, an instance of tweepy.Stream establishes a streaming session and routes messages to StreamListener instance. The on_data method of a stream listener receives all messages and calls functions according to the message type.
But the on_data method is only a stub, so we need to implement the functionality by subclassing StreamListener.
Using the streaming api has three steps.
Create a class inheriting from StreamListener
Using that class create a Stream object
Connect to the Twitter API using the Stream.
End of explanation
"""
# Get the list of trending topics from twitter
def getTrendingTopics(api, woeid):
    """Return the names of the topics currently trending at *woeid*."""
    response = api.trends_place(woeid)
    return [topic["name"] for topic in response[0]["trends"]]
# Entry point: authenticate, fetch top trending topics, open a proxy
# socket for Spark, then stream tweets filtered on those topics,
# reconnecting on transient HTTP errors.
if __name__ == "__main__":
    try:
        api, auth = connectToTwitter() # connecting to twitter
        # Global information is available by using 1 as the WOEID
        # woeid = getWOEIDForTrendsAvailable(api, "Worldwide") # get the woeid of the worldwide
        woeid = 1
        trendingTopics = getTrendingTopics(api, woeid)[:10] # Pick only top 10 trending topics
        host = "localhost"
        port = 8400
        cSocket = createProxySocket(host, port) # Creating a socket
        while True:
            try:
                # Connect/reconnect the stream
                tweetStream = tweepy.Stream(auth, TweetListner(cSocket)) # Stream the twitter data
                # DON'T run this approach async or you'll just create a ton of streams!
                tweetStream.filter(track=trendingTopics) # Filter on trending topics
            except IncompleteRead:
                # Oh well, reconnect and keep trucking
                continue
            except KeyboardInterrupt:
                # Or however you want to exit this loop
                tweetStream.disconnect()
                break
            except Exception as e:
                logging.error("Unhandled exception has occured")
                logging.error(e)
                continue
    except KeyboardInterrupt: # Keyboard interrupt called
        logging.error("KeyboardInterrupt was hit")
    except Exception as e:
        logging.error("Unhandled exception has occured")
        logging.error(e)
"""
Explanation: Drawbacks of twitter streaming API
The major drawback of the Streaming API is that Twitter’s Streaming API provides only a sample of tweets that are occurring. The actual percentage of total tweets users receive with Twitter’s Streaming API varies heavily based on the criteria users request and the current traffic. Studies have estimated that using Twitter’s Streaming API users can expect to receive anywhere from 1% of the tweets to over 40% of tweets in near real-time. The reason that you do not receive all of the tweets from the Twitter Streaming API is simply because Twitter doesn’t have the current infrastructure to support it, and they don’t want to; hence, the Twitter Firehose. Ref
So we will use a hack i.e. get the top trending topics and use that to filter data.
Problem with retweet count
Maybe you're looking in the wrong place for the value.
The Streaming API is in real time. When tweets are created and streamed, their retweet_count is always zero.
The only time you'll see a non-zero retweet_count in the Streaming API is for when you're streamed a tweet that represents a retweet. Those tweets have a child node called "retweeted_status" that contains the original tweet that was retweeted embedded within it. The retweet_count value attached to that node represents, roughly, the number of times that original tweet has been retweeted as of some time near when you were streamed the tweet.
Retweets themselves are currently not retweetable, so should not have a non-zero retweet_count.
Source: here
This is quite normal as it is expected when you are using streaming api endpoint, its because you receive the tweets as they are posted live on twitter platform, by the time you receive the tweet no other user had a chance to retweet it so retweet_count will always be 0. If you want to find out the retweet_count you have to refetch this particular tweet some time later using the rest api then you can see the retweet_count will contain the number of retweets happened till this particular point in time.
Source: here
End of explanation
"""
|
mathinmse/mathinmse.github.io | Lecture-18-Implicit-Finite-Difference.ipynb | mit | import sympy as sp
sp.init_session(quiet=True)
var('U_LHS U_RHS')
"""
Explanation: Lecture 18: Numerical Solutions to the Diffusion Equation
(Implicit Methods)
Sections
Introduction
Learning Goals
On Your Own
In Class
Revisiting the Discrete Version of Fick's Law
A Linear System for Diffusion
An Implicit Numerical Solution
Deconstruction of the Solution Scheme
Homework
Summary
Looking Ahead
Reading Assignments and Practice
Possible future improvement: Crank-Nicholson.
Introduction
This lecture introduces the implicit scheme for solving the diffusion equation. Examine the descritization for our explicit scheme that we covered in the previous lecture:
$$
\frac{u_{i,\, j+1} - u_{i,\, j}}{\Delta t} = D \frac{u_{i - 1,\, j} - 2 u_{i,\, j} + u_{i + 1,\, j}}{\Delta x^2}
$$
This expression uses a forward difference in time where we are subtracting the value of our dependent variable at time index $j$ from the value of our dependent variable at time-index $j+1$. If, instead, we perform a backward difference (replace $j$ with $j-1$) we will be subtracting our dependent variable at $j-1$ from the value at the index $j$. For example:
$$
\frac{u_{i,\, j} - u_{i,\, j-1}}{\Delta t} = D \frac{u_{i - 1,\, j} - 2 u_{i,\, j} + u_{i + 1,\, j}}{\Delta x^2}
$$
Attempting to repeat our previous algebraic manipulations we find that the solution to this equation is in terms of three unknown quantities at the index $j$. These quantities depend on indices $i-1$, $i$ and $i+1$ and our solution is only known to the index $j-1$. This seems like a complication that cannot be resolved however, examination of all the resulting equations in our grid will revel that this is a linear system that can be solved with the inclusion of the boundary conditions.
The point of this lecture is to develop your understanding for how the use of matrices and linear algebra can be used to solve this problem. The system of equations and the method for solving the equations is known as an "implicit method". There is a good discussion in Numerical Recipes by Teukolsky, et al. to provide a foundation for these methods.
Top of Page
Learning Goals
Re-develop the descretizaton of Fick's law such that the solution scheme is implicit
Write the method as a linear system
Incorporate boundary conditions
Develop a solution strategy using linear algebra and Numpy or SciPy as appropriate.
Top of Page
On Your Own
Suggestions for improvement of this section:
Develop numpy methods for linear algebra (e.g. creating matrices efficiently)
Matrix operations relevant to LA.
Solve a simple linear system.
Top of Page
In Class
Re-derive the discrete form of Fick's law.
Examine the structure of the resulting matrix.
Write a solver.
Revisiting the Discrete Version of Fick's Law
We start with a re-statement of Fick's second law in finite difference form that uses a FORWARD difference in time:
$$
\frac{u_{i,\, j+1} - u_{i,\, j}}{\Delta t} = D \frac{u_{i - 1,\, j} - 2 u_{i,\, j} + u_{i + 1,\, j}}{\Delta x^2}
$$
This choice of time differencing led to the EXPLICIT scheme. This time, we choose a BACKWARD difference in time as follows:
$$
\frac{u_{i,\, j} - u_{i,\, j-1}}{\Delta t} = D \frac{u_{i - 1,\, j} - 2 u_{i,\, j} + u_{i + 1,\, j}}{\Delta x^2}
$$
This choice leads to a set of linear equations. To illustrate how this develops we will write the equation above for a grid of eight points that represent the quantity of diffusing substance. See the next figure.
In the above figure we represent a grid of two dimensions - this grid is identical to the explicit finite difference grid. The main difference between the explicit and implicit method is the way we fill the grid to arrive at our solution. In the spatial dimension we have 8 columns (the "$i$" index) and in the time dimension we show only three rows (the "$j$" index). The sizes of the grid in the two dimensions are arbitrary. Keep the following in mind:
The solution is known to the $j-1$ index.
The unknowns are the $j$ indices.
Algebraically rearranging this differencing scheme, we can write down:
$$
u_{i,\, j-1} = \frac{\Delta t D}{\Delta x^2} \left( - u_{i - 1,\, j} + 2 u_{i,\, j} - u_{i + 1,\, j} \right) + u_{i,\, j}
$$
one additional re-arrangment (substitute $\beta$ for the factor containing the diffusion coefficient) and we get:
$$
- \beta u_{i - 1,\, j} + (1 + 2 \beta) u_{i,\, j} - \beta u_{i + 1,\, j} = u_{i,\, j-1}
$$
We include "ghost cells" in grey above to enforce the boundary conditions. We can use fixed value (setting the ghost cells to a particular number) or fixed flux (setting the value of the ghost cell based on a pair of interior cells) to produce a linear system with an equal number of unknowns and equations.
A Linear System for Diffusion
We begin as usual by importing SymPy into the namespace and using init_session to define some helpful symbols. We also define a pair of symbols $U_{LHS}$ and $U_{RHS}$ that will be used to define values in the ghost cells.
End of explanation
"""
var('dt dx beta u1:7 b1:7')
"""
Explanation: We define the symbols we want to use in our linear system. For this demonstration, I don't add the time index but I keep my subscripts consistent with the figure above.
End of explanation
"""
# Build the 6x6 tridiagonal system matrix symbolically:
# (1 + 2*beta) on the main diagonal, -beta on the sub/super diagonals.
# The zero-size pads shift the off-diagonal entries up/down by one row.
hpad = ones(0, 1); vpad = ones(1, 0)
mainDiag = 2*beta+1; offDiag = -beta
M = (sp.diag(vpad, offDiag, offDiag, offDiag, offDiag, offDiag, hpad)+ \
     sp.diag(hpad, offDiag, offDiag, offDiag, offDiag, offDiag, vpad)+ \
     sp.diag(mainDiag,mainDiag,mainDiag,mainDiag,mainDiag,mainDiag))
M
"""
Explanation: In this cell we create the square matrix holding the coefficients that multiply the unknown quantities. Note the structure of the matrix. It is a tridiagonal matrix. The function in NumPy is very compact, in SymPy not so much. So I apologize for the syntax in the SymPy/Python code below, but the more compact version can be difficult to read:
End of explanation
"""
xmatrix = sp.Matrix([u1,u2,u3,u4,u5,u6])
xmatrix
"""
Explanation: Here is our vector of unknown quantities. We know the solution to the $j-1$ time step. All of these symbols represent the value of our field (e.g. concentration, temperature, etc.) at the $j$'th time step.
End of explanation
"""
M*xmatrix
"""
Explanation: If we've got everything correct, this matrix product will reproduce the discrete diffusion equation outlined above. You'll note that the boundary equations are not formed correctly. For reference, here is the discrete form:
$$
- \beta u_{i - 1,\, j} + (1 + 2 \beta) u_{i,\, j} - \beta u_{i + 1,\, j} = u_{i,\, j-1}
$$
End of explanation
"""
bmatrix = sp.Matrix([(b1+beta*U_LHS),b2,b3,b4,b5,(b6+beta*U_RHS)])
bmatrix
"""
Explanation: It should start to become clear that we can write this linear system (of a tridiagonal matrix and a column vector of unknowns) as a matrix equation:
$$
M \cdot \overline{x} = \overline{b}
$$
Where M is the square matrix, x is the vector of unknown quantities and b is the last known value of the system variables (the $u_{i,j}$ are the unknowns, the $j-1$ are the last known values). There is still some work to be done before we can use linear algebra to get the solution. We need to implement the boundary conditions.
Fixed Value Boundary Conditions
Start with the form at the interior of the grid:
$$
- \beta u_{i - 1,\, j} + (1 + 2 \beta) u_{i,\, j} - \beta u_{i + 1,\, j} = u_{i,\, j-1}
$$
To get the form correct at the top and bottom of this solution vector we need to imagine adding "ghost cells" to the boundaries of our domain at $i=0$ and $i=7$. Using the above expression, let $i = 1$:
$$
- \beta u_{0,\, j} + (1 + 2 \beta) u_{1,\, j} - \beta u_{2,\, j} = u_{1,\, j-1}
$$
If we have fixed value boundary conditions, we then know the value of $u_0$. This is the boundary condition of our simulation. We will call this value $U_{LHS}$, substitute $U_{LHS} = u_0$ and move the known quantities to the RHS of the equation:
$$
(1 + 2 \beta) u_{1,\, j} - \beta u_{2,\, j} = u_{1,\, j-1} + \beta U_{LHS}
$$
Fixed Flux Boundary Conditions
If we have fixed flux boundary conditions we can write the flux as a central difference on the cell $u_1$ that uses the "ghost" point at $u_0$:
$$
\frac{u_{2,\, j} - u_{0,\, j}}{2 \Delta x} = F
$$
Proceeding as before with $i=1$:
$$
- \beta u_{0,\, j} + (1 + 2 \beta) u_{1,\, j} - \beta u_{2,\, j} = u_{1,\, j-1}
$$
This time we know the relationship of $u_0$ to the other unknowns due to the specification of the defined flux boundary condition. Solving for $u_0$ we get:
$$
u_{0,\, j} = u_{2,\, j} - {2 \Delta x} F
$$
Substituting this into our expression that includes the ghost cell gives us:
$$
- \beta (u_{2,\, j} - {2 \Delta x} F) + (1 + 2 \beta) u_{1,\, j} - \beta u_{2,\, j} = u_{1,\, j-1}
$$
Simplifying:
$$
(1 + 2 \beta) u_{1,\, j} - 2 \beta u_{2,\, j} = u_{1,\, j-1} - \beta 2 \Delta x F
$$
So in this case we have to modify the matrix $M$ entries AND the solution vector $b$ recalling that the $j-1$ index is the known solution.
We have now recovered the form of the equation in the dot product $M \cdot x$ and the form of this equation is telling us that we need to modify the solution vector $b$ with information about the boundary conditions before we find the inverse of the matrix and compute the new solution vector.
Modifying the $b$ matrix with the known ghost cell values for the fixed value boundary conditions we get:
End of explanation
"""
sp.Eq(M*xmatrix,bmatrix)
"""
Explanation: So the full form of our system is therefore:
$$
\left[\begin{matrix}2 \beta + 1 & - \beta & 0 & 0 & 0 & 0\\ - \beta & 2 \beta + 1 & - \beta & 0 & 0 & 0\\ 0 & - \beta & 2 \beta + 1 & - \beta & 0 & 0\\ 0 & 0 & - \beta & 2 \beta + 1 & - \beta & 0\\ 0 & 0 & 0 & - \beta & 2 \beta + 1 & - \beta\\ 0 & 0 & 0 & 0 & - \beta & 2 \beta + 1\end{matrix}\right] \cdot \left[\begin{matrix}u_{1}\\ u_{2}\\ u_{3}\\ u_{4}\\ u_{5}\\ u_{6}\end{matrix}\right] = \left[\begin{matrix}U_{LHS} \beta + b_{1}\\ b_{2}\\ b_{3}\\ b_{4}\\ b_{5}\\ U_{RHS} \beta + b_{6}\end{matrix}\right]
$$
SymPy can evaluate the LHS for us.
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: All that remains is to solve the above linear system. Instead of using SymPy, we will use some tools in a different Python library.
Top of Page
An Implicit Numerical Solution
General setup in this section:
End of explanation
"""
# Spatial discretization: 100 grid points across a unit-length domain.
numberOfPoints = 100
lengthOfDomain = 1.0
dx = lengthOfDomain/numberOfPoints
xPoints = np.linspace(0.0, lengthOfDomain, numberOfPoints)
# Half-period sine initial profile (zero at both domain boundaries).
initialCondition = np.sin(xPoints*np.pi/lengthOfDomain)
"""
Explanation: Simulation parameters:
End of explanation
"""
def plotIC():
    """Plot the initial concentration profile on its own figure."""
    figure = plt.figure()
    ax = figure.add_axes([0.1, 0.1, 0.8, 0.8])
    ax.plot(xPoints, initialCondition, 'ro')
    ax.set_title('Initial Conditions')
    ax.set_xlabel('Distance $x$')
    ax.set_ylabel('Concentration of Stuff $c(x,t)$')
plotIC()
"""
Explanation: A simple function to plot the initial condition:
End of explanation
"""
diffusionCoefficient = 10.0
# dt mirrors the explicit-scheme scale dx^2/D; the implicit scheme is
# unconditionally stable, so this choice affects accuracy, not stability.
dt = dx**2/(diffusionCoefficient)
numberOfIterations = 1000
"""
Explanation: It is worth noting that these schemes are unconditionally stable - so any choice of time step will produce a solution. The accuracy of the solution does depend on this choice, though.
End of explanation
"""
# Two working vectors are swapped each time step instead of storing the
# full space-time solution array (saves memory).
newConcentration = np.zeros((numberOfPoints), dtype='float32')
oldConcentration = np.zeros((numberOfPoints), dtype='float32')
"""
Explanation: We create two solution vectors rather than one whole array to hold all of our solution. This is not particular to the implicit method, but it demonstrates another technique for saving memory and speeding up the calculation. We will fill these matrices and swap them (move data from new into old and overwrite new) at each time step.
End of explanation
"""
['h','h','h']*3
"""
Explanation: First, some syntax:
End of explanation
"""
def tridiag(a, b, c, k1=-1, k2=0, k3=1):
    """Assemble a tridiagonal matrix from sub-, main- and super-diagonal values."""
    # NumPy places each value list on the requested diagonal offset;
    # summing the three square matrices yields the banded result.
    sub, main, sup = np.diag(a, k1), np.diag(b, k2), np.diag(c, k3)
    return sub + main + sup
# With beta = dt*D/dx^2, the implicit system matrix is
# A = tridiag(-beta, 1 + 2*beta, -beta).
a = [-dt*diffusionCoefficient/dx/dx]*(numberOfPoints-1)
b = [2*dt*diffusionCoefficient/dx/dx+1]*(numberOfPoints)
c = [-dt*diffusionCoefficient/dx/dx]*(numberOfPoints-1)
A = tridiag(a, b, c)
A
"""
Explanation: The matrix has to be square. It should have the same dimensions as the number of points in the system. The following code snippet was inspired by this post.
End of explanation
"""
np.copyto(oldConcentration,initialCondition)
"""
Explanation: We first need to prime the arrays by copying the initial condition into oldConcentration. Afterwards it will be enough to swap pointers (a variable that points to a memory location).
End of explanation
"""
# Fixed-value boundary concentrations for the left/right ghost cells.
uLHS = 0.0
uRHS = 0.0
numIterations = 200
for i in range(numIterations):
    # enforce boundary conditions: add beta*U_boundary to the first/last
    # entries of the right-hand-side vector before solving
    oldConcentration[0] = oldConcentration[0] + uLHS*dt*diffusionCoefficient/dx/dx
    oldConcentration[-1] = oldConcentration[-1] + uRHS*dt*diffusionCoefficient/dx/dx
    # solve the system A x = b for the next time step
    np.copyto(newConcentration,np.linalg.solve(A,oldConcentration))
    # swap pointers: oldConcentration now holds the freshest solution
    oldConcentration, newConcentration = newConcentration, oldConcentration
# plot the results
# NOTE(review): after the final swap the latest solution lives in
# oldConcentration; plotting newConcentration shows the step before it —
# confirm this is intended.
fig2 = plt.figure()
axes = fig2.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(xPoints, newConcentration, 'ro')
axes.set_ylim(0,1)
axes.set_xlabel('Distance $x$')
axes.set_ylabel('Concentration of Stuff $c(x,t)$')
axes.set_title('Solution');
"""
Explanation: Top of Page
Deconstruction of the Solution Scheme
In spite of the small chunk of code a few cells below, there is a lot going on. Let us dissect it. In bullet points:
Before the first solution step we enforce the boundary conditions. Our choice of matrix means that we are using "fixed value" boundary conditions. So we need to modify the b vector accordingly. The indexing notation of Numpy that permits us to find the first ([0]) and last cell ([-1]) of an array is very helpful here.
```python
oldConcentration[0] = oldConcentration[0] + uLHS*dt*diffusionCoefficient/dx/dx
oldConcentration[-1] = oldConcentration[-1] + uRHS*dt*diffusionCoefficient/dx/dx
```
Recall:
$$
\left[\begin{matrix}2 \beta + 1 & - \beta & 0 & 0 & 0 & 0\\ - \beta & 2 \beta + 1 & - \beta & 0 & 0 & 0\\ 0 & - \beta & 2 \beta + 1 & - \beta & 0 & 0\\ 0 & 0 & - \beta & 2 \beta + 1 & - \beta & 0\\ 0 & 0 & 0 & - \beta & 2 \beta + 1 & - \beta\\ 0 & 0 & 0 & 0 & - \beta & 2 \beta + 1\end{matrix}\right] \cdot \left[\begin{matrix}u_{1}\\ u_{2}\\ u_{3}\\ u_{4}\\ u_{5}\\ u_{6}\end{matrix}\right] = \left[\begin{matrix}U_{LHS} \beta + b_{1}\\ b_{2}\\ b_{3}\\ b_{4}\\ b_{5}\\ U_{RHS} \beta + b_{6}\end{matrix}\right]
$$
Solving the system involves using the built in NumPy functions to invert the matrix. What is returned is the solution vector. Please note that I'm using an internal Numpy (an optimized function!) function to COPY the results of the linear algebra solution into the newConcentration vector.
python
np.copyto(newConcentration,np.linalg.solve(A,oldConcentration))
Rather than storing ALL the data, we instead store just the current and the old concentrations. There are efficiencies in doing this, but if we want the older values, we need to store them on disk or in memory.
Tuple unpacking in Python leads to the A,B=B,A syntax below. This switches the references to the arrays. This is important for efficiency - you don't want to move any data if you don't have to. If you are running big calculations then moving that data around is a waste of time/resources. Better to just swap references.
python
oldConcentration, newConcentration = newConcentration, oldConcentration
Repeat the process and after a specified number of iterations, plot the results.
End of explanation
"""
|
khalido/algorithims | quicksort.ipynb | gpl-3.0 | import random
import numpy as np
random_data = [random.randint(0,100) for i in range(10)]
random_data[:10]
def quicksort(data):
    """Recursively sort *data*, using the first element as pivot."""
    # A list of fewer than two elements is already sorted.
    if len(data) < 2:
        return data
    pivot, rest = data[0], data[1:]
    smaller = [item for item in rest if item <= pivot]
    larger = [item for item in rest if item > pivot]
    return quicksort(smaller) + [pivot] + quicksort(larger)
quicksort(random_data)
"""
Explanation: recursive quicksort
sort an array by choosing a point in the array, called the pivot point, then creating two smaller arrays:
Keep in mind an array of size one is already sorted, so no need to sort that.
choose a point, called the pivot point
make an array containing everything smaller or equal to the pivot
second array containing everything bigger than the pivot
so the answer is just quicksort(smaller_array) + pivot + quicksort(bigger_array)
first to generate some random data:
End of explanation
"""
def quicksort2(data):
    """Recursive quicksort using a randomly chosen pivot.

    A random pivot avoids the worst-case O(n^2) behavior that the
    first-element pivot hits on already-sorted input.
    """
    import random
    if len(data) < 2:
        return data
    else:
        # randrange's upper bound is exclusive, so the original
        # randrange(0, len(data)-1) could never pick the last element
        # as the pivot; use the full index range.
        p_idx = random.randrange(len(data))
        pivot = data[p_idx]
        rest = data[:p_idx] + data[p_idx+1:]
        less = [i for i in rest if i <= pivot]
        more = [i for i in rest if i > pivot]
        return quicksort2(less) + [pivot] + quicksort2(more)
quicksort2(random_data)
"""
Explanation: using a random pivot
Quicksort works faster when using a random pivot
End of explanation
"""
# Sanity checks: both implementations preserve length and agree with the
# built-in sorted(); also verify on a shuffled permutation of 0..9.
assert len(random_data) == len(quicksort(random_data))
assert quicksort(random_data) == quicksort2(random_data) == sorted(random_data)
a = [i for i in range(10)]
random.shuffle(a)
assert [i for i in range(10)] == quicksort(a) == quicksort2(a)
%timeit(quicksort(random_data))
%timeit(quicksort2(random_data))
"""
Explanation: some tests to make sure the algos are working correctly
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from IPython import display
def quicksort_onestep(data):
    """Perform a single quicksort partition step.

    Picks a random pivot and returns
    [elements <= pivot] + [pivot] + [elements > pivot] without recursing,
    so each call advances the sort by one visible step (used for the
    animations below).
    """
    import random
    if len(data) < 2:
        return data
    else:
        # randrange's upper bound is exclusive, so the original
        # randrange(0, len(data)-1) could never pick the last element
        # as the pivot; use the full index range.
        p_idx = random.randrange(len(data))
        pivot = data[p_idx]
        rest = data[:p_idx] + data[p_idx+1:]
        less = [i for i in rest if i <= pivot]
        more = [i for i in rest if i > pivot]
        return less + [pivot] + more
"""
Explanation: visualizing quicksort
there are many better visuals on the web, heres my stab:
I modified the quicksort function above so it only does one sort step and returns the list:
End of explanation
"""
def compare_lists(a, b):
    """Return True when lists *a* and *b* match element-wise, False otherwise."""
    # Lengths must agree before an element-wise comparison makes sense.
    assert len(a) == len(b)
    return all(x == y for x, y in zip(a, b))
# Record each partition step in qs_steps and overlay them on one plot,
# iterating until the list matches the fully sorted reference.
random_data = [random.randint(0,100) for i in range(100)]
sorted_data = quicksort2(random_data)
plt.plot(random_data, label="initial data", lw=1.5, ls="dashed")
qs_steps = []
# first quicksort step
d = quicksort_onestep(random_data)
qs_steps.append(d)
plt.plot(d, alpha=0.5, lw=0.8, label="first pass")
#rest of quicksort steps
q_pass = 1
while not (compare_lists(sorted_data, d)):
    q_pass += 1
    d = quicksort_onestep(d)
    qs_steps.append(d)
    # highlight the final (sorted) pass in red; earlier passes are faint
    if compare_lists(d, sorted_data):
        plt.plot(sorted_data, c="r", ls="dashed", lw=2.5, label="sorted", alpha = 0.9)
    else:
        plt.plot(d, alpha=0.7, lw=0.8)
print(f"it took {len(qs_steps)} steps to sort {len(random_data)} items")
# make plot bigger
plt.legend();
"""
Explanation: Here I add the list after each sort step to an array qs_steps.
End of explanation
"""
# Animate the recorded quicksort steps as a scatter plot: one frame per
# partition step, with the original and sorted data shown for reference.
# to display animations inline
%matplotlib nbagg
import matplotlib.animation as animation
from IPython import display
# the data
x = [i for i in range(len(qs_steps[0]))]
y = qs_steps
# the figure
fig, ax = plt.subplots()
fig.set_size_inches(8,6)
ax.set_title("Quick Sort steps")
ax.set_xlabel('X')
ax.set_ylabel('Y')
# this displays the data to be sorted as a scatter plot
original_line = ax.scatter(x,y[0], alpha = 0.2, label = "original data")
# the final sorted line.
sorted_line = ax.plot(x,y[-1], lw=2, alpha = 0.7, label="sorted")
# this displays the data being sorted in a scatter plot
scatterplot = ax.scatter(x,y[0], label="sorting")
def animate(i):
    # frame i shows the state of the list after partition step i
    scatterplot.set_offsets(np.c_[x,y[i]])
ani = animation.FuncAnimation(fig, animate,
                              frames=len(y), interval=150, repeat=False)
print(f"it took {len(qs_steps)-1} steps to sort {len(qs_steps[0])} items")
plt.legend()
#ani.save("quicksort_animate.mp4")
plt.show();
display.HTML("<video controls autoplay src='quicksort_animate.mp4'></video>")
"""
Explanation: use animations to visualize quicksort
qs_steps is a array containing each step in the quicksort algorithim.
Using matplotlib.animation to animate this.
Github doesn't render videos for some reason, so see this notebook at nbviewer for the pretty animations.
End of explanation
"""
# Second animation: the same steps drawn with line plots, showing the
# current step and the step before it.
x = [i for i in range(len(qs_steps[0]))]
y = qs_steps
fig1, ax1 = plt.subplots()
# why the heck does line need a comma after it?
# (ax1.plot returns a list of Line2D objects; the comma unpacks the single element)
line, = ax1.plot(x,y[0], lw=3, alpha=0.8, label="sorting")
line2, = ax1.plot(x,y[0], lw=2, alpha = 0.1, label = "one step before")
# NOTE(review): line3 is reassigned on the next statement, so the handle
# to the "original data" line is lost — confirm that's intended.
line3 = ax1.plot(x,y[0], lw=0.8, alpha = 0.4, label = "original data")
line3 = ax1.plot(x,y[-1], lw=1, alpha = 0.6, label="sorted")
fig1.set_size_inches(8,6)
ax1.set_title("Quick Sort steps")
ax1.set_xlabel('X')
ax1.set_ylabel('Y')
def animate(i):
    # frame i shows partition step i; the fainter line trails one step behind
    line.set_ydata(y[i]) # update the data
    if i > 1:
        line2.set_ydata(y[i-1])
ani2 = animation.FuncAnimation(fig1, animate,
                               frames=len(y), interval=120, repeat=False)
print(f"it took {len(qs_steps)-1} steps to sort {len(qs_steps[0])} items")
plt.legend()
#ani2.save("quicksort_animate1.mp4")
plt.show();
display.HTML("<video controls autoplay><source src='quicksort_animate1.mp4' type='video/mp4'></video>")
"""
Explanation: Another animation, this time using lines instead of a scatter plot.
End of explanation
"""
|
ceos-seo/data_cube_notebooks | notebooks/landslides/Landslide_Identification_SLIP.ipynb | apache-2.0 | import sys
import os
sys.path.append(os.environ.get('NOTEBOOK_ROOT'))
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
from utils.data_cube_utilities.dc_display_map import display_map
from utils.data_cube_utilities.clean_mask import landsat_clean_mask_full
# landsat_qa_clean_mask, landsat_clean_mask_invalid
from utils.data_cube_utilities.dc_baseline import generate_baseline
from utils.data_cube_utilities.dc_displayutil import display_at_time
from utils.data_cube_utilities.dc_slip import create_slope_mask
from datacube.utils.aws import configure_s3_access
# Landsat data lives in a requester-pays S3 bucket; opt in explicitly.
configure_s3_access(requester_pays=True)

import datacube
# Connection to the Open Data Cube index used by all dc.load calls below.
dc = datacube.Datacube()
"""
Explanation: Sudden Landslide Identification Product (SLIP)
What to expect from this notebook
Introduction to the SLIP algorithm
describing change detection in the context of datacube
Detailed band math equations for SLIP filtering
Illustrate the step by step evolution of a SLIP product
<a id='slip_top'></a>
SLIP
SLIP is used to automate the detection of Landslides. A SLIP product is the result of filtering based on per-pixel changes in both soil moisture and vegetation in areas with high elevation gradients. All of which (with the exception of elevation gradients) can be computed using simple bandmath equations.
Data
SLIP makes use of the following Landsat 7 Surface Reflectance Bands:
- RED,
- NIR,
- SWIR1
- PIXEL_QA
SLIP makes use of the following ASTER GDEM V2 bands:
- dem
Algorithmic Process
Algorithmically speaking, SLIP is a series of per-pixel filter operations acting on relationships between NEW(current) and BASELINE(historical) values of an area. The remaining pixels after filter operations will be what SLIP classifies as landslides. Itemized in the list below are operations taken to create a SLIP product:
Import and initialize datacube
Load Geographic area
Remove clouds and no-data values
Label this product NEW
Generate a rolling average composite of NEW
Label the rolling average composite BASELINE
Filter in favor of sufficiently large changes in vegetation (using NDWI values derived from NEW and BASELINE)
Filter in favor of sufficiently large increases in RED reflectance(using RED band values from NEW and BASELINE)
Generate a slope-mask(using ASTERDEM V2 data)
Filter in favor of areas that have a high enough slope(Landslides don't happen on flat surfaces)
Index
Import Dependencies and Connect to the Data Cube
Choose Platform and Product
Define the Extents of the Analysis
Load Data from the Data Cube
Change Detection
NDWI (Normalized Difference Water Index)
RED Reflectance
ASTER Global Elevation Models
Reviewing the Evolution of the SLIP Product
Visual Comparison of SLIP Output and Baseline Composited Scene
<span id="slip_import">Import Dependencies and Connect to the Data Cube ▴</span>
End of explanation
"""
# Identifiers used by the Data Cube queries below.
platform = 'LANDSAT_8'
product = 'ls8_usgs_sr_scene'
collection = 'c1'  # Landsat Collection 1
level = 'l2'       # Level 2 (surface reflectance)
"""
Explanation: <span id="slip_plat_prod">Choose Platform and Product ▴</span>
End of explanation
"""
# Freetown, Sierra Leone
# (https://www.reuters.com/article/us-leone-mudslide-africa/cities-across-africa-face-threat-of-landslides-like-sierra-leone-idUSKCN1AY115)

# define geographic boundaries in (min, max) format
lon = (-13.3196, -12.9366)
lat = (8.1121, 8.5194)

# define date range boundaries in (min,max) format
# There should be a landslide by Freetown during August 2017.
date_range =("2016-01-01", "2017-12-31")

# Render an interactive map of the analysis extents.
display_map(lat, lon)
"""
Explanation: <span id="slip_define_extents">Define the Extents of the Analysis ▴</span>
End of explanation
"""
# Define desired bands. For SLIP, only red, nir, swir and pixel_qa will be necessary.
desired_bands = ['red','nir','swir1','pixel_qa']

# Add blue and green bands since they are needed for visualizing results (RGB).
desired_bands = desired_bands + ['green', 'blue']

# Load area. Lazily loaded with dask, then persisted in memory.
landsat_ds = dc.load(product = product,\
                     platform = platform,\
                     lat = lat,\
                     lon = lon,\
                     time = date_range,\
                     measurements = desired_bands,
                     group_by='solar_day',
                     dask_chunks={'time':1, 'longitude': 1000, 'latitude': 1000}).persist()

# clean_mask = landsat_qa_clean_mask(landsat_ds, platform) & \
# (landsat_ds != -9999).to_array().all('variable') & \
# landsat_clean_mask_invalid(landsat_ds)
# Combined cloud/shadow/no-data mask (True = clean pixel).
clean_mask = landsat_clean_mask_full(dc, landsat_ds, product=product, platform=platform,
                                     collection=collection, level=level).persist()

# Determine the times with data.
data_time_mask = (clean_mask.sum(['latitude', 'longitude']) > 0).persist()
clean_mask = clean_mask.sel(time=data_time_mask)
landsat_ds = landsat_ds.sel(time=data_time_mask)

# Mask out unclean pixels (they become NaN).
landsat_ds = landsat_ds.where(clean_mask).persist()
"""
Explanation: <span id="slip_load_data">Load Data from the Data Cube ▴</span>
End of explanation
"""
# Date of the Freetown landslide event to inspect.
time_to_show = '2017-08-04'
# Nearest acquisition to that date.
acq_to_show = landsat_ds.sel(time=time_to_show, method='nearest')

rgb_da = acq_to_show[['red', 'green', 'blue']].squeeze().to_array().compute()
# Clip the color stretch to the 5th-95th percentiles for contrast.
vmin = rgb_da.quantile(0.05).values
vmax = rgb_da.quantile(0.95).values
rgb_da.plot.imshow(vmin=vmin, vmax=vmax)
plt.show()
"""
Explanation: Visualization
This step is optional, but useful to those seeking a step by step validation of SLIP. The following code shows a true-color representation of our loaded scene.
End of explanation
"""
new = acq_to_show
"""
Explanation: <span id="slip_change_detect">Change Detection ▴</span>
In the context of SLIP, Change detection happens through the comparison of 'current' values against 'past' values.
<br>
Trivialized Example:
<br>
$$ \Delta Value = (Value_{new} - Value_{old})/ Value_{old} $$
<br>
It is easy to define NEW as the current value being analyzed.
<br>
End of explanation
"""
# Generate a moving average of n values leading up to current time.
# (here n = composite_size = 3; see dc_baseline.generate_baseline)
baseline = generate_baseline(landsat_ds, composite_size = 3, mode = 'average')
"""
Explanation: <br>
However, OLD can have varying interpretations.
In SLIP, OLD values (referred to in code as BASELINE values) are simply rolling averages of not-nan values leading up to the date in question.
<br>
The following figure illustrates such a compositing method:
<br><br>
<!--  -->
<br>
In the figure above, t4 values are the average of t1-t3 (assuming a window size of 3)
<br>
The code below composites with a window size of 5.
End of explanation
"""
(len(new.time), len(baseline.time))
"""
Explanation: It is important to note that compositing will shorten the length of baseline's time domain by the window size, since ranges shorter than the composite size are not computed. For a composite size of 3, new's first 3 time values will not have composite values.
End of explanation
"""
display_at_time([baseline, new], time = time_to_show, width = 2, w = 12)
"""
Explanation: What this composite looks like
End of explanation
"""
def _ndwi(scene):
    """NDWI = (NIR - SWIR1) / (NIR + SWIR1) for a Landsat dataset."""
    return (scene.nir - scene.swir1) / (scene.nir + scene.swir1)

# Per-pixel NDWI for the current acquisition and the rolling baseline,
# and their difference (the change SLIP filters on).
ndwi_new = _ndwi(new)
ndwi_baseline = _ndwi(baseline)
ndwi_change = ndwi_new - ndwi_baseline
"""
Explanation: The baseline composite is featured in the figure above (left). It represents what was typical for the past five acquisitions 'leading-up-to' time_to_show. Displayed next to it (right) is the true-color visualization of the acquisition 'at' time_to_show. The new object contains unaltered LS7 scenes that are index-able using a date like time_to_show. The baseline object contains a block of composites of those landsat scenes that is index-able the same way.
<span id="slip_ndwi">NDWI (Normalized Difference Water Index) ▴</span>
SLIP makes the major assumption that landslides will strip a hill/mountain-side of all of its vegetation.
SLIP uses NDWI, an index used to monitor water content of leaves, to track the existence of vegetation on a slope. At high enough levels, leaf water content change can no longer be attributed to something like seasonal fluctuations and will most likely indicate a change in the existence of vegetation.
NDWI BANDMATH
NDWI is computed on a per-pixel level and involves arithmetic between NIR (Near infrared) and SWIR1 (Short Wave Infrared) values.
NDWI is computed for both NEW and BASELINE imagery then compared to yield NDWI change. The equations bellow detail a very simple derivation of change in NDWI:
$$ NDWI_{NEW} = \frac{NIR_{NEW} - SWIR_{NEW}}{NIR_{NEW} + SWIR_{NEW}}$$
<br><br>
$$ NDWI_{BASELINE} = \frac{NIR_{BASELINE} - SWIR_{BASELINE}}{NIR_{BASELINE} + SWIR_{BASELINE}}$$
<br><br>
$$\Delta NDWI = NDWI_{NEW} - NDWI_{BASELINE}$$
<br>
The code is just as simple:
End of explanation
"""
new_ndwi_filtered = new.where(abs(ndwi_change) > 0.2)
"""
Explanation: Filtering NDWI
In the context of code, you can best think of filtering as a peicewise transformation that assigns a nan (or null) value to points that fall below our minimum change threshold. (For SLIP that threshold is 20%)
<br>
$$ ndwi_filter(Dataset) = \left{
\begin{array}{lr}
Dataset & : | \Delta NDWI(Dataset) | > 0.2\
np.nan & : | \Delta NDWI(Dataset) | \le 0.2
\end{array}
\right.\ $$
<br>
In code, it's even simpler:
End of explanation
"""
# Original (left), NDWI-change mask overlaid (center), filtered result (right).
display_at_time([new, (new, new_ndwi_filtered),new_ndwi_filtered],
                time = time_to_show,
                width = 3, w =14)
"""
Explanation: How far NDWI filtering gets you
A SLIP product is the result of a process of elimination. NDWI is sufficient in eliminating a majority of non-contending areas early on in the process. Featured below is what is left of the original image after having filtered for changes in NDWI .
End of explanation
"""
red_change = (new.red - baseline.red)/(baseline.red)
"""
Explanation: Highlighted in the center picture are values that meet our NDWI change expectations. Featured in the right-most image is what remains of our original image after NDWI filtering.
<span id="slip_red">RED Reflectance ▴</span>
SLIP makes another important assumption about Landslides.
On top of stripping the Slope of vegetation, a landslide will reveal a large layer of previously vegetated soil. Since soil reflects more light in the RED spectral band than highly vegetated areas do, SLIP looks for increases in the RED bands. This captures both the loss of vegetation, and the unearthing of soil.
RED change bandmath
Red change is computed on a per-pixel level and involves arithmetic on the RED band values. The derivation of RED change is simple:
<br><br>
$$ \Delta Red = \frac{RED_{NEW} - RED_{BASELINE}}{RED_{BASELINE}} $$
The code is just as simple:
End of explanation
"""
new_red_and_ndwi_filtered = new_ndwi_filtered.where(red_change > 0.4)
"""
Explanation: Filtering for RED reflectance increase
Filtering RED reflectance change is just like the piecewise transformation used for filtering NDWI change.
<br>
$$ red_filter(Dataset) = \left{
\begin{array}{lr}
Dataset & : \Delta red(Dataset) > 0.4\
np.nan & : \Delta red(Dataset) \le 0.4
\end{array}
\right.\ $$
<br>
In Code:
End of explanation
"""
# Original (left), NDWI+RED mask overlaid (center), filtered result (right).
display_at_time([new, (new, new_red_and_ndwi_filtered),new_red_and_ndwi_filtered],
                time = time_to_show,
                width = 3, w = 14)
"""
Explanation: How much further RED reflectance filtering gets you
Continuing SLIP's process of elimination, Red increase filtering will further refine the area of interest to areas that, upon visual inspection appear to be light brown in color.
End of explanation
"""
# ASTER Global Digital Elevation Model (per-pixel elevation in meters).
aster = dc.load(product="terra_aster_gdm",\
                lat=lat,\
                lon=lon,\
                measurements=['dem'],
                group_by='solar_day')
"""
Explanation: <span id="slip_aster">ASTER Global Elevation Models ▴</span>
Aster GDEM models provide elevation data for each pixel, expressed in meters. For SLIP, height is not enough to determine that a landslide can happen on a pixel. SLIP focuses on areas with high elevation gradients/slope (expressed in non-radian degrees). The driving motivation for using slope-based filtering is that landslides are less likely to happen in flat regions.
Loading the elevation model
End of explanation
"""
# Create a slope-mask. False: if pixel < 15 degrees; True: if pixel > 15 degrees.
# (resolution = 30 m per pixel, used in the rise/run gradient computation)
is_above_slope_threshold = create_slope_mask(aster, degree_threshold = 15,resolution = 30)
"""
Explanation: Calculating Angle of elevation
A gradient is generated for each pixel using the four pixels adjacent to it, as well as a rise/run formuala.
<br><br>
$$ Gradient = \frac{Rise}{Run} $$
<br><br>
Basic trigonometric identities can then be used to derive the angle:
<br><br>
$$ Angle of Elevation = \arctan(Gradient) $$
<br><br>
When deriving the angle of elevation for a pixel, two gradients are available. One formed by the bottom pixel and top pixel, the other formed by the right and left pixel. For the purposes of identifying landslide causing slopes, the greatest of the two slopes will be used.
The following image describes the process for angle-of-elevation calculation for a single pixel within a grid of DEM pixels
<br><br>
<br><br>
The vagaries of implementation have been abstracted away by dc_demutils. It's used to derive a slope-mask. A slope-mask in this sense, is an array of true and false values based on whether or not that pixel meets a minimum angle of elevation requirement. Its use is detailed below.
End of explanation
"""
slip_product = new_red_and_ndwi_filtered.where(is_above_slope_threshold)
"""
Explanation: Filtering out pixels that don't meet requirements for steepness
<br>
Filtering based on slope is a peicewise transformation using a derived slopemask:
<br>
$$ slope_filter(Dataset) = \left{
\begin{array}{lr}
Dataset & : is_above_degree_threshold(Dataset, 15^{\circ}) = True\
np.nan & : is_above_degree_threshold(Dataset, 15^{\circ}) = False\
\end{array}
\right.\ $$
<br>
Its use in code:
End of explanation
"""
display_at_time([new, (new, slip_product),slip_product],
time = time_to_show,
width = 3, w = 14)
"""
Explanation: Visualising our final SLIP product
The final results of SLIP are small regions of points with a high likelihood of landslides having occurred on them. Furthermore, there is no possibility that detections are made in flat areas (areas with less than a $15^{\circ}$ angle of elevation).
End of explanation
"""
# 3x3 grid showing the product's evolution: row 1 = NDWI filter,
# row 2 = NDWI+RED filter, row 3 = NDWI+RED+slope (final SLIP).
display_at_time([new, (new,new_ndwi_filtered),new_ndwi_filtered,new, (new, new_red_and_ndwi_filtered),new_red_and_ndwi_filtered, new, (new, slip_product),slip_product],
                time = time_to_show,
                width = 3, w = 14, h = 12)
"""
Explanation: <span id="slip_evo">Reviewing the Evolution of the SLIP Product ▴</span>
The following visualizations will detail the evolution of the SLIP product from the previous steps.
Order of operations:
- NDWI change Filtered
- RED increase Filtered
- Slope Filtered
Visualization
End of explanation
"""
# Baseline composite (left) vs. SLIP detections blended in red over the
# current acquisition (right).
display_at_time([baseline, (new,slip_product)],
                time = time_to_show,
                width = 2, mode = 'blend', color = [210,7,7] , w = 14)
"""
Explanation: <span id="slip_compare_output_baseline">Visual Comparison of SLIP Output and Baseline Composited Scene ▴</span>
In the name of validating results, it makes sense to compare the SLIP product generated for the selected date (time_to_show) to the composited scene representing what is considered to be "normal" for the last 5 acquisitions.
End of explanation
"""
|
ContinualAI/avalanche | notebooks/from-zero-to-hero-tutorial/05_evaluation.ipynb | mit | !pip install avalanche-lib==0.2.0
"""
Explanation: description: Automatic Evaluation with Pre-implemented Metrics
Evaluation
Welcome to the "Evaluation" tutorial of the "From Zero to Hero" series. In this part we will present the functionalities offered by the evaluation module.
End of explanation
"""
import torch
from avalanche.evaluation.metrics import Accuracy

task_labels = 0  # we will work with a single task

# create an instance of the standalone Accuracy metric
# initial accuracy is 0 for each task
acc_metric = Accuracy()
print("Initial Accuracy: ", acc_metric.result()) # output {}

# two consecutive metric updates: targets vs. predictions
real_y = torch.tensor([1, 2]).long()
predicted_y = torch.tensor([1, 0]).float()
acc_metric.update(real_y, predicted_y, task_labels)
acc = acc_metric.result()
print("Average Accuracy: ", acc) # output 0.5 on task 0
predicted_y = torch.tensor([1,2]).float()
acc_metric.update(real_y, predicted_y, task_labels)
acc = acc_metric.result()
print("Average Accuracy: ", acc) # output 0.75 on task 0

# reset accuracy back to its initial (empty) state
acc_metric.reset()
print("After reset: ", acc_metric.result()) # output {}
"""
Explanation: 📈 The Evaluation Module
The evaluation module is quite straightforward: it offers all the basic functionalities to evaluate and keep track of a continual learning experiment.
This is mostly done through the Metrics: a set of classes which implement the main continual learning metrics computation like A_ccuracy_, F_orgetting_, M_emory Usage_, R_unning Times_, etc. At the moment, in Avalanche we offer a number of pre-implemented metrics you can use for your own experiments. We made sure to include all the major accuracy-based metrics but also the ones related to computation and memory.
Each metric comes with a standalone class and a set of plugin classes aimed at emitting metric values on specific moments during training and evaluation.
Standalone metric
As an example, the standalone Accuracy class can be used to monitor the average accuracy over a stream of <input,target> pairs. The class provides an update method to update the current average accuracy, a result method to print the current average accuracy and a reset method to set the current average accuracy to zero. The call to resultdoes not change the metric state.
The Accuracy metric requires the task_labels parameter, which specifies which task is associated with the current patterns. The metric returns a dictionary mapping task labels to accuracy values.
End of explanation
"""
from avalanche.evaluation.metrics import accuracy_metrics, \
    loss_metrics, forgetting_metrics, bwt_metrics,\
    confusion_matrix_metrics, cpu_usage_metrics, \
    disk_usage_metrics, gpu_usage_metrics, MAC_metrics, \
    ram_usage_metrics, timing_metrics

# you may pass the result to the EvaluationPlugin
# (epoch=True / experience=True choose when the metric is emitted)
metrics = accuracy_metrics(epoch=True, experience=True)
"""
Explanation: Plugin metric
If you want to integrate the available metrics automatically in the training and evaluation flow, you can use plugin metrics, like EpochAccuracy which logs the accuracy after each training epoch, or ExperienceAccuracy which logs the accuracy after each evaluation experience. Each of these metrics emits a curve composed by its values at different points in time (e.g. on different training epochs). In order to simplify the use of these metrics, we provided utility functions with which you can create different plugin metrics in one shot. The results of these functions can be passed as parameters directly to the EvaluationPlugin(see below).
{% hint style="info" %}
We recommend to use the helper functions when creating plugin metrics.
{% endhint %}
End of explanation
"""
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from avalanche.benchmarks.classic import SplitMNIST
from avalanche.evaluation.metrics import forgetting_metrics, \
    accuracy_metrics, loss_metrics, timing_metrics, cpu_usage_metrics, \
    confusion_matrix_metrics, disk_usage_metrics
from avalanche.models import SimpleMLP
from avalanche.logging import InteractiveLogger
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training import Naive

# 5-experience class-incremental MNIST benchmark.
benchmark = SplitMNIST(n_experiences=5)

# MODEL CREATION
model = SimpleMLP(num_classes=benchmark.n_classes)

# DEFINE THE EVALUATION PLUGIN
# The evaluation plugin manages the metrics computation.
# It takes as argument a list of metrics, collects their results and returns
# them to the strategy it is attached to.
eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True),
    forgetting_metrics(experience=True, stream=True),
    cpu_usage_metrics(experience=True),
    confusion_matrix_metrics(num_classes=benchmark.n_classes, save_image=False, stream=True),
    disk_usage_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loggers=[InteractiveLogger()],
    benchmark=benchmark,    # enables stream-consistency checks
    strict_checks=False     # warn (don't raise) on inconsistencies
)

# CREATE THE STRATEGY INSTANCE (NAIVE)
cl_strategy = Naive(
    model, SGD(model.parameters(), lr=0.001, momentum=0.9),
    CrossEntropyLoss(), train_mb_size=500, train_epochs=1, eval_mb_size=100,
    evaluator=eval_plugin)

# TRAINING LOOP
print('Starting experiment...')
results = []
for experience in benchmark.train_stream:
    # train returns a dictionary which contains all the metric values
    res = cl_strategy.train(experience)
    print('Training completed')

    print('Computing accuracy on the whole test set')
    # eval also returns a dictionary which contains all the metric values
    results.append(cl_strategy.eval(benchmark.test_stream))
"""
Explanation: 📐Evaluation Plugin
The Evaluation Plugin is the object in charge of configuring and controlling the evaluation procedure. This object can be passed to a Strategy as a "special" plugin through the evaluator attribute.
The Evaluation Plugin accepts as inputs the plugin metrics you want to track. In addition, you can add one or more loggers to print the metrics in different ways (on file, on standard output, on Tensorboard...).
It is also recommended to pass to the Evaluation Plugin the benchmark instance used in the experiment. This allows the plugin to check for consistency during metrics computation. For example, the Evaluation Plugin checks that the strategy.eval calls are performed on the same stream or sub-stream. Otherwise, same metric could refer to different portions of the stream.
These checks can be configured to raise errors (stopping computation) or only warnings.
End of explanation
"""
from avalanche.evaluation import Metric


# a standalone metric implementation (skeleton/template: fill in the
# update/result/reset logic for your own metric)
class MyStandaloneMetric(Metric[float]):
    """
    This metric will return a `float` value
    """
    def __init__(self):
        """
        Initialize your metric here
        """
        super().__init__()
        pass

    def update(self):
        """
        Update metric value here
        (standalone metrics are updated explicitly by the caller)
        """
        pass

    def result(self) -> float:
        """
        Emit the metric result here
        """
        # placeholder: a real metric returns its computed value
        return 0

    def reset(self):
        """
        Reset your metric here
        """
        pass
"""
Explanation: Implement your own metric
To implement a standalone metric, you have to subclass Metric class.
End of explanation
"""
from avalanche.evaluation import PluginMetric
from avalanche.evaluation.metrics import Accuracy
from avalanche.evaluation.metric_results import MetricValue
from avalanche.evaluation.metric_utils import get_metric_name
class MyPluginMetric(PluginMetric[float]):
    """
    Example plugin metric: tracks the running training accuracy and
    emits a `float` value after each training epoch.

    Wraps a standalone `Accuracy` metric and hooks it into the
    strategy callbacks (reset at epoch start, update every iteration,
    emit at epoch end).
    """

    def __init__(self):
        """
        Initialize the metric with an internal standalone Accuracy.
        """
        super().__init__()

        self._accuracy_metric = Accuracy()

    def reset(self) -> None:
        """
        Reset the internal accuracy state.
        """
        self._accuracy_metric.reset()

    def result(self) -> float:
        """
        Emit the current accuracy result (dict mapping task -> accuracy).
        """
        return self._accuracy_metric.result()

    def after_training_iteration(self, strategy: 'PluggableStrategy') -> None:
        """
        Update the accuracy metric with the current
        predictions and targets
        """
        # task labels defined for each experience
        task_labels = strategy.experience.task_labels
        if len(task_labels) > 1:
            # task labels defined for each pattern
            task_labels = strategy.mb_task_id
        else:
            task_labels = task_labels[0]

        self._accuracy_metric.update(strategy.mb_output, strategy.mb_y,
                                     task_labels)

    def before_training_epoch(self, strategy: 'PluggableStrategy') -> None:
        """
        Reset the accuracy before the epoch begins
        """
        self.reset()

    def after_training_epoch(self, strategy: 'PluggableStrategy'):
        """
        Package and emit the epoch-level result.
        """
        return self._package_result(strategy)

    def _package_result(self, strategy):
        """Taken from `GenericPluginMetric`, check that class out!"""
        # BUG FIX: the original read `self.accuracy_metric` (missing
        # leading underscore), which raised AttributeError at the end
        # of every training epoch. The attribute set in __init__ is
        # `self._accuracy_metric`.
        metric_value = self._accuracy_metric.result()
        add_exp = False
        plot_x_position = strategy.clock.train_iterations

        if isinstance(metric_value, dict):
            # one MetricValue per task
            metrics = []
            for k, v in metric_value.items():
                metric_name = get_metric_name(
                    self, strategy, add_experience=add_exp, add_task=k)
                metrics.append(MetricValue(self, metric_name, v,
                                           plot_x_position))
            return metrics
        else:
            metric_name = get_metric_name(self, strategy,
                                          add_experience=add_exp,
                                          add_task=True)
            return [MetricValue(self, metric_name, metric_value,
                                plot_x_position)]

    def __str__(self):
        """
        Name of this metric, used as the key in logs/results.
        """
        return "Top1_Acc_Epoch"
"""
Explanation: To implement a plugin metric you have to subclass PluginMetric class
End of explanation
"""
# A second plugin with collect_all=True: metric curves are kept in memory
# and can be retrieved at any time via get_all_metrics().
eval_plugin2 = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    forgetting_metrics(experience=True, stream=True),
    timing_metrics(epoch=True),
    cpu_usage_metrics(experience=True),
    confusion_matrix_metrics(num_classes=benchmark.n_classes, save_image=False, stream=True),
    disk_usage_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    collect_all=True,  # this is default value anyway
    loggers=[InteractiveLogger()],
    benchmark=benchmark
)

# since no training and evaluation has been performed, this will return an empty dict.
metric_dict = eval_plugin2.get_all_metrics()
print(metric_dict)

# eval_plugin (used in the training loop above) does hold recorded values:
# keys are full metric names, values are (x_values, metric_values) tuples.
d = eval_plugin.get_all_metrics()
d['Top1_Acc_Epoch/train_phase/train_stream/Task000']
"""
Explanation: Accessing metric values
If you want to access all the metrics computed during training and evaluation, you have to make sure that collect_all=True is set when creating the EvaluationPlugin (default option is True). This option maintains an updated version of all metric results in the plugin, which can be retrieved by calling evaluation_plugin.get_all_metrics(). You can call this methods whenever you need the metrics.
The result is a dictionary with full metric names as keys and a tuple of two lists as values. The first list stores all the x values recorded for that metric. Each x value represents the time step at which the corresponding metric value has been computed. The second list stores metric values associated to the corresponding x value.
End of explanation
"""
# `res` is the dict returned by the last train() call;
# `results[-1]` is the dict returned by the last eval() call.
print(res)
print(results[-1])
"""
Explanation: Alternatively, the train and eval method of every strategy returns a dictionary storing, for each metric, the last value recorded for that metric. You can use these dictionaries to incrementally accumulate metrics.
End of explanation
"""
|
mtasende/Machine-Learning-Nanodegree-Capstone | notebooks/prod/.ipynb_checkpoints/n10_dyna_q_with_predictor_full_training-checkpoint.ipynb | mit | # Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
from multiprocessing import Pool
import pickle
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import recommender.simulator as sim
from utils.analysis import value_eval
from recommender.agent_predictor import AgentPredictor
from functools import partial
from sklearn.externals import joblib
NUM_THREADS = 1                   # number of agents to create/run
LOOKBACK = 252*3                  # trading days of history used for training (-1 = all)
STARTING_DAYS_AHEAD = 252         # warm-up days before the simulation starts
POSSIBLE_FRACTIONS = [0.0, 1.0]   # allowed portfolio fractions (all-out / all-in)
DYNA = 20                         # Dyna-Q planning iterations per real step
BASE_DAYS = 112                   # prediction window used by the estimators
# Get the data
SYMBOL = 'SPY'
total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature')
data_train_df = total_data_train_df[SYMBOL].unstack()
total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature')
data_test_df = total_data_test_df[SYMBOL].unstack()

# Restrict training data to the last LOOKBACK days (-1 keeps everything).
if LOOKBACK == -1:
    total_data_in_df = total_data_train_df
    data_in_df = data_train_df
else:
    data_in_df = data_train_df.iloc[-LOOKBACK:]
    total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:]
# Create many agents
index = np.arange(NUM_THREADS).tolist()
# Build the trading environment plus its discretized state/action spaces.
env, num_states, num_actions = sim.initialize_env(total_data_in_df,
                                                  SYMBOL,
                                                  starting_days_ahead=STARTING_DAYS_AHEAD,
                                                  possible_fractions=POSSIBLE_FRACTIONS)
# Pre-trained predictors used by Dyna-Q to simulate state transitions.
estimator_close = joblib.load('../../data/best_predictor.pkl')
estimator_volume = joblib.load('../../data/best_volume_predictor.pkl')
agents = [AgentPredictor(num_states=num_states,
                         num_actions=num_actions,
                         random_actions_rate=0.98,      # initial exploration rate
                         random_actions_decrease=0.999, # exploration decay factor
                         dyna_iterations=DYNA,
                         name='Agent_{}'.format(i),
                         estimator_close=estimator_close,
                         estimator_volume=estimator_volume,
                         env=env,
                         prediction_window=BASE_DAYS) for i in index]
def show_results(results_list, data_in_df, graph=False):
    """
    Print performance metrics for each simulated portfolio and optionally
    plot its normalized value against the buy-and-hold benchmark.

    :param results_list: iterable of DataFrames; each holds the per-asset
        values of one simulation (columns are summed into a total value).
    :param data_in_df: DataFrame with at least a 'Close' column, used as
        the benchmark series (labeled with the module-level SYMBOL).
    :param graph: if True, plot normalized benchmark vs. portfolio value.
    """
    for values in results_list:
        total_value = values.sum(axis=1)
        print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value))))
        print('-'*100)
        initial_date = total_value.index[0]
        compare_results = data_in_df.loc[initial_date:, 'Close'].copy()
        compare_results.name = SYMBOL
        compare_results_df = pd.DataFrame(compare_results)
        compare_results_df['portfolio'] = total_value
        # Normalize both series to 1.0 at the start date for comparison.
        std_comp_df = compare_results_df / compare_results_df.iloc[0]
        if graph:
            # BUG FIX: the original called plt.figure() and then
            # DataFrame.plot() with no `ax`; pandas then creates its own
            # figure, leaving the first one empty. Plot explicitly onto
            # one freshly created axes instead.
            _, ax = plt.subplots()
            std_comp_df.plot(ax=ax)
"""
Explanation: In this notebook a Q learner with dyna and a custom predictor will be trained and evaluated. The Q learner recommends when to buy or sell shares of one particular stock, and in which quantity (in fact it determines the desired fraction of shares in the total portfolio value).
End of explanation
"""
# Buy-and-hold benchmark metrics for the training period.
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))

# Simulate (with new envs, each time)
n_epochs = 4

for i in range(n_epochs):
    tic = time()
    env.reset(STARTING_DAYS_AHEAD)
    results_list = sim.simulate_period(total_data_in_df,
                                       SYMBOL,
                                       agents[0],
                                       starting_days_ahead=STARTING_DAYS_AHEAD,
                                       possible_fractions=POSSIBLE_FRACTIONS,
                                       verbose=False,
                                       other_env=env)
    toc = time()
    print('Epoch: {}'.format(i))
    print('Elapsed time: {} seconds.'.format((toc-tic)))
    print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
    show_results([results_list], data_in_df)

# Final in-sample run with learning (and exploration) disabled.
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
                                   SYMBOL, agents[0],
                                   learn=False,
                                   starting_days_ahead=STARTING_DAYS_AHEAD,
                                   possible_fractions=POSSIBLE_FRACTIONS,
                                   other_env=env)
show_results([results_list], data_in_df, graph=True)

import pickle
# Persist the trained agent for later reuse.
with open('../../data/dyna_q_with_predictor.pkl', 'wb') as best_agent:
    pickle.dump(agents[0], best_agent)
"""
Explanation: Let's show the symbols data, to see how good the recommender has to be.
End of explanation
"""
TEST_DAYS_AHEAD = 112

# Point the environment at the held-out test data.
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)

tic = time()
# Test run with learning disabled (worst-case scenario for the agent).
results_list = sim.simulate_period(total_data_test_df,
                                   SYMBOL,
                                   agents[0],
                                   learn=False,
                                   starting_days_ahead=TEST_DAYS_AHEAD,
                                   possible_fractions=POSSIBLE_FRACTIONS,
                                   verbose=False,
                                   other_env=env)
toc = time()
# NOTE(review): `i` here is the leftover loop index from the training cell.
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
"""
Explanation: Let's run the trained agent, with the test set
First a non-learning test: this scenario would be worse than what is possible (in fact, the q-learner can learn from past samples in the test set without compromising the causality).
End of explanation
"""
TEST_DAYS_AHEAD = 112

# Reset the environment onto the test data again.
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)

tic = time()
# "Realistic" test run: the agent keeps learning from past test samples
# (learn=True), which does not break causality.
results_list = sim.simulate_period(total_data_test_df,
                                   SYMBOL,
                                   agents[0],
                                   learn=True,
                                   starting_days_ahead=TEST_DAYS_AHEAD,
                                   possible_fractions=POSSIBLE_FRACTIONS,
                                   verbose=False,
                                   other_env=env)
toc = time()
# NOTE(review): `i` here is the leftover loop index from the training cell.
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
"""
Explanation: And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).
End of explanation
"""
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_test_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
"""
Explanation: What are the metrics for "holding the position"?
End of explanation
"""
|
nick-youngblut/SIPSim | ipynb/bac_genome/fullCyc/trimDataset/dataset_info.ipynb | mit | %load_ext rpy2.ipython
%%R
# Paths to the phyloseq objects and the amplicon-fragment KDE file.
workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/'
physeqDir = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/'
physeqBulkCore = 'bulk-core_trm'    # bulk-soil core samples
physeqSIP = 'SIP-core_unk_trm'      # SIP gradient-fraction samples
ampFragFile = '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl'
"""
Explanation: General info on the fullCyc dataset (as it pertains to SIPSim validation)
Simulating 12C gradients
Determining if simulated taxon abundance distributions resemble the true distributions
Simulation parameters to infer from dataset:
Infer total richness of bulk soil community
richness of starting community
Infer abundance distribution of bulk soil community
NO: distribution fit
INSTEAD: using relative abundances of bulk soil community
Get distribution of total OTU abundances per fraction
Number of sequences per sample
User variables
End of explanation
"""
import os
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(phyloseq)
library(fitdistrplus)
library(sads)
%%R
dir.create(workDir, showWarnings=FALSE)
"""
Explanation: Init
End of explanation
"""
%%R
# bulk core samples
F = file.path(physeqDir, physeqBulkCore)
physeq.bulk = readRDS(F)
#physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk %>% names
%%R
# SIP core samples
F = file.path(physeqDir, physeqSIP)
physeq.SIP = readRDS(F)
#physeq.SIP.m = physeq.SIP %>% sample_data
physeq.SIP %>% names
"""
Explanation: Loading phyloseq list datasets
End of explanation
"""
%%R
# Convert a phyloseq object into a long-format data.frame of per-sample
# relative abundances: one row per (OTU, sample) pair, with columns
# OTU, sample, abundance.
physeq2otu.long = function(physeq){
    # Normalise counts to within-sample relative abundances, then pull the
    # OTU table out as a plain data.frame (samples as columns).
    df.OTU = physeq %>%
        transform_sample_counts(function(x) x/sum(x)) %>%
        otu_table %>%
        as.matrix %>%
        as.data.frame
    # Keep the OTU identifiers (row names) as an explicit column.
    df.OTU$OTU = rownames(df.OTU)
    # Reshape wide -> long: every sample column becomes a (sample, abundance) row.
    df.OTU = df.OTU %>%
        gather('sample', 'abundance', 1:(ncol(df.OTU)-1))
    return(df.OTU)
}
df.OTU.l = lapply(physeq.bulk, physeq2otu.long)
df.OTU.l %>% names
#df.OTU = do.call(rbind, lapply(physeq.bulk, physeq2otu.long))
#df.OTU$Day = gsub('.+\\.D([0-9]+)\\.R.+', '\\1', df.OTU$sample)
#df.OTU %>% head(n=3)
%%R -w 450 -h 400
lapply(df.OTU.l, function(x) descdist(x$abundance, boot=1000))
%%R
# Fit candidate abundance distributions (exponential, lognormal, gamma,
# beta) to the abundance column of `x` with fitdistrplus::fitdist, draw
# density and Q-Q comparison plots, print goodness-of-fit statistics,
# and return the list of fitted objects.
fitdists = function(x){
    fit.l = list()
    #fit.l[['norm']] = fitdist(x$abundance, 'norm')
    fit.l[['exp']] = fitdist(x$abundance, 'exp')
    fit.l[['logn']] = fitdist(x$abundance, 'lnorm')
    fit.l[['gamma']] = fitdist(x$abundance, 'gamma')
    fit.l[['beta']] = fitdist(x$abundance, 'beta')
    # plotting: overlay all candidate fits on one density plot and one Q-Q plot
    plot.legend = c('exponential', 'lognormal', 'gamma', 'beta')
    par(mfrow = c(2,1))
    denscomp(fit.l, legendtext=plot.legend)
    qqcomp(fit.l, legendtext=plot.legend)
    # fit summary: goodness-of-fit statistics (AIC/BIC etc.) per candidate
    gofstat(fit.l, fitnames=plot.legend) %>% print
    return(fit.l)
}
fits.l = lapply(df.OTU.l, fitdists)
fits.l %>% names
%%R
# getting summaries for lognormal fits
get.summary = function(x, id='logn'){
summary(x[[id]])
}
fits.s = lapply(fits.l, get.summary)
fits.s %>% names
%%R
# listing estimates for fits
df.fits = do.call(rbind, lapply(fits.s, function(x) x$estimate)) %>% as.data.frame
df.fits$Sample = rownames(df.fits)
df.fits$Day = gsub('.+D([0-9]+)\\.R.+', '\\1', df.fits$Sample) %>% as.numeric
df.fits
%%R -w 650 -h 300
ggplot(df.fits, aes(Day, meanlog,
ymin=meanlog-sdlog,
ymax=meanlog+sdlog)) +
geom_pointrange() +
geom_line() +
theme_bw() +
theme(
text = element_text(size=16)
)
%%R
# mean of the fitted-distribution estimates across samples
apply(df.fits, 2, mean)
"""
Explanation: Infer abundance distribution of each bulk soil community
distribution fit
End of explanation
"""
%%R -w 800
df.OTU = do.call(rbind, df.OTU.l) %>%
mutate(abundance = abundance * 100) %>%
group_by(sample) %>%
mutate(rank = row_number(desc(abundance))) %>%
ungroup() %>%
filter(rank < 10)
ggplot(df.OTU, aes(rank, abundance, color=sample, group=sample)) +
geom_point() +
geom_line() +
labs(y = '% rel abund')
"""
Explanation: Relative abundance of most abundant taxa
End of explanation
"""
%%R -w 800 -h 300
df.OTU = do.call(rbind, df.OTU.l) %>%
mutate(abundance = abundance * 100) %>%
group_by(sample) %>%
mutate(rank = row_number(desc(abundance))) %>%
group_by(rank) %>%
summarize(mean_abundance = mean(abundance)) %>%
ungroup() %>%
mutate(library = 1,
mean_abundance = mean_abundance / sum(mean_abundance) * 100) %>%
rename('rel_abund_perc' = mean_abundance) %>%
dplyr::select(library, rel_abund_perc, rank) %>%
as.data.frame
df.OTU %>% nrow %>% print
ggplot(df.OTU, aes(rank, rel_abund_perc)) +
geom_point() +
geom_line() +
labs(y = 'mean % rel abund')
"""
Explanation: Making a community file for the simulations
End of explanation
"""
ret = !SIPSim KDE_info -t /home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl
ret = ret[1:]
ret[:5]
%%R
F = '/home/nick/notebook/SIPSim/dev/fullCyc_trim//ampFrags_kde_amplified.txt'
ret = read.delim(F, sep='\t')
ret = ret$genomeID
ret %>% length %>% print
ret %>% head
%%R
ret %>% length %>% print
df.OTU %>% nrow
%%R -i ret
# randomize
ret = ret %>% sample %>% sample %>% sample
# adding to table
df.OTU$taxon_name = ret[1:nrow(df.OTU)]
df.OTU = df.OTU %>%
dplyr::select(library, taxon_name, rel_abund_perc, rank)
df.OTU %>% head
%%R
#-- debug -- #
df.gc = read.delim('~/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_parsed_kde_info.txt',
sep='\t', row.names=)
top.taxa = df.gc %>%
filter(KDE_ID == 1, median > 1.709, median < 1.711) %>%
dplyr::select(taxon_ID) %>%
mutate(taxon_ID = taxon_ID %>% sample) %>%
head
top.taxa = top.taxa$taxon_ID %>% as.vector
top.taxa
%%R
#-- debug -- #
p1 = df.OTU %>%
filter(taxon_name %in% top.taxa)
p2 = df.OTU %>%
head(n=length(top.taxa))
p3 = anti_join(df.OTU, rbind(p1, p2), c('taxon_name' = 'taxon_name'))
df.OTU %>% nrow %>% print
p1 %>% nrow %>% print
p2 %>% nrow %>% print
p3 %>% nrow %>% print
p1 = p2$taxon_name
p2$taxon_name = top.taxa
df.OTU = rbind(p2, p1, p3)
df.OTU %>% nrow %>% print
df.OTU %>% head
"""
Explanation: Adding reference genome taxon names
End of explanation
"""
%%R
F = file.path(workDir, 'fullCyc_12C-Con_trm_comm.txt')
write.table(df.OTU, F, sep='\t', quote=FALSE, row.names=FALSE)
cat('File written:', F, '\n')
"""
Explanation: Writing file
End of explanation
"""
!tail -n +2 /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm.txt | \
cut -f 2 > /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt
outFile = os.path.splitext(ampFragFile)[0] + '_parsed.pkl'
!SIPSim KDE_parse \
$ampFragFile \
/home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt \
> $outFile
print 'File written {}'.format(outFile)
!SIPSim KDE_info -n $outFile
"""
Explanation: parsing amp-Frag file to match comm file
End of explanation
"""
|
amlanlimaye/yelp-dataset-challenge | notebooks/reports/3.1-technical-report.ipynb | mit | ### Link to requirements.txt on github
"""
Explanation: Discovering Abstract Topics in Yelp Reviews - Technical Report
1. Background
Yelp is an American multinational corporation headquartered in San Francisco, California. It develops, hosts and markets Yelp.com and the Yelp mobile app, which publish crowd-sourced reviews about local businesses, as well as the online reservation service Yelp Reservations and online food-delivery service Eat24. The company also trains small businesses in how to respond to reviews, hosts social events for reviewers, and provides data about businesses, including health inspection scores.
Yelp.com is a crowd-sourced local business review and social networking site. Its user community is primarily active in major metropolitan areas.The site has pages devoted to individual locations, such as restaurants or schools, where Yelp users can submit a review on their products or services using a one to five star rating system.Businesses can also update contact information, hours and other basic listing information or add special deals. In addition to writing reviews, users can react to reviews, plan events or discuss their personal lives. According to Sterling Market Intelligence, Yelp is "one of the most important sites on the Internet." As of Q2 2016 it has 168 million monthly unique visitors and 108 million reviews.
78 percent of businesses listed on the site have a rating of three stars or better, but some negative reviews are very personal or extreme. Many reviews are written in an entertaining or creative manner. Users can give a review a "thumbs-up" if it is "useful, funny or cool." Each day a "Review of the Day" is determined based on a vote by users.
2. Problem Statement
The objective of this project is to unearth the "topics" being talked about in Yelp Reviews, understand their distribution and develop an understanding of Yelp Reviews that will serve as a foundation to tackle more sophisticated questions in the future, such as:
Cultural Trends: What makes a particular city different? What cuisines do Yelpers rave about in different countries? Do Americans tend to eat out late compared to those in Germany or the U.K.? In which countries are Yelpers sticklers for service quality? In international cities such as Montreal, are French speakers reviewing places differently than English speakers?
Inferring Categories: Are there any non-intuitive correlations between business categories e.g., how many karaoke bars also offer Korean food, and vice versa? What businesses deserve their own subcategory (i.e., Szechuan or Hunan versus just "Chinese restaurants")
Detecting Sarcasm in Reviews: Are Yelpers a sarcastic bunch?
Detecting Changepoints and Events: Detecting when things change suddenly (e.g., a business coming under new management or when a city starts going nuts over cronuts)
3. Data Collection and Cleaning
End of explanation
"""
business.head(2)
"""
Explanation: 3.1 Data Dictionary
The data for the project was obtained from https://www.yelp.com/dataset_challenge/dataset.
400K reviews and 100K tips by 120K users for 106K businesses
Cities(US): Pittsburgh, Charlotte, Urbana-Champaign, Phoenix, Las Vegas, Madison, Cleveland
<br><br>
Businesses table:
<br><br>
"business_id":"encrypted business id"
"name":"business name"
"neighborhood":"hood name"
"address":"full address"
"city":"city"
"state":"state -- if applicable --"
"postal code":"postal code"
"latitude":latitude
"longitude":longitude
"stars":star rating, rounded to half-stars
"review_count":number of reviews
"is_open":0/1 (closed/open)
"attributes":["an array of strings: each array element is an attribute"]
"categories":["an array of strings of business categories"]
"hours":["an array of strings of business hours"]
"type": "business"
<br><br>
Reviews table:
<br><br>
"review_id":"encrypted review id"
"user_id":"encrypted user id"
"business_id":"encrypted business id"
"stars":star rating, rounded to half-stars
"date":"date formatted like 2009-12-19"
"text":"review text"
"useful":number of useful votes received
"funny":number of funny votes received
"cool": number of cool review votes received
"type": "review"
<br><br>
Users table:
<br><br>
"user_id":"encrypted user id"
"name":"first name"
"review_count":number of reviews
"yelping_since": date formatted like "2009-12-19"
"friends":["an array of encrypted ids of friends"]
"useful":"number of useful votes sent by the user"
"funny":"number of funny votes sent by the user"
"cool":"number of cool votes sent by the user"
"fans":"number of fans the user has"
"elite":["an array of years the user was elite"]
"average_stars":floating point average like 4.31
"compliment_hot":number of hot compliments received by the user
"compliment_more":number of more compliments received by the user
"compliment_profile": number of profile compliments received by the user
"compliment_cute": number of cute compliments received by the user
"compliment_list": number of list compliments received by the user
"compliment_note": number of note compliments received by the user
"compliment_plain": number of plain compliments received by the user
"compliment_cool": number of cool compliments received by the user
"compliment_funny": number of funny compliments received by the user
"compliment_writer": number of writer compliments received by the user
"compliment_photos": number of photo compliments received by the user
"type":"user"
<br><br>
Checkins table:
<br><br>
"time":["an array of check ins with the format day-hour:number of check ins from hour to hour+1"]
"business_id":"encrypted business id"
"type":"checkin"
<br><br>
Tips table:
<br><br>
"text":"text of the tip"
"date":"date formatted like 2009-12-19"
"likes":compliment count
"business_id":"encrypted business id"
"user_id":"encrypted user id"
"type":"tip"
3.2 Data Cleaning
3.2.1 Converted raw json files obtained from https://www.yelp.com/dataset_challenge/dataset into csv files.
<pre>
```python
def convert_json_to_csv(json_file_dir, csv_file_dir):
for filename in os.listdir(json_file_dir):
if filename.endswith('.json'):
try:
pd.read_json(os.path.join(json_file_dir, filename), lines=True).to_csv(os.path.join(csv_file_path, filename.replace('.json', '.csv')), encoding='utf-8', index=False)
except:
print filename + 'error\n'
convert_json_to_csv('/home/amlanlimaye/yelp-dataset-challenge/data/raw/',
'/home/amlanlimaye/yelp-dataset-challenge/data/interim/original_csv/')
```
<pre>
#### 3.2.2 Reading all data tables
<pre>
```python
table_names = ['business', 'review', 'user', 'checkin', 'tip']
original_csv_filepath = '/home/amlanlimaye/yelp-dataset-challenge/data/interim/original_csv/'
for tbl_name in table_names:
globals()[tbl_name] = pd.read_csv(original_csv_filepath + "{}".format(tbl_name) + '.csv')
```
<pre>
#### 3.2.3 Cleaning 'business' table
<pre>
```python
# Sample row in the attributes column:
# u"[BikeParking: True, BusinessAcceptsBitcoin: False, BusinessAcceptsCreditCards: True, BusinessParking: {'garage': False, 'street': False, 'validated': False, 'lot': True, 'valet': False}, DogsAllowed: False, RestaurantsPriceRange2: 2, WheelchairAccessible: True]"
# Function to clean the attributes column and use regex to make python understand that business['attributes'] is a json-type dict:
def clean_business(test_str):
test_str = test_str.fillna('[]')
test_str = test_str.map(lambda x: x.replace('[','{'))
test_str = test_str.map(lambda x: x.replace(']','}'))
test_str = test_str.map(lambda x: x.replace('True', 'true'))
test_str = test_str.map(lambda x: x.replace('False', 'false'))
test_str = test_str.map(lambda x: x.replace('\'', '"'))
matches = re.findall("([A-Za-z0-9]+)(?=:)", test_str)
if len(matches):
for match in matches:
test_str = test_str.replace(match, '"%s"' % match)
return test_str
business['attributes'] = business['attributes'].map(regex_match)
# Function to extract attributes from json object and convert them into columns
def expand_features(row):
try:
extracted = json.loads(row['attributes'])
for key, value in extracted.items():
print key, type(value)
if type(value) != dict:
row["attribute_" + key] = value
else:
for attr_key, attr_value in value.items():
row["attribute_" + key + "_" + attr_key] = attr_value
except:
print "could not decode:", row['attributes']
return row
business.apply(expand_features, axis=1).columns
# Cleaning 'categories' column
business['categories'] = business['categories'].fillna(' ')
business['categories'] = business['categories'].map(lambda x: x[1:-1].split(','))
# Cleaning 'hours' column
business['hours'] = business['hours'].fillna(' ')
business['hours'] = business['hours'].map(lambda x: x[1:-1].split(','))
# Cleaning 'neighborhoods' column
business['neighborhood'] = business['neighborhood'].fillna(' ')
business['neighborhood'] = business['neighborhood'].map(lambda x: x[1:-1].split(','))
# Cleaning 'postal_code' column
business['postal_code'] = business['postal_code'].map(lambda x: x[:-2])
```
<pre>
End of explanation
"""
review.head(2)
review.text.head(2)
review_all = pd.read_csv('../../data/interim/original_csv/review.csv')
# Number of reviews by date
# The sharp seasonal falls are Chrismas Day and New Year's Day
# The sharp seasonal spikes are in summer, where people presumably have more free time
review.groupby('date').agg({'review_id': len}).reset_index().plot(x='date', y='review_id', figsize=(10,6))
"""
Explanation: 3.2.4 Cleaning 'review' table
<pre>
```python
# Cleaning the 'date' column
review['date'] = pd.to_datetime(review['date'])
# Cleaning the 'useful' column
review['useful'] = review['useful'].fillna(0)
review['useful'] = review['useful'].map(int)
```
<pre>
End of explanation
"""
checkin.head(2)
"""
Explanation: 3.2.5 Cleaning 'checkin' table
<pre>
```python
# Cleaning 'time' column
checkin['time'] = checkin['time'].map(lambda x: x[1:-1].split(','))
# Making columns aggregating checkins by day of week
checkin['mon_list'] = checkins['time'].map(lambda x: [list_item for list_item in x[1:-1].split(',') if 'Mon' in list_item])
checkin['tue_list'] = checkins['time'].map(lambda x: [list_item for list_item in x[1:-1].split(',') if 'Tue' in list_item])
checkin['wed_list'] = checkins['time'].map(lambda x: [list_item for list_item in x[1:-1].split(',') if 'Wed' in list_item])
checkin['thu_list'] = checkins['time'].map(lambda x: [list_item for list_item in x[1:-1].split(',') if 'Thu' in list_item])
checkin['fri_list'] = checkins['time'].map(lambda x: [list_item for list_item in x[1:-1].split(',') if 'Fri' in list_item])
checkin['sat_list'] = checkins['time'].map(lambda x: [list_item for list_item in x[1:-1].split(',') if 'Sat' in list_item])
checkin['sun_list'] = checkins['time'].map(lambda x: [list_item for list_item in x[1:-1].split(',') if 'Sun' in list_item])
# Converting day of week lists to dictionaries so that # of checkins can be looked up by hour
checkin['mon_list'] = checkin['mon_list'].map(lambda x:
{int(list_item.replace(' ', '').replace('Mon-', '').split(':')[0]):int(list_item.replace(' ', '').replace('Mon-', '').split(':')[1])
for list_item in x})
checkin['tue_list'] = checkin['tue_list'].map(lambda x:
{int(list_item.replace(' ', '').replace('Tue-', '').split(':')[0]):int(list_item.replace(' ', '').replace('Tue-', '').split(':')[1])
for list_item in x})
checkin['wed_list'] = checkin['wed_list'].map(lambda x:
{int(list_item.replace(' ', '').replace('Wed-', '').split(':')[0]):int(list_item.replace(' ', '').replace('Wed-', '').split(':')[1])
for list_item in x})
checkin['thu_list'] = checkin['thu_list'].map(lambda x:
{int(list_item.replace(' ', '').replace('Thu-', '').split(':')[0]):int(list_item.replace(' ', '').replace('Thu-', '').split(':')[1])
for list_item in x})
checkin['fri_list'] = checkin['fri_list'].map(lambda x:
{int(list_item.replace(' ', '').replace('Fri-', '').split(':')[0]):int(list_item.replace(' ', '').replace('Fri-', '').split(':')[1])
for list_item in x})
checkin['sat_list'] = checkin['sat_list'].map(lambda x:
{int(list_item.replace(' ', '').replace('Sat-', '').split(':')[0]):int(list_item.replace(' ', '').replace('Sat-', '').split(':')[1])
for list_item in x})
checkin['sun_list'] = checkin['sun_list'].map(lambda x:
{int(list_item.replace(' ', '').replace('Sun-', '').split(':')[0]):int(list_item.replace(' ', '').replace('Sun-', '').split(':')[1])
for list_item in x})
```
<pre>
End of explanation
"""
user.head(2)
"""
Explanation: 3.2.6 Cleaning 'user' table
<pre>
```
# Cleaning 'elite' column
user['elite'] = user['elite'].map(lambda x: x[1:-1].split(','))
# Cleaning 'friends' column
user['friends'] = user['friends'].map(lambda x: x[1:-1].split(','))
# Cleaning 'yelping since' column
user['yelping_since'] = pd.to_datetime(user['yelping_since'])
```
<pre>
End of explanation
"""
tip.head(2)
tip.text.head(2)
"""
Explanation: 3.2.7 Cleaning 'tip' table
<pre>
```python
# Cleaning 'date' column
tip['date'] = pd.to_datetime(tip['date'])
```
<pre>
End of explanation
"""
import pandas as pd
import numpy as np
import seaborn as sns # For prettier plots. Seaborn takes over pandas' default plotter
import nltk
import pyLDAvis
import pyLDAvis.sklearn
from gensim import models, matutils
from collections import defaultdict
from gensim import corpora
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
pyLDAvis.enable_notebook()
%matplotlib inline
review = pd.read_csv('../../data/interim/clean_US_cities/2016_review.csv')
review = review.fillna('')
tvec = TfidfVectorizer(stop_words='english', min_df=10, max_df=0.5, max_features=100,
norm='l2',
strip_accents='unicode'
)
review_dtm_tfidf = tvec.fit_transform(review['text'])
cvec = CountVectorizer(stop_words='english', min_df=10, max_df=0.5, max_features=100,
strip_accents='unicode')
review_dtm_cvec = cvec.fit_transform(review['text'])
print review_dtm_tfidf.shape, review_dtm_cvec.shape
# Fitting LDA models
# On cvec DTM
lda_cvec = LatentDirichletAllocation(n_topics=10, random_state=42)
lda_cvec.fit(review_dtm_cvec)
# On tfidf DTM
lda_tfidf = LatentDirichletAllocation(n_topics=10, random_state=42)
lda_tfidf.fit(review_dtm_tfidf)
lda_viz_10_topics_cvec = pyLDAvis.sklearn.prepare(lda_cvec, review_dtm_cvec, cvec)
lda_viz_10_topics_cvec
# topic labels
topics_labels = {
1: "customer_feelings",
2: "customer_actions",
3: "restaurant_related",
4: "compliments",
5: "las_vegas_related",
6: "hotel_related",
7: "location_related",
8: "chicken_related",
9: "superlatives",
10: "ordering_pizza"
}
"""
Explanation: 4. Model - Latent Dirichlet Allocation (LDA)
A topic model is a type of statistical model for discovering the abstract "topics" that occur in a collection of documents. A document typically concerns multiple topics in different proportions; thus, in a document that is 10% about cats and 90% about dogs, there would probably be about 9 times more dog words than cat words. The "topics" produced by topic modeling techniques are clusters of similar words. A topic model captures this intuition in a mathematical framework, which allows examining a set of documents and discovering, based on the statistics of the words in each, what the topics might be and what each document's balance of topics is.
LDA (Latent Dirichlet Allocation) is an example of a topic model that posits that each document is a mixture of a small number of topics and that each word's creation is attributable to one of the document's topics.
LDA represents documents as mixtures of topics that spit out words with certain probabilities. It assumes that documents are produced in the following fashion: when writing each document, you:
Decide on the number of words N the document will have (say, according to a Poisson distribution).
Choose a topic mixture for the document (according to a Dirichlet distribution over a fixed set of K topics). For example, assuming that we have the two topics; food and cute animals, you might choose the document to consist of 1/3 food and 2/3 cute animals.
Generate each word in the document by:
....First picking a topic (according to the multinomial distribution that you sampled above; for example, you might pick the food topic with 1/3 probability and the cute animals topic with 2/3 probability).
....Then using the topic to generate the word itself (according to the topic's multinomial distribution). For instance, the food topic might output the word "broccoli" with 30% probability, "bananas" with 15% probability, and so on.
Assuming this generative model for a collection of documents, LDA then tries to backtrack from the documents to find a set of topics that are likely to have generated the collection.
End of explanation
"""
vocab = {v: k for k, v in cvec.vocabulary_.iteritems()}
vocab
lda_ = models.LdaModel(
matutils.Sparse2Corpus(review_dtm_cvec, documents_columns=False),
# or use the corpus object created with the dictionary in the previous frame!
# corpus,
num_topics = 10,
passes = 1,
id2word = vocab
# or use the gensim dictionary object!
# id2word = dictionary
)
# NLTK stopword list (all languages) for later filtering.
stops = stopwords.words()
# Document-term matrix as a labelled DataFrame.
# Fix: the fitted CountVectorizer in this notebook is named `cvec`;
# `vectorizer` was never defined and raised a NameError.
docs = pd.DataFrame(review_dtm_cvec.toarray(), columns=cvec.get_feature_names())
# Per-term total counts across all documents.
docs.sum()
bow = []
for document in review_dtm_cvec.toarray():
single_document = []
for token_id, token_count in enumerate(document):
if token_count > 0:
single_document.append((token_id, token_count))
bow.append(single_document)
# remove words that appear only once
frequency = defaultdict(int)
for text in documents:
for token in text.split():
frequency[token] += 1
texts = [[token for token in text.split() if frequency[token] > 1 and token not in stops]
for text in documents]
# Create gensim dictionary object
dictionary = corpora.Dictionary(texts)
# Create corpus matrix
corpus = [dictionary.doc2bow(text) for text in texts]
lda_.print_topics(num_topics=3, num_words=5)
lda_.get_document_topics(bow[0])
doc_topics = [lda_.get_document_topics(doc) for doc in corpus]
# Flatten the per-document topic distributions into a tidy table with one
# row per (document, topic) pair.
topic_data = []
for document_id, topics in enumerate(doc_topics):
    # NOTE(review): gensim topic ids are 0-based while `topics_labels`
    # keys run 1..10, so topic id 0 would raise KeyError — confirm the
    # intended offset.
    for topic, probability in topics:
        topic_data.append({
            'document_id': document_id,
            'topic_id': topic,
            'topic': topics_labels[topic],
            'probability': probability
        })

# Fix: `pd.DataFrame(topic_data.[:5])` was a syntax error; build the
# frame from the full list of records.
topics_df = pd.DataFrame(topic_data)
# Wide view: topic probabilities per (document, topic).
topics_df.pivot_table(values="probability", index=["document_id", "topic"]).T
"""
Explanation: Generating topic probabilities for each review
End of explanation
"""
|
rnoxy/cifar10-cnn | Classification_using_CNN_codes.ipynb | mit | !ls features/
"""
Explanation: CIFAR10 classification using CNN codes
Here we are going to build linear models to classify CNN codes of CIFAR10 images.
We assume that we already have all the codes extracted by the scripts in the following notebooks:
- Feature_extraction_using_keras.ipynb
- Feature_extraction_using_Inception_v3.ipynb
End of explanation
"""
model_names = [
'vgg16-keras',
'vgg19-keras',
'resnet50-keras',
'incv3-keras',
'Inception_v3'
]
import numpy as np
data = dict()
for model_name in model_names:
data[model_name] = np.load('features/CIFAR10_{model}_features.npz'.format(model=model_name))
# It is important that CNN codes for all the models are given in the same order,
# i.e. they refer to the same samples from the dataset (both training and testing)
y_training = data[ model_names[0] ]['labels_training'] # this should be common for all the models
y_testing = data[ model_names[0] ]['labels_testing'] # this should be common for all the models
for i in range(1,len(model_names)):
assert( (data[model_names[i]]['labels_training'] == y_training).all() )
assert( (data[model_names[i]]['labels_testing'] == y_testing).all() )
"""
Explanation: Load CNN codes
End of explanation
"""
# First we tried all of the following parameters for each model
model_params = {
'vgg16-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01,'max_iter':3000},
{'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],
'vgg19-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},
{'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],
'resnet50-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},
{'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],
'Inception_v3': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},
{'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],
'incv3-keras': [ {'C':0.0001}, {'C':0.001}, {'C':0.01},
{'C':0.1}, {'C':0.5}, {'C':1.0}, {'C':1.2}, {'C':1.5}, {'C':2.0}, {'C':10.0} ],
}
"""
Explanation: LinearSVC classifier from scikit-learn
We used the linear classifier from the scikit-learn library.<br/>
More precisely, we used LinearSVC
End of explanation
"""
# and we decided to choose the best parameters
model_params = {
'vgg16-keras': [ {'C':0.0001} ],
'vgg19-keras': [ {'C':0.001} ],
'resnet50-keras': [ {'C':0.001} ],
'Inception_v3': [ {'C':0.01} ],
'incv3-keras': [ {'C':0.001} ]
}
from sklearn.svm import LinearSVC
# C - chosen experimentally (see explanation below)
results = dict()
for model_name in model_params:
print('model = ', model_name)
X_training = data[model_name]['features_training']
X_testing = data[model_name]['features_testing']
print( 'X_training size = {}'.format(X_training.shape))
# print( 'X_testing size = {}'.format(X_testing.shape))
# print( 'y_training size = {}'.format(y_training.shape))
# print( 'y_testing size = {}'.format(y_testing.shape))
results[model_name] = []
for params in model_params[model_name]:
clf = LinearSVC(**params, verbose=0)
clf.fit( X_training, y_training )
y_pred = clf.predict( X_testing )
score = sum( y_pred == y_testing )
print('features={:>16}, C={:8f} => score={:5d}'.format(model_name,params['C'],score))
results[model_name].append({'pred': y_pred, 'score': score, 'clf': clf})
from sklearn.externals import joblib
for model_name in model_params:
joblib.dump(results[model_name][0]['clf'], \
'classifiers/{score}-{name}.pkl'.format(score=results[model_name][0]['score'], name=model_name))
!ls -l classifiers/*.pkl
best_model = 'resnet50-keras'
X_training = data[best_model]['features_training']
X_testing = data[best_model]['features_testing']
clf = results[best_model][0]['clf']
print( 'Best accuracy = {}'.format( clf.score( X_testing, y_testing ) ) )
y_predictions = clf.predict( X_testing )
"""
Explanation: Before we start to train so many classifiers, let us write all the results
we obtained after hours of computation.
We tried to build LinearSVC classifier with many possible paramater C.
Below we present the accuracy of all the considered models.
Model
-----------------------------------------------------------------------------
C | vgg16-keras | vgg19-keras | resnet50-keras | incv3-keras | Inception_v3
------------------------------------------------------------------------------------
0.0001 | 8515 | 8633 | 9043 | 7244 | 8860
0.001 | 8528 | 8654 | 9158 | 7577 | 9005
0.01 | 8521 | 8644 | 9130 | 7604 | 9061
0.1 | 8519 | 8615 | 9009 | 7461 | 8959
0.5 | 7992 | 8014 | 8858 | 7409 | 8834
1.0 | 8211 | 8225 | 8853 | 7369 | 8776
1.2 | 8156 | 8335 | 8871 | 7357 | 8772
1.5 | 8172 | 8022 | 8852 | 7318 | 8762
2.0 | 7609 | 8256 | 8870 | 7281 | 8736
10.0 | 7799 | 7580 | 8774 | 7042 | 8709
End of explanation
"""
import myutils
from sklearn.metrics import confusion_matrix
labels = myutils.load_CIFAR_classnames()
conf_matrix = confusion_matrix( y_testing, y_predictions )
print( 'Confusion matrix:\n', conf_matrix )
print( labels )
i,j = 3,0
img_idx = [ k for k in range(10000) if y_testing[k]==i and y_predictions[k]==j ]
print( 'We have, e.g., {c} {iname}s predicted to be {jname}'.format(\
c=conf_matrix[i,j], iname=labels[i], jname=labels[j]) )
# print(img_idx)
_, data_testing = myutils.load_CIFAR_dataset(shuffle=False)
from matplotlib import pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(18,2));
for _i in range(conf_matrix[i,j]):
a=fig.add_subplot(1,conf_matrix[i,j],_i+1)
plt.imshow(data_testing[img_idx[_i]][0])
plt.axis('off')
"""
Explanation: So we obtained 91.58% accuracy on testing dataset using LinearSVC classifier on top of features extracted with ResNET50 convolutional neural network.
Some misclassifications
End of explanation
"""
# np.savez_compressed("classifiers/9158_resnet50-keras_LinearSVC.npz",W=np.array(clf.coef_).T, b=clf.intercept_)
"""
Explanation: Saving parameters
We simply save the matrix with weights and bias vector for linear classifier.
End of explanation
"""
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X_training, y_training)
print( 'Linear regression accuracy = ', clf.score( X_testing, y_testing ) )
"""
Explanation: k-nearest neighbors classifier
Let us note that simple kNN classifier
(with k=10), trained with 5000 training features (CNN codes from Inception_v3) gives 83.45% accuracy on whole 10000 testing images.
Remark that computing predictions with this classifier is computationally expensive, and it is not recommended for classification of images.
Here is the code to compute the score on testing dataset.
```python
from sklearn.neighbors import KNeighborsClassifier
kNN_clf = KNeighborsClassifier(n_neighbors=10)
kNN_clf.fit(X_training, y_training)
print( 'Classification score = ', kNN_clf.score( X_testing, y_testing ) )
Classification score = 0.8345
```
Logistic regression
Finally we used <tt>Logistic regression</tt> with default parameters. We trained the model with all the training data and obtained 90.37% accuracy on testing dataset.
End of explanation
"""
|
LorenzoBi/courses | TSAADS/tutorial 2/TSA2_LORENZO_BIASI__JULIUS_VERNIE.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from sklearn import datasets, linear_model
%matplotlib inline
def set_data(p, x):
    """Build the linear-regression design for an AR(p) fit.

    Returns (X_p, x_T): X_p is an (n, p+1) matrix whose first column is
    all ones (intercept) and whose column i (1 <= i <= p) holds the
    series shifted by i-1 samples; x_T is the (n, 1) column of targets
    x[p:], with n = len(x) - p.
    """
    series = x.flatten()
    n = series.size - p
    # Targets: every sample that has p predecessors available.
    targets = series[p:].reshape((n, 1))
    # Design matrix: intercept column plus one column per lag.
    design = np.ones((n, p + 1))
    for col in range(1, p + 1):
        design[:, col] = series[col - 1:col - 1 + n]
    return design, targets
def AR(coeff, init, T):
    """Simulate T samples of an AR(p) process driven by N(0, 1) noise.

    Parameters
    ----------
    coeff : array-like, shape (p + 1,)
        Parameters fitted on a `set_data` design: coeff[0] is the
        intercept; coeff[1:] multiply the lags from oldest (lag p) to
        most recent (lag 1), matching the column order of `set_data`.
    init : sequence, length p >= 1
        Initial values seeding the recursion.
    T : int
        Total number of samples to generate (including the seed values).

    Returns
    -------
    numpy.ndarray, shape (T,)

    Bug fixes vs. the previous version: it flipped coeff[1:] before
    applying it, mis-ordering the lag coefficients for p >= 2
    (coincidentally harmless for the AR(1) case used in this notebook),
    and it relied on the loop variable `k` leaking out of the seeding
    loop, which raised NameError for an empty `init`.
    """
    offset = coeff[0]
    # coeff[1:] is already aligned with the ascending-time slice
    # series[i - p:i] (oldest lag first), so no flip is needed.
    lag_coef = np.asarray(coeff)[1:]
    p = len(init)
    if p == 0:
        raise ValueError("init must supply at least one starting value")
    series = np.zeros(T)
    series[:p] = init
    for i in range(p, T):
        series[i] = np.sum(lag_coef * series[i - p:i]) + np.random.normal() + offset
    return series
def estimated_autocorrelation(x):
    """Sample autocorrelation of `x` at lags 0..n-1 (result[0] == 1)."""
    n = len(x)
    centered = x - np.mean(x)
    variance = np.var(x)
    # Full auto-cross-correlation of the centered series; the last n
    # entries correspond to the non-negative lags 0, 1, ..., n-1.
    raw = np.correlate(centered, centered, mode='full')[-n:]
    # Normalise lag k by the variance times the number of overlapping
    # samples (n - k).
    counts = np.arange(n, 0, -1)
    return raw / (variance * counts)
def test_AR(x, coef, N):
x = x.flatten()
offset = coef[0]
slope = coef[1]
ave_err = np.empty((len(x) - N, N))
x_temp = np.empty(N)
for i in range(len(x) - N):
x_temp[0] = x[i] * slope + offset
for j in range(N -1):
x_temp[j + 1] = x_temp[j] * slope + offset
ave_err[i, :] = (x_temp - x[i:i+N])**2
return ave_err
x = sio.loadmat('Tut2_file1.mat')['x'].flatten()
plt.plot(x * 2, ',')
plt.xlabel('time')
plt.ylabel('x')
X_p, x_T = set_data(1, x)
model = linear_model.LinearRegression()
model.fit(X_p, x_T)
model.coef_
"""
Explanation: Linear time series analysis - AR/MA models
Lorenzo Biasi (3529646), Julius Vernie (3502879)
Task 1. AR(p) models.
1.1
End of explanation
"""
x_1 = AR(np.append(model.coef_, 0), [0, x[0]], 50001)
plt.plot(x_1[1:], ',')
plt.xlabel('time')
plt.ylabel('x')
"""
Explanation: We can see that simulating the data as an AR(1) model is not effective in giving us anything similar to the acquired data. This is due to the fact that we made the wrong assumptions when we computed the coefficients of our data. Our data is in fact clearly not a stationary process and in particular cannot be from an AR(1) model alone, as there is a linear trend in time. The slope that we computed shows that successive data points are strongly correlated.
End of explanation
"""
rgr = linear_model.LinearRegression()
x = x.reshape((len(x)), 1)
t = np.arange(len(x)).reshape(x.shape)
rgr.fit(t, x)
x_star= x - rgr.predict(t)
plt.plot(x_star.flatten(), ',')
plt.xlabel('time')
plt.ylabel('x')
"""
Explanation: 1.2
Before estimating the coefficients of the AR(1) model we remove the linear trend in time, thus making it resemble more closely the model with which we are trying to analyze it.
End of explanation
"""
X_p, x_T = set_data(1, x_star)
model.fit(X_p, x_T)
model.coef_
x_1 = AR(np.append(model.coef_[0], 0), [0, x_star[0]], 50000)
plt.plot(x_1, ',')
plt.xlabel('time')
plt.ylabel('x')
plt.plot(x_star[1:], x_star[:-1], ',')
plt.xlabel(r'x$_{t - 1}$')
plt.ylabel(r'x$_{t}$')
"""
Explanation: This time we obtain different coefficients, that we can use to simulate the data and see if they give us a similar result the real data.
End of explanation
"""
err = test_AR(x_star, model.coef_[0], 10)
np.sum(err, axis=0) / err.shape[0]
plt.plot(np.sum(err, axis=0) / err.shape[0], 'o', label='Error')
plt.plot([0, 10.], np.ones(2)* np.var(x_star), 'r', label='Variance')
plt.grid(linestyle='dotted')
plt.xlabel(r'$\Delta t$')
plt.ylabel('Error')
"""
Explanation: In the next plot we can see that our predicted values have an error that decays exponentially the further we try to make a prediction. By the time it arrives to 5 time steps of distance it equal to the variance.
End of explanation
"""
x = sio.loadmat('Tut2_file2.mat')['x'].flatten()
plt.plot(x, ',')
plt.xlabel('time')
plt.ylabel('x')
np.mean(x)
X_p, x_T = set_data(1, x)
model = linear_model.LinearRegression()
model.fit(X_p, x_T)
model.coef_
"""
Explanation: 1.4
By plotting the data we can already see that this cannot be a simple AR model. The data seems divided in 2 parts with very few data points in the middle.
End of explanation
"""
x_1 = AR(model.coef_[0], x[:1], 50001)
plt.plot(x_1[1:], ',')
plt.xlabel('time')
plt.ylabel('x')
"""
Explanation: We tried to simulate the data with these coefficients, but it is clearly ineffective.
End of explanation
"""
plt.plot(x[1:], x[:-1], ',')
plt.xlabel(r'x$_{t - 1}$')
plt.ylabel(r'x$_{t}$')
plt.plot(x_star[1:], x_star[:-1], ',')
plt.xlabel(r'x$_{t - 1}$')
plt.ylabel(r'x$_{t}$')
"""
Explanation: By plotting the return plot we can better understand what is going on. The data can be divided in two parts. We can see that successive data is always around one of this two poles. If it were a real AR model we would expect something like the return plots shown below this one.
End of explanation
"""
plt.plot(estimated_autocorrelation(x)[:200])
plt.xlabel(r'$\Delta$t')
plt.ylabel(r'$\rho$')
plt.plot(estimated_autocorrelation(x_1.flatten())[:20])
plt.xlabel(r'$\Delta$t')
plt.ylabel(r'$\rho$')
"""
Explanation: We can see that in the autocorrelation plot the trend is exponential, which is what we would expect, but it is taking too long to decay for an AR model with a small value of $p$
End of explanation
"""
data = sio.loadmat('Tut2_file3.mat')
x_AR = data['x_AR'].flatten()
x_MA = data['x_MA'].flatten()
"""
Explanation: Task 2. Autocorrelation and partial autocorrelation.
2.1
End of explanation
"""
for i in range(3,6):
X_p, x_T = set_data(i, x_AR)
model = linear_model.LinearRegression()
model.fit(X_p, x_T)
plt.plot(estimated_autocorrelation((x_T - model.predict(X_p)).flatten())[:20], \
label='AR(' + str(i) + ')')
plt.xlabel(r'$\Delta$t')
plt.ylabel(r'$\rho$')
plt.legend()
"""
Explanation: For computing the $\hat p$ for the AR model we predicted the parameters $a_i$ for various AR(5). We find that for p = 5 we do not have any correlation between previous values and future values.
End of explanation
"""
plt.plot(estimated_autocorrelation(x_MA)[:20])
plt.xlabel(r'$\Delta$t')
plt.ylabel(r'$\rho$')
"""
Explanation: For the MA $\hat q$ could be around 4-6
End of explanation
"""
|
Raag079/self-driving-car | Term01-Computer-Vision-and-Deep-Learning/P2-Traffic-Sign-Classifier/Traffic_Sign_Classifier.ipynb | mit | # Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'train.p'
testing_file = 'test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_test, y_test = test['features'], test['labels']
"""
Explanation: Self-Driving Car Engineer Nanodegree
Deep Learning
Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission, if necessary. Sections that begin with 'Implementation' in the header indicate where you should begin your implementation for your project. Note that some sections of implementation are optional, and will be marked with 'Optional' in the header.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
Step 0: Load The Data
End of explanation
"""
### Replace each question mark with the appropriate value.
# TODO: Number of training examples
n_train = len(X_train)
# TODO: Number of testing examples.
n_test = len(X_test)
# TODO: What's the shape of an traffic sign image?
image_shape = X_train[0].shape
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(set(y_test))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
"""
Explanation: Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
'features' is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
'labels' is a 2D array containing the label/class id of the traffic sign. The file signnames.csv contains id -> name mappings for each id.
'sizes' is a list containing tuples, (width, height) representing the the original width and height the image.
'coords' is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES
Complete the basic data summary below.
End of explanation
"""
### Data exploration visualization goes here.
import random
import numpy as np
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image, cmap="gray")
print(y_train[index])
"""
Explanation: Set Validation features
Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
The Matplotlib examples and gallery pages are a great resource for doing visualizations in Python.
NOTE: It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections.
End of explanation
"""
### Preprocess the data here.
from sklearn.utils import shuffle
# Implement Min-Max scaling for image data
def normalize(image_data):
    """Min-max scale 8-bit pixel values from [0, 255] into [0.01, 0.99].

    Keeping values strictly inside (0, 1) gives the network inputs with
    roughly zero mean and equal variance after centring.
    """
    lo = 0.01
    hi = 0.99
    return lo + (image_data / 255.0) * (hi - lo)
# Normalize train features and test features
X_train = normalize(X_train)
X_test = normalize(X_test)
X_train, y_train = shuffle(X_train, y_train)
from sklearn.model_selection import train_test_split
# NOTE(review): the next three statements fold the official test set into
# the training pool before carving out a 2% validation split.  The "test
# accuracy" reported in a later cell is therefore measured on data the
# model was trained on (data leakage) -- confirm whether this is intended.
X_train = np.append(X_train, X_test, axis=0)
y_train = np.append(y_train, y_test, axis=0)
X_train, X_validation, y_train, y_validation = train_test_split(
    X_train,
    y_train,
    test_size=0.02,
    random_state=42)
"""
Explanation: Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the German Traffic Sign Dataset.
There are various aspects to consider when thinking about this problem:
Neural network architecture
Play around preprocessing techniques (normalization, rgb to grayscale, etc)
Number of examples per label (some have more than others).
Generate fake data.
Here is an example of a published baseline model on this problem. It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
NOTE: The LeNet-5 implementation shown in the classroom at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
Preprocess Data
Shuffle the training data.
End of explanation
"""
from tensorflow.contrib.layers import flatten
import tensorflow as tf
model_name = 'lenet_report'
EPOCHS = 40
BATCH_SIZE = 120
def LeNet(x):
    """Build the LeNet-5 graph for 32x32x3 images and return class logits.

    Architecture: conv(5x5, 6) -> relu -> maxpool -> conv(5x5, 16) -> relu
    -> maxpool -> flatten(400) -> fc(120) -> relu -> fc(84) -> relu ->
    dropout -> fc(43 logits).  Weights are initialised from a truncated
    normal with the ``mu``/``sigma`` hyperparameters below.

    NOTE(review): ``keep_prob`` is hard-coded to 1, so the dropout layer
    below is a no-op during both training and inference.
    """
    # Hyperparameters
    mu = 0
    sigma = 0.01
    keep_prob = 1  # dropout keep probability; 1 disables dropout entirely
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)
    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)
    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)
    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, keep_prob)  # no-op while keep_prob == 1 (see docstring)
    # Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
"""
Explanation: Setup TensorFlow
The EPOCH and BATCH_SIZE values affect the training speed and model accuracy.
Implement LeNet-5
Implement the LeNet-5 neural network architecture.
Input
The LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since the images are color, C is 3 in this case.
Architecture
Layer 1: Convolutional. The output shape should be 28x28x6.
Activation. Your choice of activation function.
Pooling. The output shape should be 14x14x6.
Layer 2: Convolutional. The output shape should be 10x10x16.
Activation. Your choice of activation function.
Pooling. The output shape should be 5x5x16.
Flatten. Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. The easiest way to do is by using tf.contrib.layers.flatten, which is already imported for you.
Layer 3: Fully Connected. This should have 120 outputs.
Activation. Your choice of activation function.
Layer 4: Fully Connected. This should have 84 outputs.
Activation. Your choice of activation function.
Layer 5: Fully Connected (Logits). This should have 43 outputs.
Output
Return the result of the 2nd fully connected layer.
End of explanation
"""
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
### Train your model here.
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Compute model accuracy over a dataset, batch by batch.

    Runs the module-level ``accuracy_operation`` in the default TF session
    and returns a weighted average so the final partial batch is counted
    correctly.
    """
    sess = tf.get_default_session()
    n = len(X_data)
    weighted_correct = 0.0
    for start in range(0, n, BATCH_SIZE):
        stop = start + BATCH_SIZE
        batch_x = X_data[start:stop]
        batch_y = y_data[start:stop]
        acc = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        weighted_correct += acc * len(batch_x)
    return weighted_correct / n
# Train for EPOCHS passes over the training set, reporting validation
# accuracy after each epoch, then checkpoint the trained weights.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        # reshuffle each epoch so mini-batches differ between passes
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    # persist weights so later cells can reload them via saver.restore
    saver.save(sess, './models/'+model_name)
    print("Model saved")
"""
Explanation: Features and Labels
Train LeNet to classify input data.
x is a placeholder for a batch of input images.
y is a placeholder for a batch of output labels.
End of explanation
"""
with tf.Session() as sess:
print ('loading '+model_name+'...')
saver.restore(sess, './models/'+model_name)
print('loaded')
test_accuracy = evaluate(X_test, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
"""
Explanation: Question 1
Describe how you preprocessed the data. Why did you choose that technique?
Answer:
For pre-processing Min-Max normalization. Where I normalized test and train data. I normalized the data in the image by coverting all 0 - 255 values to 0 - 1, by doing this we can achieve close to 0 mean and equal variance.
Also, I didn't convert image to greyscale inorder to retain all the properties of 3 channels so that I can extract more information when doing convolution.
Question 2
Describe how you set up the training, validation and testing data for your model. Optional: If you generated additional data, how did you generate the data? Why did you generate the data? What are the differences in the new dataset (with generated data) from the original dataset?
Answer:
I used test and train split function to split training data as train and validate data. I am using 20% of train data for validation. This is necessary as we need to validate the training so that we can measure the prediction accuracy after training.
Question 3
What does your final architecture look like? (Type of model, layers, sizes, connectivity, etc.) For reference on how to build a deep neural network using TensorFlow, see Deep Neural Network in TensorFlow
from the classroom.
Answer:
I reused the LeNet lab, which is LeNet-5 neural network architecture.
Five layers looks as below,
Layer 1: Convolutional. The output shape is 28x28x6.
Activation. I am using RELU activation layer.
Pooling. I am using Max Pooling which outputs the shape 14x14x6.
Layer 2: Convolutional. The output shape is 10x10x16.
Activation. I am using RELU activation layer.
Pooling. I am using Max Pooling which outputs the shape 5x5x16.
Flatten. Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. I do is by using
tf.contrib.layers.flatten.
Layer 3: Is a Fully Connected layer with 120 outputs.
Activation. I am using RELU activation layer.
Layer 4: Is a Fully Connected layer with 84 outputs.
Activation. I am using RELU activation layer.
Layer 5: Fully Connected (Logits) with 43 outputs.
Question 4
How did you train your model? (Type of optimizer, batch size, epochs, hyperparameters, etc.)
Answer:
The EPOCH and BATCH_SIZE values affect the training speed and model accuracy. I tried various combinations of epochs, batch size and learning rate. Finally I got an accuracy of 98% with 40 Epochs, Batch Size of 150 and learning rate of 0.01. I didn't modify any other hyperparameters. Even though I tried dropout of 0.5 it didn't yield me good results.
Question 5
What approach did you take in coming up with a solution to this problem? It may have been a process of trial and error, in which case, outline the steps you took to get to the final solution and why you chose those steps. Perhaps your solution involved an already well known implementation or architecture. In this case, discuss why you think this is suitable for the current problem.
Answer:
I followed LeNet architecture approach discussed in the convolutional neural networks. I followed this approach as it looked more efficient method of training. I spent lot of time trying to improve training accuracy. Some preprocessing like normalizing the input, then I used LeNet involving convolution network, RELU, MAX Pooling. I spent lot of time tuning the hyperparamters like dropout, standard deviation etc.., Also I tried different combinations of Epochs, Learning rate and Batch size. After getting training accuracy of 98% I think my approach is good enough for this project.
End of explanation
"""
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
# load test images
from skimage import io
import numpy as np
import os
images = os.listdir("testImages/")
images.sort()
num_imgs = len(images)
test_imgs = np.uint8(np.zeros((num_imgs,32,32,3)))
labels = ['?', 29, 28, 33, 5, 14, 18, 17, 34]
for i, j in enumerate(images):
image = io.imread('./testImages/'+j)
test_imgs[i] = image
# Normalize train features and test features
test_imgs = normalize(test_imgs.reshape((-1, 32, 32, 3)).astype(np.float32))
import matplotlib.pyplot as plt
f, ax = plt.subplots(num_imgs, 1)
for i in range(num_imgs):
ax[i].imshow(test_imgs[i])
plt.setp(ax[i].get_xticklabels(), visible=False)
plt.setp(ax[i].get_yticklabels(), visible=False)
plt.show()
test_imgs.shape
"""
Explanation: Step 3: Test a Model on New Images
Take several pictures of traffic signs that you find on the web or around you (at least five), and run them through your classifier on your computer to produce example results. The classifier might not recognize some local signs but it could prove interesting nonetheless.
You may find signnames.csv useful as it contains mappings from the class id (integer) to the actual sign name.
Implementation
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. Once you have completed your implementation and are satisfied with the results, be sure to thoroughly answer the questions that follow.
Question 6
Choose five candidate images of traffic signs and provide them in the report. Are there any particular qualities of the image(s) that might make classification difficult? It could be helpful to plot the images in the notebook.
Answer:
I have used 9 images (Thanks to Tyler Lanigan for the images).
A summary of the test signs and their categories is shown in the following table:
| Test Image | Sign Category |
|------------|--------------------------|
| 1 | Wild Animals Crossing - ?|
| 2 | Bicycles crossing - 29 |
| 3 | Children Crossing - 28 |
| 4 | Turn Right ahead - 33 |
| 5 | Speed limit (80km/h) - 5 |
| 6 | Stop - 14 |
| 7 | General Caution - 18 |
| 8 | No Entry - 17 |
| 9 | Turn Left ahead - 34 |
Among these, first image is not in any of the sign category and few images are not in the dataset. Hence it makes it difficult for the model to classify. But I am expecting close guess for those!
End of explanation
"""
import tensorflow as tf
model_name = 'lenet_report'
predictions = tf.nn.softmax(logits)
def classify_images(X_data):
    """Run the module-level softmax ``predictions`` op on a batch of images
    and return the per-class probabilities."""
    session = tf.get_default_session()
    return session.run(predictions, feed_dict={x: X_data})
with tf.Session() as sess:
print ('loading '+model_name+'...')
saver.restore(sess, './models/'+model_name)
predictions = classify_images(test_imgs)
top_k = sess.run(tf.nn.top_k(predictions, 5, sorted=True))
print("Predicted Labels:", np.argmax(predictions, 1))
print("Expected Labels: ", labels)
"""
Explanation: Question 7
Is your model able to perform equally well on captured pictures when compared to testing on the dataset? The simplest way to do this check the accuracy of the predictions. For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate.
NOTE: You could check the accuracy manually by using signnames.csv (same directory). This file has a mapping from the class id (0-42) to the corresponding sign name. So, you could take the class id the model outputs, lookup the name in signnames.csv and see if it matches the sign from the image.
Answer:
As seen in the result below, we can see that images 3, 4, 5, 6, 7, 8 are predicted correctly. Label for first image is not there in the csv file provided. 2 and 9 are not predicted properly. So our model has an accuracy of 75 percent [(6/8)*100]. This is expected as few images are not in our dataset. However, my accuracy is much lesser than test accuracy which is 0.98. Model can be further improved by adding more layers in LeNet or using better architecture like Keras, or simply by removing explicit learning rate and asking Adam Optimizer to choose one!!!
End of explanation
"""
N = 5
ind = np.arange(N) # the x locations for the values
for i in range(5):
plt.figure(i)
values = top_k[0][i]
plt.bar(range(N), values, 0.40, color='g')
plt.ylabel('Probabilities')
plt.xlabel('Class Labels')
plt.title('Top {} Softmax Probabilities for test-image{}'.format(N, str(i+1)))
plt.xticks(ind+0.40, tuple(top_k[1][i]))
plt.show()
"""
Explanation: Question 8
Use the model's softmax probabilities to visualize the certainty of its predictions, tf.nn.top_k could prove helpful here. Which predictions is the model certain of? Uncertain? If the model was incorrect in its initial prediction, does the correct prediction appear in the top k? (k should be 5 at most)
tf.nn.top_k will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.
Take this numpy array as an example:
```
(5, 6) array
a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
0.12789202],
[ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
0.15899337],
[ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
0.23892179],
[ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
0.16505091],
[ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
0.09155967]])
```
Running it through sess.run(tf.nn.top_k(tf.constant(a), k=3)) produces:
TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
[ 0.28086119, 0.27569815, 0.18063401],
[ 0.26076848, 0.23892179, 0.23664738],
[ 0.29198961, 0.26234032, 0.16505091],
[ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
[0, 1, 4],
[0, 5, 1],
[1, 3, 5],
[1, 4, 3]], dtype=int32))
Looking just at the first row we get [ 0.34763842, 0.24879643, 0.12789202], you can confirm these are the 3 largest probabilities in a. You'll also notice [3, 0, 5] are the corresponding indices.
Answer:
Of the 5 visualized predictions, the model incorrectly predicted first and second. The first prediction is for a picture of a horse crossing the road. As there is no "horse crossing" sign in the German traffic Sign dataset, it is expected that the model will have trouble identifying it, however, I would consider a correct prediction to be Wild Animal crossing, or class number 31.
According to the above visualization, in all five cases, neural network model makes its first choice with the highest probability (almost 100 percent) and other four choices are almost negligible. For me, it looks like a little bit of warning. Hence, further investigation should be made to understand this behaviour.
End of explanation
"""
|
walkon302/CDIPS_Recommender | notebooks/Exploring_Data.ipynb | apache-2.0 | import sys
import os
sys.path.append(os.getcwd()+'/../')
# other
import numpy as np
import glob
import pandas as pd
import ntpath
#keras
from keras.preprocessing import image
# plotting
import seaborn as sns
sns.set_style('white')
import matplotlib.pyplot as plt
%matplotlib inline
# debuggin
from IPython.core.debugger import Tracer
#stats
import scipy.stats as stats
import bqplot.pyplot as bqplt
"""
Explanation: Data Exploration
End of explanation
"""
user_profile = pd.read_csv('../data_user_view_buy/user_profile.csv',sep='\t',header=None)
user_profile.columns = ['user_id','buy_spu','buy_sn','buy_ct3','view_spu','view_sn','view_ct3','time_interval','view_cnt','view_seconds']
string =str(user_profile.buy_spu.as_matrix()[3002])
print(string)
print(string[0:7]+'-'+string[7::])
#print(str(user_profile.buy_spu.as_matrix()[0])[7::])
user_profile.head(10)
print('n rows: {0}').format(len(user_profile))
"""
Explanation: Data File
End of explanation
"""
def plot_trajectory_scatter(user_profile,scatter_color_col=None,samplesize=50,size=10,savedir=None):
    """Scatter-plot the view histories of ``samplesize`` randomly chosen users.

    Each horizontal row of points is one user; the x-axis is days before
    that user's purchase (the purchase itself is marked by a vertical line
    at 0).  If ``scatter_color_col`` is given, points are coloured by that
    column's values; if ``savedir`` is given the figure is also saved there
    at 100 dpi.
    """
    plt.figure(figsize=(12, 1 * samplesize / 10))
    sampled_users = np.random.choice(user_profile.user_id.unique(), samplesize)
    for row, uid in enumerate(sampled_users):
        trajectory = user_profile.loc[user_profile.user_id == uid, ]
        # time_interval is seconds before purchase; convert to (negative) days
        days_before_buy = 0 - trajectory.time_interval.as_matrix() / 60.0 / 60.0 / 24.0
        if scatter_color_col is None:
            colors = np.ones(len(trajectory))
        else:
            colors = trajectory[scatter_color_col].as_matrix()
        plt.scatter(days_before_buy, np.ones(len(days_before_buy)) * row,
                    s=size, c=colors, edgecolors="none", cmap="jet")
    plt.axvline(x=0, linewidth=1)
    sns.despine()
    plt.title('example user trajectories')
    plt.xlabel('days to purchase')
    if savedir is not None:
        plt.savefig(savedir, dpi=100)
"""
Explanation: Plotting Functions
End of explanation
"""
user_profile.describe()
print('unique users:{0}').format(len(user_profile.user_id.unique()))
print('unique items viewed:{0}').format(len(user_profile.view_spu.unique()))
print('unique items bought:{0}').format(len(user_profile.buy_spu.unique()))
print('unique categories viewed:{0}').format(len(user_profile.view_ct3.unique()))
print('unique categories bought:{0}').format(len(user_profile.buy_ct3.unique()))
print('unique brands viewed:{0}').format(len(user_profile.view_sn.unique()))
print('unique brands bought:{0}').format(len(user_profile.buy_sn.unique()))
samplesize = 2000
plt.figure(figsize=(12,4))
plt.subplot(1,3,1)
plt.hist(np.random.choice(user_profile.time_interval.as_matrix()/60.0/60.0,samplesize))
sns.despine()
plt.title('sample histogram from "time interval"')
plt.xlabel('hours from view to buy')
plt.ylabel('counts of items')
plt.subplot(1,3,2)
plt.hist(np.random.choice(user_profile.view_cnt.as_matrix(),samplesize))
sns.despine()
plt.title('sample histogram from "view count"')
plt.xlabel('view counts')
plt.ylabel('counts of items')
plt.subplot(1,3,3)
plt.hist(np.random.choice(user_profile.view_seconds.as_matrix(),samplesize))
sns.despine()
plt.title('sample histogram from "view lengths"')
plt.xlabel('view lengths (seconds)')
plt.ylabel('counts of items')
"""
Explanation: Descriptions of Data
End of explanation
"""
print('longest time interval')
print(user_profile.time_interval.min())
print('longest time interval')
print(user_profile.time_interval.max()/60.0/60.0/24)
"""
Explanation: there are many items that are viewed more than a day before buying
most items are viewed less than 10 times and for less than a couple minutes (though need to zoom in)
End of explanation
"""
mean_time_interval = np.array([])
samplesize =1000
for user_id in np.random.choice(user_profile.user_id.unique(),samplesize):
mean_time_interval = np.append(mean_time_interval, user_profile.loc[user_profile.user_id==user_id,'time_interval'].mean())
plt.figure(figsize=(12,3))
plt.hist(mean_time_interval/60.0,bins=200)
sns.despine()
plt.title('sample histogram of average length for user trajectories"')
plt.xlabel('minutes')
plt.ylabel('counts of items out of '+str(samplesize))
"""
Explanation: longest span from viewing to buying is 6 days
Average Time for Items Viewed before Being Bought
End of explanation
"""
plt.figure(figsize=(12,3))
plt.hist(mean_time_interval/60.0,bins=1000)
plt.xlim(0,100)
sns.despine()
plt.title('sample histogram of average length for user trajectories"')
plt.xlabel('minutes')
plt.ylabel('counts of items out of '+str(samplesize))
"""
Explanation: 5% look like they have relatively short sessions (maybe within one sitting)
End of explanation
"""
plt.figure(figsize=(8,3))
plt.hist(mean_time_interval/60.0,bins=200,cumulative=True,normed=True)
plt.xlim(0,2000)
sns.despine()
plt.title('sample cdf of average length for user trajectories"')
plt.xlabel('minutes')
plt.ylabel('counts of items out of '+str(samplesize))
"""
Explanation: zooming in to look at the shortest sessions.
about 7% have sessions <10 minutes
End of explanation
"""
user_id = 1606682799
trajectory = user_profile.loc[user_profile.user_id==user_id,]
trajectory= trajectory.sort_values(by='time_interval',ascending=False)
trajectory
"""
Explanation: 20% has sessions less <100 minutes
Example Trajectories
End of explanation
"""
plot_trajectory_scatter(user_profile)
"""
Explanation: this is an example trajectory of someone who browsed a few items and then bought item 31.. within the same session.
End of explanation
"""
samplesize =1000
number_of_times_item_bought = np.empty(samplesize)
number_of_times_item_viewed = np.empty(samplesize)
for ii,item_id in enumerate(np.random.choice(user_profile.view_spu.unique(),samplesize)):
number_of_times_item_bought[ii] = len(user_profile.loc[user_profile.buy_spu==item_id,'user_id'].unique()) # assume the same user would not buy the same product
number_of_times_item_viewed[ii] = len(user_profile.loc[user_profile.view_spu==item_id]) # same user can view the same image more than once for this count
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.bar(np.arange(len(number_of_times_item_bought)),number_of_times_item_bought)
sns.despine()
plt.title('item popularity (purchases)')
plt.xlabel('item')
plt.ylabel('# of times items were bought')
plt.subplot(1,2,2)
plt.hist(number_of_times_item_bought,bins=100)
sns.despine()
plt.title('item popularity (purchases)')
plt.xlabel('# of times items were bought sample size='+str(samplesize))
plt.ylabel('# of items')
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.bar(np.arange(len(number_of_times_item_viewed)),number_of_times_item_viewed)
sns.despine()
plt.title('item popularity (views)')
plt.xlabel('item')
plt.ylabel('# of times items were viewed')
plt.subplot(1,2,2)
plt.hist(number_of_times_item_bought,bins=100)
sns.despine()
plt.title('item popularity (views) sample size='+str(samplesize))
plt.xlabel('# of times items were viewed')
plt.ylabel('# of items')
plt.figure(figsize=(6,4))
plt.subplot(1,1,1)
thresh =30
include = number_of_times_item_bought<thresh
plt.scatter(number_of_times_item_viewed[include],number_of_times_item_bought[include],)
(r,p) = stats.pearsonr(number_of_times_item_viewed[include],number_of_times_item_bought[include])
sns.despine()
plt.xlabel('number of times viewed')
plt.ylabel('number of times bought')
plt.title('r='+str(np.round(r,2))+' data truncated buys<'+str(thresh))
"""
Explanation: here are 50 random subjects and when they view items (could make into an interactive plot)
What's the distribution of items that are bought? Are there some items that are much more popular than others?
End of explanation
"""
samplesize =1000
items_bought_per_user = np.empty(samplesize)
items_viewed_per_user = np.empty(samplesize)
for ui,user_id in enumerate(np.random.choice(user_profile.user_id.unique(),samplesize)):
items_bought_per_user[ui] = len(user_profile.loc[user_profile.user_id==user_id,'buy_spu'].unique())
items_viewed_per_user[ui] = len(user_profile.loc[user_profile.user_id==user_id,'view_spu'].unique())
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.hist(items_bought_per_user)
sns.despine()
plt.title('number of items bought per user (sample of 1000)')
plt.xlabel('# items bought')
plt.ylabel('# users')
plt.subplot(1,2,2)
plt.hist(items_viewed_per_user)
sns.despine()
plt.title('number of items viewed per user (sample of 1000)')
plt.xlabel('# items viewed')
plt.ylabel('# users')
"""
Explanation: Items bought and viewed per user?
End of explanation
"""
urls = pd.read_csv('../../deep-learning-models-master/img/eval_img_url.csv',header=None)
urls.columns = ['spu','url']
print(len(urls))
urls.head(10)
urls[['spu','url']].groupby(['spu']).agg(['count']).head()
"""
Explanation: How many times did the user buy an item he/she already looked at?
Image URLs
How many of the SPUs in our dataset (smaller) have urls in our url.csv?
End of explanation
"""
urls.loc[urls.spu==357870273655002,'url'].as_matrix()
urls.loc[urls.spu==357889732772303,'url'].as_matrix()
"""
Explanation: items with more than one url?
End of explanation
"""
#urls.loc[urls.spu==1016200950427238422,'url']
# All image URLs recorded for one SPU.
# .as_matrix() was removed in pandas 1.0 -- .to_numpy() is the replacement.
tmp_urls = urls.loc[urls.spu==1016200950427238422,'url'].to_numpy()
tmp_urls
# Python 3 moved urlretrieve into urllib.request
# (the original `from urllib import urlretrieve` is Python 2 only).
from urllib.request import urlretrieve
import time
# scrape images
for i, tmp_url in enumerate(tmp_urls):
    urlretrieve(tmp_url, '../data_img_tmp/{}.jpg'.format(i))
    #time.sleep(3)
# plot them.
print('two images from url with same spu (ugh)')
plt.figure(figsize=(8,3))
for i, tmp_url in enumerate(tmp_urls):
    img_path = '../data_img_tmp/{}.jpg'.format(i)
    img = image.load_img(img_path, target_size=(224, 224))
    plt.subplot(1, len(tmp_urls), i+1)
    plt.imshow(img)
    plt.grid(b=False)  # NOTE(review): `b` was renamed `visible` in matplotlib 3.5+
"""
Explanation: these are the same item, just different images.
End of explanation
"""
# Peek at the first SPU / URL pair -- the URL embeds the SPU id plus other
# numbers whose meaning is unclear (see the note in the next cell).
urls.spu[0]
urls.url[0]
"""
Explanation: These are different thought!!
End of explanation
"""
# What fraction of viewed / bought SPUs have an image URL?
view_spus = user_profile.view_spu.unique()
contained = 0
# A set gives O(1) membership tests (the original list made each test O(n));
# .to_numpy() replaces the removed pandas .as_matrix().
spus_with_url = set(urls.spu.to_numpy())
for view_spu in view_spus:
    if view_spu in spus_with_url:
        contained += 1
# np.float was removed in NumPy 1.24 -- the builtin float is equivalent here.
print(contained/float(len(view_spus)))
buy_spus = user_profile.buy_spu.unique()
contained = 0
for buy_spu in buy_spus:
    if buy_spu in spus_with_url:
        contained += 1
print(contained/float(len(buy_spus)))
"""
Explanation: the url contains the spu, but I'm not sure what the other numbers are. The goods_num? The category etc?
End of explanation
"""
# `buy_spu` is the leftover loop variable from the cell above: was the
# last bought SPU's URL available?
buy_spu in spus_with_url
len(urls.spu.unique())
len(user_profile.view_spu.unique())
"""
Explanation: we only have the url for 7% of the bought items and 9% of the viewed items
End of explanation
"""
# Load the per-item CNN feature vectors and left-join them onto the viewing log.
spu_fea = pd.read_pickle("../data_nn_features/spu_fea.pkl") #takes forever to load
spu_fea['view_spu'] = spu_fea['spu_id']
user_profile_w_features = user_profile.merge(spu_fea, on='view_spu', how='left')
# The original called .format() on print()'s return value (None), which
# raises AttributeError -- format the string before printing.
print('before merge nrow: {0}'.format(len(user_profile)))
print('after merge nrows:{0}'.format(len(user_profile_w_features)))
print('number of items with features: {0}'.format(len(spu_fea)))
spu_fea.head()
# merge with userdata (repeats the merge above; harmless but redundant)
spu_fea['view_spu'] = spu_fea['spu_id']
user_profile_w_features = user_profile.merge(spu_fea, on='view_spu', how='left')
print('before merge nrow: {0}'.format(len(user_profile)))
print('after merge nrows:{0}'.format(len(user_profile_w_features)))
# NOTE(review): np.isnan(spu_id) is True when the features are MISSING, so
# 'has_features' is inverted relative to its name. Kept as-is because later
# cells (e.g. the `1 - ...isnan...mean()` computation) rely on this encoding.
user_profile_w_features['has_features'] = user_profile_w_features.groupby(['view_spu'])['spu_id'].apply(lambda x: np.isnan(x))
user_profile_w_features.has_features = user_profile_w_features.has_features.astype('int')
user_profile_w_features.head()
"""
Explanation: Are the images we have in this new dataset?
at the moment, I don't know how to find the spu of the images we have.
Viewing DataSet with Feature Data in
End of explanation
"""
plot_trajectory_scatter(user_profile_w_features,scatter_color_col='has_features',samplesize=100,size=10,savedir='../../test.png')
"""
Explanation: Plotting Trajectories and Seeing How many features we have
End of explanation
"""
1-(user_profile_w_features['features'].isnull()).mean()
"""
Explanation: What percent of rows have features?
End of explanation
"""
# Per distinct item: share of viewed SPUs whose merged spu_id is non-NaN,
# i.e. the fraction of unique items that have features.
1-user_profile_w_features.groupby(['view_spu'])['spu_id'].apply(lambda x: np.isnan(x)).mean()
buy_spus = user_profile.buy_spu.unique()
contained = 0
# A set gives O(1) membership tests (the original list made each test O(n));
# .to_numpy() replaces the removed pandas .as_matrix().
spus_with_features = set(spu_fea.spu_id.to_numpy())
for buy_spu in buy_spus:
    if buy_spu in spus_with_features:
        contained += 1
# np.float was removed in NumPy 1.24 -- the builtin float is equivalent here.
print(contained/float(len(buy_spus)))
contained
len(buy_spus)
view_spus = user_profile.view_spu.unique()
contained = 0
for view_spu in view_spus:
    if view_spu in spus_with_features:
        contained += 1
print(contained/float(len(view_spus)))
len(view_spus)
"""
Explanation: What percent of bought items are in the feature list?
End of explanation
"""
# Evaluation dataset: trajectories with non-null features, <= 20 minutes,
# and >= 5 views.
user_profile = pd.read_pickle('../data_user_view_buy/user_profile_items_nonnull_features_20_mins_5_views.pkl')
len(user_profile)
# The original called .format() on print()'s return value (None), which
# raises AttributeError -- format the string before printing.
print('unique users:{0}'.format(len(user_profile.user_id.unique())))
print('unique items viewed:{0}'.format(len(user_profile.view_spu.unique())))
print('unique items bought:{0}'.format(len(user_profile.buy_spu.unique())))
print('unique categories viewed:{0}'.format(len(user_profile.view_ct3.unique())))
print('unique categories bought:{0}'.format(len(user_profile.buy_ct3.unique())))
print('unique brands viewed:{0}'.format(len(user_profile.view_sn.unique())))
print('unique brands bought:{0}'.format(len(user_profile.buy_sn.unique())))
#user_profile.groupby(['user_id'])['buy_spu'].nunique()
# how many items bought per user in this dataset?
plt.figure(figsize=(8,3))
# The `normed` kwarg was removed from plt.hist (replaced by `density`);
# normed=False was the default behavior, so the argument is simply dropped.
plt.hist(user_profile.groupby(['user_id'])['buy_spu'].nunique(), bins=20)
sns.despine()
plt.xlabel('number of items bought per user')
plt.ylabel('number of user')
user_profile.loc[user_profile.user_id==4283991208,].head()
"""
Explanation: Evaluation Dataset
End of explanation
"""
user_profile.loc[user_profile.user_id==6539296,].head()
"""
Explanation: some people have longer viewing trajectories. The first item was viewed 28 hours ahead of the purchase.
End of explanation
"""
plot_trajectory_scatter(user_profile,samplesize=100,size=10,savedir='../figures/trajectories_evaluation_dataset.png')
"""
Explanation: this person bought two items.
End of explanation
"""
# Same summary for the 1000-user sample of the evaluation dataset (v2).
user_profile = pd.read_pickle('../data_user_view_buy/user_profile_items_nonnull_features_20_mins_5_views_v2_sample1000.pkl')
# The original called .format() on print()'s return value (None), which
# raises AttributeError -- format the string before printing.
print('unique users:{0}'.format(len(user_profile.user_id.unique())))
print('unique items viewed:{0}'.format(len(user_profile.view_spu.unique())))
print('unique items bought:{0}'.format(len(user_profile.buy_spu.unique())))
print('unique categories viewed:{0}'.format(len(user_profile.view_ct3.unique())))
print('unique categories bought:{0}'.format(len(user_profile.buy_ct3.unique())))
print('unique brands viewed:{0}'.format(len(user_profile.view_sn.unique())))
print('unique brands bought:{0}'.format(len(user_profile.buy_sn.unique())))
# how many items bought per user in this dataset?
plt.figure(figsize=(8,3))
# `normed=False` dropped: the kwarg was removed from plt.hist and False was
# the default behavior anyway.
plt.hist(user_profile.groupby(['user_id'])['buy_spu'].nunique(), bins=20)
sns.despine()
plt.xlabel('number of items bought per user')
plt.ylabel('number of user')
"""
Explanation: I'd like to make this figure better - easier to tell which rows people are on
Evaluation Dataset Sample 1000
End of explanation
"""
%%bash
# Export this notebook to slides and HTML, and archive a versioned copy.
jupyter nbconvert --to slides Exploring_Data.ipynb && mv Exploring_Data.slides.html ../notebook_slides/Exploring_Data_v2.slides.html
jupyter nbconvert --to html Exploring_Data.ipynb && mv Exploring_Data.html ../notebook_htmls/Exploring_Data_v2.html
cp Exploring_Data.ipynb ../notebook_versions/Exploring_Data_v2.ipynb
# push to s3
import sys
import os
sys.path.append(os.getcwd()+'/../')
from src import s3_data_management
# NOTE(review): these upload the _v1 files while the bash cell above produced
# _v2, and the second key is misspelled ('Exporing') -- confirm intent.
s3_data_management.push_results_to_s3('Exploring_Data_v1.html','../notebook_htmls/Exploring_Data_v1.html')
s3_data_management.push_results_to_s3('Exporing_Data_v1.slides.html','../notebook_slides/Exploring_Data_v1.slides.html')
"""
Explanation: Save Notebook
End of explanation
"""
|
bhargavvader/pycobra | docs/notebooks/visualise.ipynb | mit | %matplotlib inline
import numpy as np
from pycobra.cobra import Cobra
from pycobra.ewa import Ewa
from pycobra.visualisation import Visualisation
from pycobra.diagnostics import Diagnostics
# Synthetic regression data with four equal splits:
# D1 = train the base machines; D2 = build COBRA; D3 = calibrate
# epsilon / alpha; D4 = held-out testing.
rng = np.random.RandomState(42)
n_features = 2
D1, D2, D3, D4 = 200, 200, 200, 200
D = D1 + D2 + D3 + D4
X = rng.uniform(-1, 1, D * n_features).reshape(D, n_features)
# Target is a simple polynomial of the two features.
Y = X[:, 0] ** 2 + X[:, 1] ** 3
# Named split boundaries keep the slicing readable.
train_end = D1 + D2
eps_end = train_end + D3
X_train = X[:train_end]
X_eps = X[train_end:eps_end]
X_test = X[eps_end:eps_end + D4]
Y_train = Y[:train_end]
Y_eps = Y[train_end:eps_end]
Y_test = Y[eps_end:eps_end + D4]
# set up our COBRA machine with the data
cobra = Cobra(epsilon=0.5)  # epsilon: distance threshold for the consensus step
cobra.fit(X_train, Y_train)
"""
Explanation: COBRA Visualisations
This notebook will cover the visulaisation and plotting offered by pycobra.
End of explanation
"""
# Wrap the fitted aggregate in the Visualisation helper (evaluated on the
# held-out test split) and plot the constituent machines' predictions.
cobra_vis = Visualisation(cobra, X_test, Y_test)
# to plot our machines, we need a linspace as input. This is the 'scale' to plot and should be the range of the results
# since our data ranges from -1 to 1 it is such - and we space it out to a hundred points
cobra_vis.plot_machines(machines=["COBRA"])
cobra_vis.plot_machines()
"""
Explanation: Plotting COBRA
We use the visualisation class to plot our results, and for various visualisations.
End of explanation
"""
# Q-Q plot and boxplot of the machines' errors on the test set.
cobra_vis.QQ()
cobra_vis.boxplot()
"""
Explanation: Plots and Visualisations of Results
QQ and Boxplots!
End of explanation
"""
# Exponentially Weighted Aggregate: calibrate beta on the eps split, fit
# on the training split, then visualise the same way as COBRA above.
ewa = Ewa()
ewa.set_beta(X_beta=X_eps, y_beta=Y_eps)
ewa.fit(X_train, Y_train)
ewa_vis = Visualisation(ewa, X_test, Y_test)
ewa_vis.QQ("EWA")
ewa_vis.boxplot()
"""
Explanation: Plotting EWA!
We can use the same visualisation class for seeing how EWA works. Let's demonstrate this!
End of explanation
"""
from sklearn import datasets
from sklearn.metrics import accuracy_score
from pycobra.classifiercobra import ClassifierCobra

# Breast-cancer data: hold out the last 40 rows for evaluation.
bc = datasets.load_breast_cancer()
holdout = 40
X_cc, X_cc_test = bc.data[:-holdout], bc.data[-holdout:]
y_cc, y_cc_test = bc.target[:-holdout], bc.target[-holdout:]

# Fit the classification aggregate and plot its per-machine errors.
cc = ClassifierCobra()
cc.fit(X_cc, y_cc)
cc_vis = Visualisation(cc, X_cc_test, y_cc_test)
cc_vis.boxplot()
"""
Explanation: Plotting ClassifierCobra
End of explanation
"""
# ClassifierCobra follows the scikit-learn estimator API, so standard
# metrics work directly on its predictions.
from sklearn.metrics import classification_report
print(classification_report(y_cc_test, cc.predict(X_cc_test)))
"""
Explanation: Remember that all the estimators in the Pycobra package are scikit-learn compatible - we can also use the scikit-learn metrics and tools to analyse our machines!
End of explanation
"""
# For each point, record which machines are selected at epsilon=0.5, then
# color the points by the selecting machine(s): first one machine at a
# time (single=True), then with mixed colors.
indices, MSE = cobra_vis.indice_info(X_test=X_eps[0:50], y_test=Y_eps[0:50], epsilon=0.50)
cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices, single=True)
cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices)
"""
Explanation: Plotting COBRA colors!
We're now going to experiment with plotting colors and data.
After we get information about which indices are used by which machines the best for a fixed epsilon (or not, we can toggle this option), we can plot the distribution of machines.
Why is this useful? Since we're dealing with a 2-D space now, we're attempting to see if there are some parts in the input space which are picked up by certain machines. This could lead to interesting experiments and
We first present a plot where the machine colors are mixed depending on which machines were selected; after which we plot one machine at a time.
End of explanation
"""
# Voronoi tessellation of the input space, colored by selected machines
# (single=True plots one machine per panel).
cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, single=True)
cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices)
"""
Explanation: Voronoi Tesselation
We present a variety of Voronoi Tesselation based plots - the purpose of this is to help in visualising the pattern of points which tend to be picked up.
End of explanation
"""
cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, MSE=MSE, gradient=True)
"""
Explanation: Gradient-Colored Based Voronoi
End of explanation
"""
|
cavestruz/MLPipeline | notebooks/anomaly_detection/sample_anomaly_detection_stueber.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
%matplotlib inline
"""
Explanation: Let us first explore an example that falls under novelty detection. Here, we train a model on data with some distribution and no outliers. The test data, has some "novel" subset of data that does not follow that distribution.
End of explanation
"""
# Training set: two well-separated 2-D Gaussian clusters of 1000 points
# each (means 3 and 2, shared sigma = 0.1).
mu, sigma = 3, 0.1
x = np.random.normal(mu, sigma, 1000)
y = np.random.normal(mu, sigma, 1000)
x_0 = np.random.normal(2, sigma, 1000)
y_0 = np.random.normal(2, sigma, 1000)
# Stack the two clusters into a single (2000, 2) array of (x, y) points.
# (The original index loop used x_0[i-1001], which skipped the last sample
# and wrapped index -1 back to the end of the array -- an off-by-one bug.)
X_train_normal = np.vstack([np.column_stack((x, y)),
                            np.column_stack((x_0, y_0))])
print(X_train_normal)  # was `print(xy)`, an undefined name
"""
Explanation: Use the np.random module to generate a normal distribution of 1,000 data points in two dimensions (e.g. x, y) - choose whatever mean and sigma^2 you like. Generate another 1,000 data points with a normal distribution in two dimensions that are well separated from the first set. You now have two "clusters". Concatenate them so you have 2,000 data points in two dimensions. Plot the points. This will be the training set.
End of explanation
"""
# Plot the two training clusters. (print(p_y) looks like leftover
# debugging output.)
p_x=X_train_normal[:,0]
p_y=X_train_normal[:,1]
print(p_y)
plt.scatter(p_x,p_y)
plt.show()
"""
Explanation: Plot the points.
End of explanation
"""
# Test set drawn from the SAME two Gaussian clusters: 100 points around
# (2, 2) and 100 around (3, 3), each with sigma = 0.1.
X_test_normal=np.concatenate((0.1*np.random.randn(100,2)+2,0.1*np.random.randn(100,2)+3))
plt.scatter(X_test_normal[:,0],X_test_normal[:,1])
"""
Explanation: Generate 100 data points with the same distribution as your first random normal 2-d set, and 100 data points with the same distribution as your second random normal 2-d set. This will be the test set labeled X_test_normal.
End of explanation
"""
# "Novel" test set: 100 points uniform on [3, 4) x [3, 4) -- a different
# distribution from the training clusters.
X_test_uniform=np.random.rand(100,2)+3
plt.scatter(X_test_uniform[:,0],X_test_uniform[:,1])
"""
Explanation: Generate 100 data points with a random uniform distribution. This will be the test set labeled X_test_uniform.
End of explanation
"""
model = svm.OneClassSVM()
"""
Explanation: Define a model classifier with the svm.OneClassSVM
End of explanation
"""
model.fit(X_train_normal)
"""
Explanation: Fit the model to the training data.
End of explanation
"""
# predict() returns +1 (inlier) / -1 (outlier); subtracting 1 maps inliers
# to 0, so count_nonzero counts the "false" (outlier) predictions.
# NOTE(review): this prints a count, not the fraction the prompt asks for.
predicted=model.predict(X_test_normal)-1
print(np.count_nonzero(predicted))
"""
Explanation: Use the trained model to predict whether X_test_normal data point are in the same distributions. Calculate the fraction of "false" predictions.
End of explanation
"""
# Count of points flagged as outliers on the uniform (novel) set --
# ideally close to all 100.
uniform=model.predict(X_test_uniform)-1
print(np.count_nonzero(uniform))
"""
Explanation: Use the trained model to predict whether X_test_uniform is in the same distribution. Calculate the fraction of "false" predictions.
End of explanation
"""
# Outlier count on the training data itself (training "false" rate).
trained=model.predict(X_train_normal)-1
print(np.count_nonzero(trained))
"""
Explanation: Use the trained model to see how well it recovers the training data. (Predict on the training data, and calculate the fraction of "false" predictions.)
End of explanation
"""
# Second model with nu=0.1: a tighter upper bound on the fraction of
# training errors (and lower bound on support vectors).
new_model=svm.OneClassSVM(nu=0.1)
new_model.fit(X_train_normal)
"""
Explanation: Create another instance of the model classifier, but change the kwarg value for nu. Hint: Use help to figure out what the kwargs are.
End of explanation
"""
# Repeat the three predictions (train, normal test, uniform test) with the
# nu=0.1 model and print the outlier counts.
new_predicted=new_model.predict(X_test_normal)-1
new_uniform=new_model.predict(X_test_uniform)-1
new_trained=new_model.predict(X_train_normal)-1
print(np.count_nonzero(new_trained))
print(np.count_nonzero(new_predicted))
print(np.count_nonzero(new_uniform))
"""
Explanation: Redo the prediction on the training set, prediction on X_test_random, and prediction on X_test.
End of explanation
"""
# Scatter all three sets (train=blue, uniform=black, normal test=red) and
# overlay the default model's decision boundary -- the zero level set of
# its decision function evaluated on a 1000x1000 grid.
plt.scatter(X_train_normal[:,0],X_train_normal[:,1],color='blue')
plt.scatter(X_test_uniform[:,0],X_test_uniform[:,1],color='black')
plt.scatter(X_test_normal[:,0],X_test_normal[:,1],color='red')
xx1, yy1 = np.meshgrid(np.linspace(1.5, 4, 1000), np.linspace(1.5, 4,1000))
Z1 =model.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
plt.contour(xx1, yy1, Z1, levels=[0],
            linewidths=2)
"""
Explanation: Plot in scatter points the X_train in blue, X_test_normal in red, and X_test_uniform in black. Overplot the trained model decision function boundary for the first instance of the model classifier.
End of explanation
"""
# Same overlay for the nu=0.1 model -- expect a tighter boundary around
# the training clusters.
plt.scatter(X_train_normal[:,0],X_train_normal[:,1],color='blue')
plt.scatter(X_test_uniform[:,0],X_test_uniform[:,1],color='black')
plt.scatter(X_test_normal[:,0],X_test_normal[:,1],color='red')
xx1, yy1 = np.meshgrid(np.linspace(1.5, 4, 1000), np.linspace(1.5, 4,1000))
Z1 =new_model.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
plt.contour(xx1, yy1, Z1, levels=[0],
            linewidths=2)
from sklearn.covariance import EllipticEnvelope
"""
Explanation: Do the same for the second instance of the model classifier.
End of explanation
"""
# Robust-covariance outlier detection: fit EllipticEnvelope on the normal
# training data concatenated with the uniform points, then label each
# point (+1 inlier / -1 outlier).
train_uniform=np.concatenate((X_train_normal,X_test_uniform))
envelope=EllipticEnvelope()
envelope.fit(train_uniform)
envelope.predict(train_uniform)
"""
Explanation: Test how well EllipticEnvelope predicts the outliers when you concatenate the training data with the X_test_uniform data.
End of explanation
"""
# Mahalanobis distance of each set under the fitted envelope; larger
# distances indicate outliers. (A stray `print(range(100))` debug line was
# removed -- under Python 3 it printed only the range object.)
plt.scatter(range(100),envelope.mahalanobis(X_test_uniform),color='black')
plt.scatter(range(2000),envelope.mahalanobis(X_train_normal),color='blue')
plt.scatter(range(200),envelope.mahalanobis(X_test_normal),color='red')
plt.show()
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAXQAAAD8CAYAAABn919SAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAAEp0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMC4wcmMyKzI5MjAuZzExNWJhZGUsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy9bT2XBAAAgAElEQVR4nO3df5AkZ33f8fd353aB2cOWNHdRXSR2RsTEKcWVgNhScIGphAMMFwcpiaNwLGHBlLdYxS5RVMoIb1XKSdW6AFewZWKOWiHIWTMGORhKKkIAmeDE/gOZPRA/hJB16HZOUkm6HxKg4wBJu9/80T27s7PdMz0z3fOj5/Oq6tqZ3p6eZ57u/s4z3366H3N3RERk/E0NuwAiIpIOBXQRkZxQQBcRyQkFdBGRnFBAFxHJCQV0EZGcUEAXEckJBXQRkZxQQBcRyYl9SRYys0uAjwG/BDjwG8ADwB1ABdgAbnD3p9qt58CBA16pVHovrYjIBDpx4sQ5dz/YaTlLcum/mR0H/trdP2ZmM0AR+F3gSXd/v5ndDFzq7u9tt575+XlfX19P9glERAQAMzvh7vOdluuYcjGznwdeDdwG4O7PuPsPgOuA4+Fix4Hrey+uiIj0K0kO/SrgLPAJM/uGmX3MzGaBy939sXCZx4HLo15sZktmtm5m62fPnk2n1CIiskeSgL4PuAY45u4vA34M3Ny8gAd5m8jcjbuvufu8u88fPNgxBSQiIj1KEtAfAR5x93vC558mCPBPmNkhgPDvmWyKKCIiSXQM6O7+OPCwmf1iOOsw8F3gLmAxnLcI3JlJCUVEJJGk/dB/G6iZ2beAlwK/D7wfeJ2ZPQi8Nnw+MLVajUqlwtTUFJVKhVqtNsi3FxEZOYn6obv7vUBUl5nD6RYnmVqtxtLSEhcvXgSgXq+ztLQEwMLCwjCKJCIydGN5pejKysp2MG+4ePEiKysrQyqRiMjwjWVAP336dFfzRUQmwVgG9Lm5ua7mi4hMgrEM6KurqxSLxV3zisUiq6urQyqRiMjwjWVAX1hYYG1tjXK5jJlRLpdZW1vTCVERmWiJbs6VFt2cS0Ske6ndnEtERMaDArqISE4ooIuI5IQCuohITiigi4jkhAK6iEhOKKCLiOSEArqISE4ooIuI5IQCuohITiigi4jkhAK6iEhOKKCLiOSEArqISE4ooIuI5IQCuohITiigi4jkhAK6iEhOKKCLiOTEviQLmdkG8DSwCTzn7vNmdhlwB1ABNoAb3P2pbIopIiKddNNC/xfu/tKmgUpvBr7s7i8Bvhw+FxGRIekn5XIdcDx8fBy4vv/iiIhIr5IGdAe+ZGYnzGwpnHe5uz8WPn4cuDzqhWa2ZGbrZrZ+9uzZPosrIiJxEuXQgVe5+6Nm9veAu83se83/dHc3M496obuvAWsA8/PzkcuIiEj/ErXQ3f3R8O8Z4LPAtcATZnYIIPx7JqtCiohIZx0DupnNmtkLG4+B1wPfAe4CFsPFFoE7syqkiIh0liTlcjnwWTNrLP9n7v4FM/sa8Odm9k6gDtyQXTFFRKSTjgHd3R8C/mnE/PPA4SwKJSIi3dOVoiIiOTG2Ab1Wq1GpVJiamqJSqVCr1YZdJBGRoUrabXGk1Go1lpaWuHjxIgD1ep2lpaB7/MLCwjCLJiIyNGPZQl9ZWdkO5g0XL15kZWVlSCUSERm+sQzop0+f7mq+iMgkGMuAPjc319V8EZFJMJYBfXV1lWKxuGtesVhkdXV1SCUSERm+sQzoCwsLrK2tUS6XMTPK5TJra2s6ISoiE83cB3e/rPn5eV9fXx/Y+4mI5IGZnWgaiyLWWLbQRURkLwV0EZGcUEAXEckJBXQRkZxQQBcRyQkFdBGRnFBAFxHJCQV0EZGcUEAXEckJBXQRkZxQQBcRyQkFdBGRnMhlQNd4oyIyicZyTNF2NN6oiE
yq3LXQNd6oiEyqxAHdzApm9g0z+1z4/Cozu8fMTprZHWY2k10x47WmV+r1euRyGm9URPKumxb6TcD9Tc8/APyhu/8C8BTwzjQLlkQjvVKv13F36vU6Zha5rMYbFZG8SxTQzexK4F8CHwufG/Aa4NPhIseB67MoYKvmFvni4uKe9Iq77wnqGm9URCZB0hb6HwG/A2yFz0vAD9z9ufD5I8AVKZdtj9YW+ebmZuRy7q7xRkVk4nTs5WJmvwaccfcTZvbPu30DM1sClqD/tEfUCc8o5XKZjY2Nvt5LRGTcJGmhvxJ4k5ltAJ8iSLXcAlxiZo0vhCuBR6Ne7O5r7j7v7vMHDx7sq7BJTmwqvSIik6pjQHf397n7le5eAd4M/B93XwC+Avx6uNgicGdmpQzFtfALhYLSKyIy8frph/5e4D1mdpIgp35bOkWKt7q6SrFY3DWvWCxy/Phxtra22NjYUDAXkYnV1ZWi7v5XwF+Fjx8Crk2/SPEawXplZYXTp08zNzfH6uqqgriICDm8UlREZFKN1b1cdJ8WEZF4Y9VC131aRETijVVAj+u2qPu0iIiMWUCP67ao+7SIiIxZQI/rtqgLiURExiygLywssLa2pvu0iIhEMHcf2JvNz8/7+vr6wN5PRCQPzOyEu893Wm6sWugiIhJPAV1EJCcU0EVEckIBXUQkJxTQRURyQgFdRCQnFNBFRHJCAV1EJCcU0EVEckIBXUQkJxTQRURyYuwDeq1Wo1KpMDU1RaVSoVarDbtIIiJDMVZD0LXSkHQiIjvGuoWuIelERHaMdUDXkHQiIjvGOqBrSDoRkR1jHdA1JJ2IyI6OAd3Mnm9mf2tm3zSz+8zsv4TzrzKze8zspJndYWYz2Rd3Nw1JJyKyo+MQdGZmwKy7XzCzaeBvgJuA9wCfcfdPmdlHgW+6+7F269IQdCIi3UttCDoPXAifToeTA68BPh3OPw5c32NZRUQkBYly6GZWMLN7gTPA3cD3gR+4+3PhIo8AV2RTRBERSSJRQHf3TXd/KXAlcC3wj5K+gZktmdm6ma2fPXu2x2LupqtDRUT26qqXi7v/APgK8MvAJWbWuNL0SuDRmNesufu8u88fPHiwr8LCztWh9Xodd9++OlRBXUQmXZJeLgfN7JLw8QuA1wH3EwT2Xw8XWwTuzKqQzXR1qIhItCQt9EPAV8zsW8DXgLvd/XPAe4H3mNlJoATcll0xd8RdBVqv19VKF5GJlqSXy7fc/WXu/k/c/Zfc/b+G8x9y92vd/Rfc/d+5+8+yL277q0CbUy/Ks4vIpBm7uy0eOXKEY8eiu7s3p150F0YRmTQdLyxKUxoXFlUqFer1euz/zYy5ubnIZcrlMhsbG329v4jIoKV2YdGo6XQnxbm5OeXZRWQijV1Ab5dDb9yYK2meXUQkT8YuoEfdYRGgVCpt35grbhlQF0cRya+xC+hRd1isVqucO3du+4RnY5k4GgBDRPJo7E6KdiPuBKpOjorIOMntSdFuaAAMEZkkuQ7oGgBDRCZJrlMuIiJ5oJRLE90GQEQmwdhd+t+txu12dRsAEcm73LfQdbtdEZkUuQ/o7W4DICKSJ7kP6HG3ATAz5dJFJFdyH9CPHDkSOd/dlXYRkVzJfUD//Oc/H/s/3QJARPIk9wG9XdBud1dGEZFxk/uA3i6HrlsAiEie5D6gR93Pxcx417vepX7oIpIruQ/oUfdzuf322/nIRz4y7KKJiKRK93IRERlxupeLiMiEUUAXEcmJsQ7ououiiMiOjgHdzF5kZl8xs++a2X1mdlM4/zIzu9vMHgz/Xpp9cXc07qJYr9dx9+27KCqoi8ik6nhS1MwOAYfc/etm9kLgBHA98HbgSXd/v5ndDFzq7u9tt640T4pqvFARmRSpnRR198fc/evh46eB+4ErgOuA4+FixwmC/MDEXQGqy/lFZFJ1lUM3swrwMuAe4HJ3fyz81+PA5amWrIO4K0CnpqaUUxeRiZQ4oJvZfuAvgHe7+4
+a/+dB3iYyd2NmS2a2bmbrZ8+e7auwEOTODxw4EHs/883NTeXURWQiJQroZjZNEMxr7v6ZcPYTYX69kWc/E/Vad19z93l3nz948GBfha3VarzjHe/g/Pnze/43NbX3o2hkIhGZJEl6uRhwG3C/u3+o6V93AYvh40XgzvSLt9vKygrPPvts5P+2trYi5yunLiKTIkkL/ZXAfwBeY2b3htMR4P3A68zsQeC14fNM9RKcdYtcmVS1GlQqMDUV/FX2Mf/2dVrA3f8GsJh/H063OO3Nzc3F5s5LpRI/+clPdg0IXSwWdYtcmUi1GiwtQeNwqNeD5wC6yWh+jdWVoqurq0xPT++ZPzMzwy233MLi4iKFQgGAQqHA4uKibpErE2llZSeYN1y8GMyX/Bq7uy3WajVuuumm7ROjU1NTbG1tUSqVePrpp3nmmWe2ly0Wi6ytrSmoy8SZmoKoQ9sMYk43yQjL7d0WFxYWuOWWWyiVSsDOydDz58/vCuagXi4yueJOHemUUr6NXUBv3MMlqutilHq9rguNZOKsrkLLQF0Ui8F8ya+xC+grKyu7TnwmoQuNZNIsLMDaGpTLQZqlXA6eK/uYb2MX0PvpV64UTL6oW157CwuwsRHkzDc2FMwnwdgF9Hb9yqenpymVSgTXQkWr1+vceOONWRRNBqjRLa9eD07+NbrlKahLEnltDIxdQF9dXaXYmhwk6If+iU98gnPnzrG1tUW5XI5dx7FjxxTUx5y65fUmr4GsG50aA2NdR+4+sOnlL3+596tarXqpVGrcDMxLpZIvLy97uVx2M/NyuezVatWr1aoXi8Xt5VqnQqHQd1lkeMzcg8Nx92Q27JKNrmrVvVjcXV/FYjB/kpTL0ftOuTy6dQSse4IYO1b90Bs9XJpPik5PT2Nmkf3PAd761rfGrm+Qn13SVakELatW5XKQL5a9VGeBdn305+ZGs46S9kMfq4AeN0pRlMbIRfv27WNzc3PP/wuFAs8991zPZZHhar20HYJueerJEU8XGwXafbGdPj2adZTLC4u66eHSCPxLjRtYtIibL+NB3fK6p4uNAu366I99HSXJy6Q19ZtDL5fLsTnx1snMvBomvpaXl71QKGznzpeXl/sqRxaq1SCHZ7aTyxNJ06jmh4ch7ngb1ToiYQ59rAJ61InO6enp2KBeKpX2nCwdRaO6E0n+qOHQ2SjWUdKAPlY5dAhOjK6srHD69Gnm5uZYXV1te+Kz2ajerEsnq0SknVzm0CG4OdfGxgZbW1tsbEe7twCngM3w79HI147qlaJxpwY02JKIdGPsAnqzG2+8kbe+9X8Ba0CF4ONUgFuJC+qjOCTd2J+IEZGRMJYBvVarceDAAY4dOwb8PjDbssRsOH+vURySTnfGE5E0jF1A33v73LgAvXf+zMxMJkPS9XupsLrgiUgaxu6k6N6Li04RpFlabQBX7ZpTKpU4d+5cX+/fShe4iEymWi24d9Dp00F6dHU1u2M+tydF9+bAfxf4ccu8H4fzdzt//nzqA11keZOosb5JkHSk7Tu+RvZun0n6NqY1pXFzruiLi446nHLYDP8ebfu/YrGYWp/0rG4Spb7p+abtO97a3eArC+TxwiL36IuL4qejDhdaKv2Cw1Evp1TzpVL0hi2V+lvvoHcYGSxt396NwoU/g77bZ9KAPnYpl4WFBdbW1iiXy5hZ2/uet+sBM4rdF5upb3q+afv2ZlRSHaPa1XjsAjrsvbgoPqjH94D5rcsuSyWB+eST3c1PalR3GEmHtu9uSc8njMrAJiPb1bhTEx74OHAG+E7TvMuAu4EHw7+XJvk5kEbKJUp8GuZU5M+io/yhPzsz03cCs1p1LxSy+emsHGu+afvu6KYuekl1ZJWiGWTqh7Ry6MCrgWtaAvoHgZvDxzcDH0jyZmncnKtxs61SqbQ9clHznRQJ77RImxx63fb3HYWjdsK0D8xRyBXKbmluk1HYvqNQhm7OJ3R77qGfL85RqJuG1AJ6sC4qLQ
H9AeBQ+PgQ8ECS9fQT0JOeDN1798XdvVymp9/uW1F7RJuv+agNG7djFQoKvHmVt1b1MD5P1LHUTau7WnVv/XE9MxNf5l5PPneqm0EH+6wD+g+aHlvz84jXLgHrwPrc3FzPH6ibe6F3mh7uIk8St2G7/E6QHMhbz5RBf564Yymup1hUOapV9+np3ctNT8cH1F57o4zauKMDC+jh86eSrKefFvpOGqX/6Sj4hdYtFbNFymX3o1T9FGXfxPwUZT9KNbPcuYyuvA1MPejPExckS6XkAbLbL6Fev7Ta1c0wvtiTBvRee7k8YWaHAMK/Z3pcT2Jp3lTrk8BvAo8UCh1vnvLKeo1bWaJCnSmcCnVuZYkbNmujeZZbMtNrz5RhXBGa5D0H3dMmrkvmk08mv5dRN909azW4cGHv/CTHabu6Gekup0miPntb6H/A7pOiH0yynkHk0AGfmZlJtJyZdUyGPVwoR34dP1woj9RJk6xk/RnTXn+W5e3lp/aw8tRJ3nPQZUujZZt0HXGdFmZng18EnfaPdnXTTRnS2hdJsZfLJ4HHgGeBR4B3AiXgywTdFv8SuCzJm6XZy2V2djYyQC8vL3u1Wt1O0RwFPwW+Gf492rT8byf4rbdF9G+vLcb0d3YX4nbq5eV0dtSo9Td+6vay3qj1TU8nO4C7eY+4sSij5g/j53k37znIRkkaXyDt1tH8WeJSoq1T44RqVD30M+5o2l+WqQX0NKc0A3q5XPbl5eU9zxtdGRvBvDVXvhlOdTP/yf747ovVahAITlGOXObpUrnHz7B7J0krOGYhLjC05hd73VHj1t/rejutL6sWaLuDN408dTdBt1qN/+yDyPV3Kuvy8k6wLRSC592uKy74tuus0G6ane2+4dLpc6b9RZ67gB6Vcmm0whvBvLXL4qleti5B67txJv0oVb/A7q19gaK/fbqaSgsy6qBrt5MPUlwwSmtHTbL+btabtLyFQrpfoO0O3k69JToF6qStwcb7tKuDrE/YJ+nql7TV2m0LN8mXebdTPw2XtE845y6g99JtcbPHLflwobxrVlQvl14OkKQ7XSO1P2zdHCS97KhJ1t+83ubA1WjlJUlvtJvSaLG3O3jbpa3apYdKpfjufM37XtKW6SD6y3dqlWZxAVG7nHYWU9JjXi30DuK6LbbLkZ/qZYvNzPhbwoCdZhBr91M4zQ3frXatxHY57qjydpuPjepT3EvgagTCduXLsq7bBZS4tFoaQajTezdPg2ggdGqVdtNq7fQlmeQXSdL9YWoqeZ0nPeaVQ+8gqoUelSO/wE5Q/zDEXxUaM/2U6e0WeGPqpYXeHNxKpb1Xt6W143R633YnBLv5Od+c80/a4kzSA6RdvTS/vttfC5C83lvrut0XU1QOeHm5feCIqtMud8s9U6EQrKvXtFUWJ0MH0UI3S34stbtoqTFNT0fvu+0aLknrcCR7uaQ59ZtDT5ojP9Xh/5222inK20+7yaF303Lo5SDsXEedr2JtrLs5MLVOne7lHrWj9pIr7tSybc699lp/SXo+tB6kcV9Ys7PRr9+3L3mZooJHr1OnOozarnEnENPovZQkh97psv20jqFG+TudU4g70Xr4cPRrGue3or7Es0xr5TKgt/Yvj8uRb3b4/xZBnrz964OnSXu5dGqldTPF9Rnu1DsmzVxitztmpxZq1OdL8hO8194LjXV0+qXUehI6ac+eXqe01gNBubtJi0HwpRaXYkgjQHVK4bW7bL+fnirNU/MXdK+57E4NlCTvnabcBfSolMupmFo9xRUOR/1MzP/PUHKID9abmB+l6kepJrqRV6eWQC8HarMkO3parb7WHTdpiy3uAGjXKk5ysPX6JdUob2u9RAWz5sCVZh1mPTW+jJrTCo1WeFpf7kkCVKOLb2sZku4jnVIy3Uy99gdv3dfb1XncL7XG/7OQu4AedVI0OodeDHPcm36G6CjXCOhHqfpmzIVDZyjtSbU0T+f2lzv+lE/rIEq6o6ddjnYHS9
LcersdP6512fyF1umLsl2ru5sAkeQn+iCmxjZsPv/RyzZK83N06v8dd2K7NbD3etK0223YKuq8UmtdR+1D7bZPuzJkIXcBPa7bYtDL5Yo9JyzBY4P1Jta0TPSWaXcydedLI72DJmoHb+hmR0+7pR530MS9z/79uw/4dmOuxgXc5i+OJGO2RuU7s66HLKfWFuQguuW128dmZuK/sDuddGzk5pN0wezncyZpGXeb0unlC6a5MaKTom1Uq1WP73O+uatSG71S4oJy80nPuLRL3Gu3INNg3ryDNyTd0VvTJN10x+pmJ09SnkarPar1Vigk66lQLsf/vG0E9Hat0ax/sWQ5zc7ublVm/X5XX905957F+7aeqO+1lR51Erz5V8QgvhQb09RU0MhQt8UOGpf17+17fut2pUX1SmmeWlvXcb1YzhB9FDV/GWQ19ZJDb7SiWtMg7Q7ExsHTTZfKxvq7OVijdvg0D552/4+6eCfr7ZfFlMWX86hNSa6ijntdp18BozL1mpLJZUCvVqv+FvbmzbfAf8j+7ZZ5XG1ugX+Y5T3/iupnHhfos26dN3bQdl0i46bWwJx2EOh0t7lRnZpbgWn0Adc0vG3YOjUaLMM+99HN5+hFLgO6u/u5Nlvup8x0vNz/h8w6xF8s1Dz/DCX/IbO+RfBlcIbSQAI6xH+TD2vHbT7Btbw8nDL0M+3b1zkvr2m8pnEJ4s2TWujNqm26EYZTkv9/gcN7TpheoOgfZnlPq7x1fYNqpTd22FJpdx55GDtxc756WK3zNH5tNFro45p20TTe0yBy6BYsOxjz8/O+vr7e8+svHDjA/vPn+y6HEwyE2uo5Cuxjs+PrNyhzFRt9l2NcVKvB36UluHix9/VMTcHWVjpl6lWhAJudN7FIKsrlYCSjublglKSoUZiSMLMT7j7fabl9va1+OIopBHOIDuYAhQTBHGCOURhrajBKpWAnrFT6C+Yw/GAOCuZRCgV4wQuih2uT/qQRzLvR65iiQ3Ge2WEXAYDTZDTo4ogpFuGGG4JgXq8PuzSShVIJrrxSwTwr7sGxs7Q0mLFkxyag33gjwPMyW39cGiZquc9xJLNyjJJKBW67TcE8z86f1/YdhIsX4W1vyz6oj01AX1uDEk9ltv4kwbyx3L/nzzMrxyj57nfhmWeGXQqRfNjagsXFbIP62AT0zU34WYYt9G4cIJ1cvohMls1NuOmm7NY/NgH9i7yW5/PTYRdDRKQvKfXtiDQ2Af11fDlxWiRrg+voKSKS3NgE9FEyKl8sIjJ+LMMAooDeo6MMoA+SiOROltdy9hXQzewNZvaAmZ00s5vTKlSUn/L8LFffFQNuIcMzGyIiPeg5oJtZAfgT4I3A1cBRM7s6rYK1eh4/y2rVPVFPFxEZNf200K8FTrr7Q+7+DPAp4Lp0irXXpFydKSL5dnVmzd7+AvoVwMNNzx8J52XiQ6VVfsp0Vqvv2jlKwy6CiIwZM7jvvuzWn/lJUTNbMrN1M1s/e/Zsz+v5Z7cssDz9Cc5S2h57bovOXQiTLBf3v7j5P2OGm7ilwztL3pVKcPhwcHOrKDMzwR0mZaeusuzhMeqmp+H22zN+kyT32I2agF8Gvtj0/H3A+9q9pv8Ri/aOF/jXy1U/R2l7EIofMuvnrORbmJ+e2hm44i3bA1cEQ9c1lt+0KT//96/eNW+LYCDpL3DY6xaMTbo5VfAtgiHofnO2GjvW5f79O+MYdjOQQqm0e6T65vuel0rBGIVp3gu9MWBFr+M4Nj5nY7s0f9bGZ+lnoOao+p2aCtbdPMReY39oXX7//r1D8rWOLxk3PF+hELw2zUF+m/fhXveLduVoLWvca6Lev3nwkqj6afwtlXbXf9yoWp3qK26fa8xL+n6dPnvrcdT6WaNee/hw5/2ik6g6HPlBogluvfsQcBUwA3wT+MftXpPGiEVZyuIAHpTmss/O7gwIUSgEO+kwPleSHbtdnY/z9pD2tG27kzSg9zXAhZkdAf4IKAAfd/
fVdsv3O8CFiMgkGsgAF+7+eeDz/axDRETSoVM2IiI5oYAuIpITCugiIjmhgC4ikhMK6CIiOaGALiKSEwroIiI50deFRV2/mdlZoJ7Cqg4A51JYT9pGsVyjWCZQuboximUClasb/Zap7O4HOy000ICeFjNbT3LV1KCNYrlGsUygcnVjFMsEKlc3BlUmpVxERHJCAV1EJCfGNaCvDbsAMUaxXKNYJlC5ujGKZQKVqxsDKdNY5tBFRGSvcW2hi4hIi7EL6Gb2BjN7wMxOmtnNA3zfF5nZV8zsu2Z2n5ndFM7/PTN71MzuDacjTa95X1jOB8zsVzMs24aZfTt8//Vw3mVmdreZPRj+vTScb2b2x2G5vmVm12RQnl9sqo97zexHZvbuYdSVmX3czM6Y2Xea5nVdN2a2GC7/oJktZlSuPzCz74Xv/VkzuyScXzGznzTV20ebXvPycNufDMve1yBvMeXqeruleZzGlOmOpvJsmNm94fxB1lVcTBje/pVkFIxRmQgG0vg+8GJ2Rkm6ekDvfQi4Jnz8QuDvgKuB3wP+U8TyV4flex7BqE7fBwoZlW0DONAy74PAzeHjm4EPhI+PAP8bMOAVwD0D2GaPA+Vh1BXwauAa4Du91g1wGcHoXJcBl4aPL82gXK8H9oWPP9BUrkrzci3r+duwrBaW/Y0ZlKur7Zb2cRpVppb//zfgPw+hruJiwtD2r3FroV8LnHT3h9z9GeBTwHWDeGN3f8zdvx4+fhq4H7iizUuuAz7l7j9z91PASYLyD8p1wPHw8XHg+qb5f+qBrwKXmNmhDMtxGPi+u7e7oCyzunL3/wc8GfF+3dTNrwJ3u/uT7v4UcDfwhrTL5e5fcvfnwqdfBa5st46wbD/n7l/1IDL8adNnSa1cbcRtt1SP03ZlClvZNwCfbLeOjOoqLiYMbf8at4B+BfBw0/NHaB9UM2FmFeBlwD3hrN8Kf0J9vPHzisGW1YEvmdkJM1sK513u7o+Fjx8HLh9CuQDezO6Dbdh1Bd3XzTD2u98gaM01XGVm3zCz/2tmvxLOuyIsyyDK1c12G2R9/QrwhLs/2DRv4HXVEhOGtn+NW0AfOjPbD/wF8G53/xFwDPgHwEuBxwh+/g3aq9z9GuCNwH80s1c3/zNskQy8O5OZzQBvAv5nOGsU6lm6fWsAAAJGSURBVGqXYdVNO2a2AjwH1MJZjwFz7v4y4D3An5nZzw2wSCO33ZocZXeDYeB1FRETtg16/xq3gP4o8KKm51eG8wbCzKYJNlzN3T8D4O5PuPumu28Bt7KTKhhYWd390fDvGeCzYRmeaKRSwr9nBl0ugi+Yr7v7E2H5hl5XoW7rZmDlM7O3A78GLITBgDClcT58fIIgP/0PwzI0p2UyKVcP220g9WVm+4B/A9zRVNaB1lVUTGCI+9e4BfSvAS8xs6vC1t+bgbsG8cZhru424H53/1DT/Ob8878GGmfi7wLebGbPM7OrgJcQnJRJu1yzZvbCxmOCE2vfCd+/cbZ8EbizqVxvC8+4vwL4YdPPw7Ttaj0Nu66adFs3XwReb2aXhumG14fzUmVmbwB+B3iTu19smn/QzArh4xcT1M9DYdl+ZGavCPfPtzV9ljTL1e12G9Rx+lrge+6+nUoZZF3FxQSGuX/1c5Z3GBPBmeK/I/jmXRng+76K4KfTt4B7w+kIcDvw7XD+XcChpteshOV8gD7PqLcp14sJehF8E7ivUSdACfgy8CDwl8Bl4XwD/iQs17eB+YzKNQucB36+ad7A64rgC+Ux4FmC3OQ7e6kbgpz2yXB6R0blOkmQS23sXx8Nl/234ba9F/g68K+a1jNPEGC/D/x3wosFUy5X19stzeM0qkzh/P8BvKtl2UHWVVxMGNr+pStFRURyYtxSLiIiEkMBXUQkJxTQRURyQgFdRCQnFNBFRHJCAV1EJCcU0EVEckIBXUQkJ/4/88JKSM/h1cwAAAAASUVORK5CYII=
"""
Explanation: Compute and plot the Mahalanobis distances of X_test_uniform, X_train_normal, and X_test_normal
End of explanation
"""
|
GoogleCloudPlatform/vertex-ai-samples | notebooks/community/ml_ops/stage4/get_started_with_model_evaluation.ipynb | apache-2.0 | import os
# The Vertex AI Workbench Notebook product has specific requirements
IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME")
IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(
"/opt/deeplearning/metadata/env_version"
)
# Vertex AI Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_WORKBENCH_NOTEBOOK:
USER_FLAG = "--user"
# Install the packages
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG -q
! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG -q
! pip3 install --upgrade google-cloud-bigquery $USER_FLAG -q
! pip3 install --upgrade tensorflow $USER_FLAG -q
! pip3 install --upgrade tensorflow-hub $USER_FLAG -q
"""
Explanation: E2E ML on GCP: MLOps stage 4 : evaluation: get started with Vertex AI Model Evaluation
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage4/get_started_with_model_evaluation.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samplestree/main/notebooks/community/ml_ops/stage4/get_started_with_model_evaluation.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/main/notebooks/community/ml_ops/stage4/get_started_with_model_evaluation.ipynb">
<img src="https://lh3.googleusercontent.com/UiNooY4LUgW_oTvpsNhPpQzsstV5W8F7rYgxgGBD85cWJoLmrOzhVs_ksK_vgx40SHs7jCqkTkCk=e14-rj-sc0xffffff-h130-w32" alt="Vertex AI logo">
Open in Vertex AI Workbench
</a>
</td>
</table>
Overview
This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 4 : evaluation: get started with Vertex AI Model Evaluation.
Datasets
AutoML image model
The dataset used for this tutorial is the Flowers dataset from TensorFlow Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip.
BigQuery ML tabular model
The dataset used for this tutorial is the Penguins dataset from BigQuery public datasets. This version of the dataset is used to predict the species of penguins from the available features like culmen-length, flipper-depth etc.
Custom model
This tutorial uses a pre-trained image classification model from TensorFlow Hub, which is trained on ImageNet dataset.
Learn more about ResNet V2 pretained model.
Pipeline
BLAH
The dataset used for this tutorial is the Bank Marketing . This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.
Objective
In this tutorial, you learn how to use Vertex AI Model Evaluation.
This tutorial uses the following Google Cloud ML services:
Vertex AI AutoML
BigQuery ML
Vertex AI Training
Vertex AI Batch Prediction
Vertex AI Model Evaluation
Google Cloud Pipeline Components
The steps performed include:
SDK
Evaluate an AutoML model.
Train an AutoML image classification model.
Retrieve the default evaluation metrics from training.
Do a batch evaluation for a custom evaluation slice.
Evaluate a BigQuery ML model.
Train a BigQuery ML tabular classification model.
Retrieve the default evaluation metrics from training.
Do a batch evaluation for a custom evaluation slice.
Evaluate a custom model.
Do a batch evaluation for a custom evaluation slice.
Add an evaluation to the Model Registry for the Model resource.
Pipeline Components
Evaluate an AutoML model.
Train an AutoML image classification model.
Retrieve the default evaluation metrics from training.
Do a batch evaluation for a custom evaluation slice.
Evaluate a BigQuery ML model.
Train a BigQuery ML tabular classification model.
Retrieve the default evaluation metrics from training.
Do a batch evaluation for a custom evaluation slice.
Evaluate a custom model.
Do a batch evaluation for a custom evaluation slice.
Add an evaluation to the Model Registry for the Model resource.
Installations
Install the packages required for executing this notebook.
End of explanation
"""
import os

# Skip the kernel restart when running under the automated test harness.
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs so that the freshly
    # installed packages are importable without a manual restart.
    import IPython

    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
"""
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
"""
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}

# If the placeholder was not replaced, fall back to the project the
# gcloud CLI is currently configured with.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)

# Make every subsequent gcloud/gsutil command use this project.
! gcloud config set project $PROJECT_ID
"""
Explanation: Before you begin
GPU runtime
Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.
If you are running this notebook locally, you will need to install the Cloud SDK.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.
Set your project ID
If you don't know your project ID, you may be able to get your project ID using gcloud.
End of explanation
"""
REGION = "[your-region]"  # @param {type: "string"}

# Fall back to the default Vertex AI region when the placeholder
# value was not replaced by the user.
REGION = "us-central1" if REGION == "[your-region]" else REGION
"""
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions.
End of explanation
"""
from datetime import datetime

# Session-unique suffix (YYYYMMDDhhmmss) appended to resource names so
# concurrent users of a shared project do not collide.
TIMESTAMP = f"{datetime.now():%Y%m%d%H%M%S}"
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
End of explanation
"""
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Vertex AI Workbench, then don't execute this code
IS_COLAB = False
if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv(
"DL_ANACONDA_HOME"
):
if "google.colab" in sys.modules:
IS_COLAB = True
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
"""
Explanation: Authenticate your Google Cloud account
If you are using Vertex AI Workbench Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
"""
BUCKET_NAME = "[your-bucket-name]"  # @param {type:"string"}
BUCKET_URI = f"gs://{BUCKET_NAME}"

# When the placeholder was left in place, derive a (likely unique) default
# bucket name from the project ID plus the session timestamp.
if BUCKET_NAME in ("", None, "[your-bucket-name]"):
    BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
    BUCKET_URI = "gs://" + BUCKET_NAME
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
"""
! gsutil mb -l $REGION $BUCKET_URI
"""
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
"""
! gsutil ls -al $BUCKET_URI
"""
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
"""
SERVICE_ACCOUNT = "[your-service-account]"  # @param {type:"string"}

# Auto-discover the service account when the placeholder was not replaced.
if (
    SERVICE_ACCOUNT == ""
    or SERVICE_ACCOUNT is None
    or SERVICE_ACCOUNT == "[your-service-account]"
):
    # Get your service account from gcloud
    if not IS_COLAB:
        shell_output = !gcloud auth list 2>/dev/null
        # NOTE(review): assumes the active account is on the third output
        # line of `gcloud auth list` — confirm for your gcloud version.
        SERVICE_ACCOUNT = shell_output[2].replace("*", "").strip()

    if IS_COLAB:
        # On Colab, use the project's default Compute Engine service account.
        shell_output = ! gcloud projects describe $PROJECT_ID
        project_number = shell_output[-1].split(":")[1].strip().replace("'", "")
        SERVICE_ACCOUNT = f"{project_number}-compute@developer.gserviceaccount.com"

print("Service Account:", SERVICE_ACCOUNT)
"""
Explanation: Service Account
If you don't know your service account, try to get your service account using gcloud command by executing the second cell below.
End of explanation
"""
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_URI
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_URI
"""
Explanation: Set service account access for Vertex AI Pipelines
Run the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.
End of explanation
"""
import json
import google.cloud.aiplatform as aiplatform
import tensorflow as tf
import tensorflow_hub as hub
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import component
"""
Explanation: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
End of explanation
"""
from google.cloud import bigquery
"""
Explanation: Import BigQuery
Import the BigQuery package into your Python environment.
End of explanation
"""
# Bind the Vertex AI SDK to this project/region and use our bucket for staging.
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)
"""
Explanation: Initialize Vertex AI SDK for Python
Initialize the Vertex AI SDK for Python for your project and corresponding bucket.
End of explanation
"""
# BigQuery client used for dataset creation, BQML training and evaluation queries.
bqclient = bigquery.Client()
"""
Explanation: Create BigQuery client
Create the BigQuery client.
End of explanation
"""
import os

# Accelerator used for prediction. The GPU count can be overridden via the
# IS_TESTING_DEPLOY_GPU environment variable; otherwise one NVIDIA Tesla K80
# is requested. Set (None, None) instead to serve on CPU only.
if os.getenv("IS_TESTING_DEPLOY_GPU"):
    DEPLOY_GPU, DEPLOY_NGPU = (
        aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_DEPLOY_GPU")),
    )
else:
    DEPLOY_GPU, DEPLOY_NGPU = (aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
"""
Explanation: Set hardware accelerators
You can set hardware accelerators for prediction.
Set the variable DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
Otherwise specify (None, None) to use a container image to run on a CPU.
Learn more here hardware accelerator support for your region
End of explanation
"""
# TensorFlow version for the serving container, overridable for testing.
if os.getenv("IS_TESTING_TF"):
    TF = os.getenv("IS_TESTING_TF")
else:
    TF = "2-5".replace(".", "-")

# Pre-built serving images are named "tf2-{gpu|cpu}.<version>" for TF 2.x
# and "tf-{gpu|cpu}.<version>" for TF 1.x; pick the flavor from DEPLOY_GPU.
_family = "tf2" if TF.startswith("2") else "tf"
_flavor = "gpu" if DEPLOY_GPU else "cpu"
DEPLOY_VERSION = "{}-{}.{}".format(_family, _flavor, TF)

# Images are hosted per multi-region (e.g. "us-docker.pkg.dev").
DEPLOY_IMAGE = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format(
    REGION.split("-")[0], DEPLOY_VERSION
)

print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU)
"""
Explanation: Set pre-built containers
Set the pre-built Docker container image for prediction.
Set the variable TF to the TensorFlow version of the container image. For example, 2-1 would be version 2.1, and 1-15 would be version 1.15. The following list shows some of the pre-built images available:
For the latest list, see Pre-built containers for prediction.
End of explanation
"""
# Machine shape for the prediction VM; the family can be overridden through
# the IS_TESTING_DEPLOY_MACHINE environment variable.
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") or "n1-standard"

VCPU = "4"
DEPLOY_COMPUTE = f"{MACHINE_TYPE}-{VCPU}"
print("Deploy machine type", DEPLOY_COMPUTE)
"""
Explanation: Set machine type
Next, set the machine type to use for prediction.
Set the variable DEPLOY_COMPUTE to configure the compute resources for the VM you will use for prediction.
machine type
n1-standard: 3.75GB of memory per vCPU.
n1-highmem: 6.5GB of memory per vCPU
n1-highcpu: 0.9 GB of memory per vCPU
vCPUs: number of [2, 4, 8, 16, 32, 64, 96 ]
Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs
End of explanation
"""
# CSV index of the public flowers dataset: one row per image,
# (gcs_path, label), used to populate the ImageDataset below.
IMPORT_FILE = (
    "gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv"
)
"""
Explanation: Introduction to Vertex AI Model Evaluation for AutoML models.
For AutoML models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the Vertex AI Model Evaluation service. Additionally, you can further evaluate the model with custom evaluation slices.
Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.
End of explanation
"""
# Create the managed ImageDataset from the CSV index; single-label
# classification schema. This call blocks until the import completes.
dataset = aiplatform.ImageDataset.create(
    display_name="Flowers" + "_" + TIMESTAMP,
    gcs_source=[IMPORT_FILE],
    import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
)

print(dataset.resource_name)
"""
Explanation: Create the Dataset
Next, create the Dataset resource using the create method for the ImageDataset class, which takes the following parameters:
display_name: The human readable name for the Dataset resource.
gcs_source: A list of one or more dataset index files to import the data items into the Dataset resource.
import_schema_uri: The data labeling schema for the data items.
This operation may take several minutes.
End of explanation
"""
# Define (but do not yet run) the AutoML image classification training job:
# single-label, cloud-served model, no transfer-learning base model.
dag = aiplatform.AutoMLImageTrainingJob(
    display_name="flowers_" + TIMESTAMP,
    prediction_type="classification",
    multi_label=False,
    model_type="CLOUD",
    base_model=None,
)

print(dag)
"""
Explanation: Create and run training pipeline
To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
Create training pipeline
An AutoML training pipeline is created with the AutoMLImageTrainingJob class, with the following parameters:
display_name: The human readable name for the TrainingJob resource.
prediction_type: The type task to train the model for.
classification: An image classification model.
object_detection: An image object detection model.
multi_label: If a classification task, whether single (False) or multi-labeled (True).
model_type: The type of model for deployment.
CLOUD: Deployment on Google Cloud
CLOUD_HIGH_ACCURACY_1: Optimized for accuracy over latency for deployment on Google Cloud.
CLOUD_LOW_LATENCY_: Optimized for latency over accuracy for deployment on Google Cloud.
MOBILE_TF_VERSATILE_1: Deployment on an edge device.
MOBILE_TF_HIGH_ACCURACY_1:Optimized for accuracy over latency for deployment on an edge device.
MOBILE_TF_LOW_LATENCY_1: Optimized for latency over accuracy for deployment on an edge device.
base_model: (optional) Transfer learning from existing Model resource -- supported for image classification only.
The instantiated object is the DAG (directed acyclic graph) for the training job.
End of explanation
"""
# Run the training job with an 80/10/10 train/validation/test split and an
# 8 node-hour budget; blocks until done and returns the trained Model.
model = dag.run(
    dataset=dataset,
    model_display_name="flowers_" + TIMESTAMP,
    training_fraction_split=0.8,
    validation_fraction_split=0.1,
    test_fraction_split=0.1,
    budget_milli_node_hours=8000,
    disable_early_stopping=False,
)
"""
Explanation: Run the training pipeline
Next, you run the DAG to start the training job by invoking the method run, with the following parameters:
dataset: The Dataset resource to train the model.
model_display_name: The human readable name for the trained model.
training_fraction_split: The percentage of the dataset to use for training.
test_fraction_split: The percentage of the dataset to use for test (holdout data).
validation_fraction_split: The percentage of the dataset to use for validation.
budget_milli_node_hours: (optional) Maximum training time specified in unit of millihours (1000 = hour).
disable_early_stopping: If True, training maybe completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.
The run method when completed returns the Model resource.
The execution of the training pipeline will take upto 20 minutes.
End of explanation
"""
# List every evaluation Vertex AI computed for this model during training
# and dump the raw metrics payload of each.
model_evaluations = model.list_model_evaluations()
for model_evaluation in model_evaluations:
    print(model_evaluation.to_dict())
"""
Explanation: Retrieving the default evaluation for AutoML Model resource
The evaluation metrics computed during training are retrieved with the Vertex AI SDK's list_model_evaluations() method.
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.
End of explanation
"""
# Destination JSONL batch-input file for the custom evaluation slice.
EVAL_SLICE = BUCKET_URI + "/flowers_eval.jsonl"

# Take the first 200 rows of the training index as a demonstration slice.
! gsutil cat {IMPORT_FILE} | head -n 200 >tmp.csv

import csv

entries = []
with open("tmp.csv", "r") as f:
    reader = csv.reader(f)
    for row in reader:
        path = row[0]
        # Ground-truth label; read but not written into the JSONL entries.
        label = row[1]
        file = path.split("/")[-1]
        # Copy each image into this notebook's bucket and reference the copy.
        new_path = BUCKET_URI + "/flowers/" + file
        ! gsutil cp {path} {new_path} >/dev/null
        # NOTE(review): Vertex AI batch-prediction docs use "image/jpeg" as
        # the MIME type; confirm the bare "jpeg" value is accepted.
        entries.append({"content": new_path, "mime_type": "jpeg"})

import json

# Write one JSON object per line (JSONL), as batch prediction requires.
with open("tmp.jsonl", "w") as f:
    for entry in entries:
        f.write(json.dumps(entry) + "\n")

! gsutil cp tmp.jsonl {EVAL_SLICE}
#! rm tmp.csv tmp.jsonl
"""
Explanation: Evaluating on a custom evaluation slice
To evaluate the model on data it was not trained on, you build a custom evaluation slice, run a batch prediction over it, and inspect the results.
Make the batch input file
Now make a batch input file, which you store in your local Cloud Storage bucket. The batch input file must be in JSONL format. In For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:
content: The Cloud Storage path to the image.
mime_type: The content type. In our example, it is a jpeg file.
For example:
{'content': '[your-bucket]/file1.jpg', 'mime_type': 'jpeg'}
For demonstration purposes, to create an evaluation slice, you use a portion of the training data -- as if it was separate (non-training) data, such as instances seen in production.
End of explanation
"""
# Run a batch prediction over the custom evaluation slice. sync=True makes
# this call block until the asynchronous batch job has completed.
batch_predict_job = model.batch_predict(
    job_display_name="flowers_" + TIMESTAMP,
    instances_format="jsonl",
    gcs_source=EVAL_SLICE,
    gcs_destination_prefix=BUCKET_URI,
    sync=True,
)

print(batch_predict_job)
"""
Explanation: Make the batch prediction request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:
job_display_name: The human readable name for the batch prediction job.
instances_format: The format of the prediction request; can only be JSONL (default).
gcs_source: A list of one or more batch request input files.
gcs_destination_prefix: The Cloud Storage location for storing the batch prediction results.
sync: If set to True, the call will block while waiting for the asynchronous batch job to complete.
End of explanation
"""
import json

import tensorflow as tf

# Enumerate the output blobs written by the batch prediction job.
bp_iter_outputs = batch_predict_job.iter_outputs()

prediction_results = list()
for blob in bp_iter_outputs:
    # Result shards are named "prediction*"; skip any log/error files.
    if blob.name.split("/")[-1].startswith("prediction"):
        prediction_results.append(blob.name)

tags = list()
for prediction_result in prediction_results:
    gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
    with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
        for line in gfile.readlines():
            line = json.loads(line)
            print(line)
            # Only the first prediction of each shard is printed as a sample.
            break
"""
Explanation: Get the predictions
Next, get the results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:
content: The prediction request.
prediction: The prediction response.
ids: The internal assigned unique identifiers for each prediction request.
displayNames: The class names for each class label.
confidences: The predicted confidence, between 0 and 1, per class label.
End of explanation
"""
# Best-effort cleanup of the AutoML resources created above; any failure
# (e.g. resource already gone) is printed rather than raised.
try:
    dag.delete()
    model.delete()
    batch_predict_job.delete()
except Exception as e:
    print(e)
"""
Explanation: Delete temporary resources
Next, you delete all the temporary resources created by this example.
End of explanation
"""
# BigQuery public penguins table, referenced both as a bq:// URI and as a
# plain table path for use inside SQL statements.
IMPORT_FILE = "bq://bigquery-public-data.ml_datasets.penguins"
BQ_TABLE = "bigquery-public-data.ml_datasets.penguins"
"""
Explanation: Introduction to Vertex AI Model Evaluation for BigQuery ML models.
For BigQuery ML models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the Vertex AI Model Evaluation service. Additionally, you can further evaluate the model with custom evaluation slices.
Location of the BigQuery training data
Now set the variable IMPORT_FILE and BQ_TABLE to the location of the training data in BigQuery.
End of explanation
"""
# Name of the BigQuery dataset that will hold the BQML model.
BQ_DATASET_NAME = "penguins"

DATASET_QUERY = f"""CREATE SCHEMA {BQ_DATASET_NAME}
"""

# Submit the DDL; the query job runs asynchronously.
job = bqclient.query(DATASET_QUERY)
"""
Explanation: Create BQ dataset resource
First, you create an empty dataset resource in your project.
End of explanation
"""
! gcloud projects add-iam-policy-binding $PROJECT_ID \
--member='serviceAccount:cloud-dataengine@system.gserviceaccount.com' \
--role='roles/aiplatform.admin'
! gcloud projects add-iam-policy-binding $PROJECT_ID \
--member='user:cloud-dataengine@prod.google.com' \
--role='roles/aiplatform.admin'
"""
Explanation: Setting permissions to automatically register the model
You need to set some additional IAM permissions for BigQuery ML to automatically upload and register the model after training. Depending on your service account, the setting of the permissions below may fail. In this case, we recommend executing the permissions in a Cloud Shell.
End of explanation
"""
# Train a BigQuery ML DNN classifier on the penguins table and register it
# to the Vertex AI Model Registry (model_registry="vertex_ai").
MODEL_NAME = "penguins"
MODEL_QUERY = f"""
CREATE OR REPLACE MODEL `{BQ_DATASET_NAME}.{MODEL_NAME}`
OPTIONS(
model_type='DNN_CLASSIFIER',
labels = ['species'],
model_registry="vertex_ai",
vertex_ai_model_id="bqml_model_{TIMESTAMP}",
vertex_ai_model_version_aliases=["1"]
)
AS
SELECT *
FROM `{BQ_TABLE}`
"""

# Hoisted out of the polling loop (was re-imported on every iteration).
from time import sleep

job = bqclient.query(MODEL_QUERY)
print(job.errors, job.state)

# Poll every 30s until the training query finishes.
while job.running():
    sleep(30)
    print("Running ...")
print(job.errors, job.state)

# Report the fully qualified model table and the wall-clock training time.
tblname = job.ddl_target_table
tblname = "{}.{}".format(tblname.dataset_id, tblname.table_id)
print("{} created in {}".format(tblname, job.ended - job.started))
"""
Explanation: Training and registering the BigQuery ML model
Next, you create and train a BigQuery ML tabular classification model from the public dataset penguins and store the model in your project using the CREATE MODEL statement. The model configuration is specified in the OPTIONS statement as follows:
model_type: The type and archictecture of tabular model to train, e.g., DNN classification.
labels: The column which are the labels.
model_registry: Set to "vertex_ai" to indicate automatic registation to Vertex AI Model Registry.
vertex_ai_model_id: The human readable display name for the registered model.
vertex_ai_model_version_aliases: Alternate names for the model.
Learn more about The CREATE MODEL statement.
End of explanation
"""
# Fetch the single best evaluation row (highest ROC AUC) for the BQML model
# via the ML.EVALUATE table function.
EVAL_QUERY = f"""
SELECT *
FROM
ML.EVALUATE(MODEL {BQ_DATASET_NAME}.{MODEL_NAME})
ORDER BY roc_auc desc
LIMIT 1"""

job = bqclient.query(EVAL_QUERY)
# Materialize the result as a pandas DataFrame for display.
results = job.result().to_dataframe()
print(results)
"""
Explanation: Evaluate the trained BigQuery model using BigQuery
Next, retrieve the model evaluation from within BigQuery for the trained BigQuery ML model.
Learn more about The ML.EVALUATE function.
End of explanation
"""
# Look up the auto-registered BQML model in the Vertex AI Model Registry by
# its display name; assumes exactly one match for this session's timestamp.
models = aiplatform.Model.list(filter="display_name=bqml_model_" + TIMESTAMP)
model = models[0]
print(model.gca_resource)
"""
Explanation: Find the model in the Vertex AI Model Registry
Finally, you can use the Vertex AI Model list() method with a filter query to find the automatically registered model.
End of explanation
"""
# Get a reference to the Model Service client
# (regional endpoint matching where the model is registered).
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aiplatform.gapic.ModelServiceClient(
    client_options=client_options
)

# Retrieve the evaluations attached to the registered model and print the
# first one (the default training-time evaluation).
model_evaluations = model_service_client.list_model_evaluations(
    parent=model.resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
"""
Explanation: Retrieving the default evaluation for BigQuery ML Model resource from Vertex AI Model Registry
The evaluation metrics are retrieved through the Vertex AI Model service, using the GAPIC ModelServiceClient.
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.
End of explanation
"""
# Best-effort cleanup of the Vertex AI resources from this section.
# NOTE(review): batch_predict_job here refers to the job created in the
# AutoML section and may already have been deleted — confirm intent.
try:
    model.delete()
    batch_predict_job.delete()
except Exception as e:
    print(e)

try:
    # Delete the created BigQuery dataset
    ! bq rm -r -f $PROJECT_ID:$BQ_DATASET_NAME
except Exception as e:
    print(e)

# Drop the BQML model itself.
MODEL_QUERY = f"""
DROP MODEL `{BQ_DATASET_NAME}.{MODEL_NAME}`
"""
job = bqclient.query(MODEL_QUERY)
"""
Explanation: Evaluating on a custom evaluation slice
Evaluating a BigQuery ML model on a custom evaluation slice follows the same pattern as for AutoML models: export the slice in the batch prediction input format, run a batch prediction over it, and compute metrics from the results.
Delete temporary resources
Next, you delete all the temporary resources created by this example.
End of explanation
"""
# Wrap the pretrained TFHub ResNet V2 101 classifier as a single-layer
# Keras Sequential model and fix its input shape to 224x224 RGB images.
tfhub_model = tf.keras.Sequential(
    [hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5")]
)

tfhub_model.build([None, 224, 224, 3])

tfhub_model.summary()
"""
Explanation: Introduction to Vertex AI Model Evaluation for custom models.
For custom models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the Vertex AI Model Evaluation service. Additionally, you can further evaluate the model with custom evaluation slices.
Get pretrained model from TensorFlow Hub
For demonstration purposes, this tutorial uses a pretrained model from TensorFlow Hub (TFHub), which is then uploaded to a Vertex AI Model resource. Once you have a Vertex AI Model resource, the model can be deployed to a Vertex AI Endpoint resource.
Download the pretrained model
First, you download the pretrained model from TensorFlow Hub. The model gets downloaded as a TF.Keras layer. To finalize the model, in this example, you create a Sequential() model with the downloaded TFHub model as a layer, and specify the input shape to the model.
End of explanation
"""
# Export the model artifacts to Cloud Storage. TF Serving requires the
# path to end in a numeric version subfolder, hence the trailing "/1".
MODEL_DIR = BUCKET_URI + "/model/1"
tfhub_model.save(MODEL_DIR)
"""
Explanation: Save the model artifacts
At this point, the model is in memory. Next, you save the model artifacts to a Cloud Storage location.
Note: For TF Serving, the MODEL_DIR must end in a subfolder that is a number, e.g., 1.
End of explanation
"""
# Name of the model's concrete input tensor; the preprocess output dict
# must use this key so it routes to the model input.
CONCRETE_INPUT = "numpy_inputs"


def _preprocess(bytes_input):
    """Decode one JPEG byte string to a float32 224x224x3 image in [0, 1]."""
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(decoded, size=(224, 224))
    return resized


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    """Decode a batch of JPEG byte strings into the model's input dict."""
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    return {
        CONCRETE_INPUT: decoded_images
    }  # User needs to make sure the key matches model's input


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
    """Serving entry point: preprocess request bytes, then call the model
    through the concrete function m_call (defined below, bound at trace time)."""
    images = preprocess_fn(bytes_inputs)
    prob = m_call(**images)
    return prob


# Concrete function bridging the static-graph serving function to the
# dynamic-graph Keras model, keyed on CONCRETE_INPUT.
m_call = tf.function(tfhub_model.call).get_concrete_function(
    [tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)

# Re-save the model with the serving function as its default signature.
tf.saved_model.save(tfhub_model, MODEL_DIR, signatures={"serving_default": serving_fn})
"""
Explanation: Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex Model service, which will create a Vertex Model resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
How does the serving function work
When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a tf.string.
The serving function consists of two parts:
preprocessing function:
Converts the input (tf.string) to the input shape and data type of the underlying model (dynamic graph).
Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.
post-processing function:
Converts the model output to format expected by the receiving application -- e.q., compresses the output.
Packages the output for the the receiving application -- e.g., add headings, make JSON object, etc.
Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.
One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported.
Serving function for image data
Preprocessing
To pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes, and then preprocessed to match the model input requirements, before it is passed as input to the deployed model.
To resolve this, you define a serving function (serving_fn) and attach it to the model as a preprocessing step. Add a @tf.function decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).
When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (tf.string), which is passed to the serving function (serving_fn). The serving function preprocesses the tf.string into raw (uncompressed) numpy bytes (preprocess_fn) to match the input requirements of the model:
io.decode_jpeg- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB).
image.convert_image_dtype - Changes integer pixel values to float 32, and rescales pixel data between 0 and 1.
image.resize - Resizes the image to match the input shape for the model.
At this point, the data can be passed to the model (m_call), via a concrete function. The serving function is a static graph, while the model is a dynamic graph. The concrete function performs the tasks of marshalling the input data from the serving function to the model, and marshalling the prediction result from the model back to the serving function.
End of explanation
"""
# Reload the saved model and extract the input tensor name of its
# serving_default signature; needed later to address prediction requests.
loaded = tf.saved_model.load(MODEL_DIR)

serving_input = list(
    loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
"""
Explanation: Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
For your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as a HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
End of explanation
"""
# Register the exported SavedModel as a Vertex AI Model resource, served
# with the pre-built TF container chosen earlier (DEPLOY_IMAGE).
model = aiplatform.Model.upload(
    display_name="example_" + TIMESTAMP,
    artifact_uri=MODEL_DIR,
    serving_container_image_uri=DEPLOY_IMAGE,
)

print(model)
"""
Explanation: Upload the TensorFlow Hub model to a Vertex AI Model resource
Finally, you upload the model artifacts from the TFHub model into a Vertex AI Model resource.
End of explanation
"""
IMPORT_FILE = "gs://cloud-ml-tables-data/bank-marketing.csv"
! gsutil cat {IMPORT_FILE} | head -n 40000 > train.csv
! gsutil cat {IMPORT_FILE} | head -n 1 >eval.csv
! gsutil cat {IMPORT_FILE} | tail -n 5200 >> eval.csv
IMPORT_TRAIN = BUCKET_NAME + "/train.csv"
IMPORT_EVAL = BUCKET_NAME + "/eval.csv"
! gsutil cp train.csv {IMPORT_TRAIN}
! gsutil cp eval.csv {IMPORT_EVAL}
! rm -f train.csv eval.csv
"""
Explanation: BLAH Do batch prediction on custom model
BLAH register the custom evaluation metrics
Model evaluation using Vertex AI Pipeline components
In this section, you perform model evaluations on AutoML, BigQuery ML and custom models using Vertex AI Pipeline components
AutoML model evaluation pipeline component
You can retrieve the default evaluation metrics that are produced when the AutoML model is trained.
Additionally, you can evaluate an AutoML model with custom evaluation slices using the combination of BatchPredictionOp and ModelEvaluationOp components, as:
The custom evaluation slice data contains the label values (ground truths).
Perform a batch prediction on the custom evaluation slice.
Perform a model evaluation with the batch prediction results and label values.
Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.
End of explanation
"""
from kfp.v2.dsl import Artifact, Input, Model
@component(packages_to_install=["google-cloud-aiplatform"])
def evaluateAutoMLModelOp(model: Input[Artifact], region: str) -> str:
    """Return the training-time evaluation of an AutoML model as a string.

    Looks up the model's resource name from the artifact metadata, lists its
    model evaluations via the Vertex AI Model service, and returns the first
    one (logged for the pipeline UI as well).
    """
    import logging

    import google.cloud.aiplatform.gapic as gapic

    # Target the Model service in the region where the model was trained.
    api_endpoint = f"{region}-aiplatform.googleapis.com"
    service = gapic.ModelServiceClient(client_options={"api_endpoint": api_endpoint})
    # The training component records the model's full resource name here.
    resource_name = model.metadata["resourceName"]
    evaluations = service.list_model_evaluations(parent=resource_name)
    first_evaluation = list(evaluations)[0]
    logging.info(first_evaluation)
    return str(first_evaluation)
"""
Explanation: Create AutoML model evaluation component
The Vertex AI pre-built pipeline components do not currently include a component for retrieving the model evaluations of an AutoML model, so you will first write your own component, as follows:
Takes as input the region and Model artifacts returned from an AutoML training component.
Create a client interface to the Vertex AI Model service.
Construct the resource ID for the model from the model artifact parameter (its `metadata["resourceName"]`).
Retrieve the model evaluation
Return the model evaluation as a string.
End of explanation
"""
# Root Cloud Storage prefix where this pipeline stores its artifacts.
PIPELINE_ROOT = "{}/pipeline_root/automl_lbn_training".format(BUCKET_NAME)
@dsl.pipeline(
    name="automl-lbn-training", description="AutoML tabular classification training"
)
def pipeline(
    import_file: str,
    batch_files: list,
    display_name: str,
    bucket: str = PIPELINE_ROOT,
    project: str = PROJECT_ID,
    region: str = REGION,
):
    """AutoML tabular training + custom-slice evaluation pipeline.

    Tasks (in order): create a tabular Dataset from `import_file`, train an
    AutoML classification model, fetch its training-time evaluation, run a
    batch prediction over `batch_files`, then evaluate those predictions.
    """
    from google_cloud_pipeline_components import aiplatform as gcc_aip
    from google_cloud_pipeline_components.experimental.evaluation import \
        ModelEvaluationOp
    from google_cloud_pipeline_components.v1.batch_predict_job import \
        ModelBatchPredictOp
    dataset_op = gcc_aip.TabularDatasetCreateOp(
        project=project, display_name=display_name, gcs_source=import_file
    )
    training_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
        project=project,
        display_name=display_name,
        optimization_prediction_type="classification",
        dataset=dataset_op.outputs["dataset"],
        model_display_name=display_name,
        training_fraction_split=0.8,
        validation_fraction_split=0.1,
        test_fraction_split=0.1,
        budget_milli_node_hours=8000,  # up to 8 node-hours of training
        optimization_objective="minimize-log-loss",
        target_column="Deposit",  # label column of the bank-marketing dataset
    )
    # Custom component defined earlier: fetch the training-time evaluation.
    eval_op = evaluateAutoMLModelOp(model=training_op.outputs["model"], region=region)
    batch_op = ModelBatchPredictOp(
        project=project,
        job_display_name="batch_predict_job",
        model=training_op.outputs["model"],
        gcs_source_uris=batch_files,
        gcs_destination_output_uri_prefix=bucket,
        instances_format="csv",
        predictions_format="jsonl",
        model_parameters={},
        # NOTE(review): uses the DEPLOY_COMPUTE notebook global, captured at
        # pipeline-compile time rather than passed as a parameter.
        machine_type=DEPLOY_COMPUTE,
        starting_replica_count=1,
        max_replica_count=1,
    ).after(eval_op)
    # Evaluate the batch predictions against the ground-truth "Deposit" column.
    batch_eval_op = ModelEvaluationOp(
        project=project,
        root_dir=bucket,
        problem_type="classification",
        classification_type="multiclass",
        ground_truth_column="Deposit",
        class_names=["0", "1"],
        predictions_format="jsonl",
        batch_prediction_job=batch_op.outputs["batchpredictionjob"],
    )
"""
Explanation: Construct pipeline for AutoML training, and batch model evaluation
Next, construct the pipeline with the following tasks:
Create a Vertex AI Dataset resource.
Train a AutoML tabular classification model.
Retrieve the AutoML evaluation statistics.
Make a batch prediction with the AutoML model, using an evaluation slice that was not used during training.
Evaluate the AutoML model using the results from the batch prediction.
End of explanation
"""
# Compile the pipeline definition to JSON, submit it as a PipelineJob
# (pipeline.run() blocks until completion), then remove the local spec.
compiler.Compiler().compile(
    pipeline_func=pipeline, package_path="automl_lbn_training.json"
)
pipeline = aip.PipelineJob(
    display_name="automl_lbn_training",
    template_path="automl_lbn_training.json",
    pipeline_root=PIPELINE_ROOT,
    parameter_values={
        "import_file": IMPORT_TRAIN,
        "batch_files": [IMPORT_EVAL],
        "display_name": "bank" + TIMESTAMP,
        "project": PROJECT_ID,
        "region": REGION,
    },
)
pipeline.run()
! rm -f automl_lbn_training.json
"""
Explanation: Compile and execute the AutoML training, and batch model evaluation pipeline
Next, you compile the pipeline and then execute it. The pipeline takes the following parameters, which are passed as the dictionary parameter_values:
import_file: The Cloud Storage location of the training data.
batch_files: A list of one or more Cloud Storage locations of evaluation data.
display_name: Display name for Vertex AI Model and Endpoint resources.
project: The project ID.
region: The region.
End of explanation
"""
# The pipeline resource name has the form projects/<number>/...; extract the
# project number, which appears in the artifact paths under PIPELINE_ROOT.
PROJECT_NUMBER = pipeline.gca_resource.name.split("/")[1]
print(PROJECT_NUMBER)
def print_pipeline_output(job, output_task_name):
JOB_ID = job.name
print(JOB_ID)
for _ in range(len(job.gca_resource.job_detail.task_details)):
TASK_ID = job.gca_resource.job_detail.task_details[_].task_id
EXECUTE_OUTPUT = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/executor_output.json"
)
GCP_RESOURCES = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/gcp_resources"
)
if tf.io.gfile.exists(EXECUTE_OUTPUT):
! gsutil cat $EXECUTE_OUTPUT
break
elif tf.io.gfile.exists(GCP_RESOURCES):
! gsutil cat $GCP_RESOURCES
break
return EXECUTE_OUTPUT
# Dump the stored outputs of each AutoML pipeline task in turn.
print("tabular-dataset-create")
artifacts = print_pipeline_output(pipeline, "tabular-dataset-create")
print("\n\n")
print("automl-tabular-training-job")
artifacts = print_pipeline_output(pipeline, "automl-tabular-training-job")
print("\n\n")
print("evaluateautomlmodelop")
artifacts = print_pipeline_output(pipeline, "evaluateautomlmodelop")
# The custom evaluation component returns its metrics as a string parameter.
output = !gsutil cat $artifacts
output = json.loads(output[0])
metrics = output["parameters"]["Output"]["stringValue"]
print("\n")
print(metrics)
print("\n\n")
print("model-batch-predict")
artifacts = print_pipeline_output(pipeline, "model-batch-predict")
output = !gsutil cat $artifacts
output = json.loads(output[0])
print("\n\n")
# Cloud Storage directory that holds the batch prediction results.
print(
    output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][
        "gcsOutputDirectory"
    ]
)
print("model-evaluation")
artifacts = print_pipeline_output(pipeline, "model-evaluation")
"""
Explanation: View the AutoML training and batch evaluation pipeline results
End of explanation
"""
# Delete the completed PipelineJob (use .cancel() instead if still running).
pipeline.delete()
"""
Explanation: Delete a pipeline job
After a pipeline job is completed, you can delete the pipeline job with the method delete(). Prior to completion, a pipeline job can be canceled with the method cancel().
End of explanation
"""
# Source: the public penguins table in BigQuery.
IMPORT_FILE = "bq://bigquery-public-data.ml_datasets.penguins"
BQ_TABLE = "bigquery-public-data.ml_datasets.penguins"
BQ_TABLE = "bigquery-public-data.ml_datasets.penguins"
# Dataset component of the table reference ("ml_datasets").
BQ_DATASET = BQ_TABLE.split(".")[1]
def get_data(slice_name, limit):
    """Materialize a slice of the penguins table into a new BigQuery table.

    slice_name: fully qualified destination table name.
    limit: string interpolated into the LIMIT clause, so it may carry an
        OFFSET as well (e.g. "300 OFFSET 44").
    Relies on the notebook globals `bqclient` and `BQ_TABLE`.
    """
    query = f"""
    CREATE OR REPLACE TABLE `{slice_name}`
    AS (
    WITH
      penguins AS (
      SELECT
        island,
        sex,
        culmen_length_mm,
        culmen_depth_mm,
        flipper_length_mm,
        body_mass_g,
        species
      FROM
        `{BQ_TABLE}`
    )
    SELECT
      island,
      sex,
      culmen_length_mm,
      culmen_depth_mm,
      flipper_length_mm,
      body_mass_g,
      species
    FROM
      penguins
    LIMIT {limit}
    )
    """
    response = bqclient.query(query)
    _ = response.result()  # block until the table is created
# Evaluation slice: the first 44 rows.
BQ_TABLE_EVAL = f"{PROJECT_ID}.{BQ_DATASET}.penguins_eval"
IMPORT_EVAL = f"bq://{BQ_TABLE_EVAL}"
LIMIT = 44
get_data(BQ_TABLE_EVAL, LIMIT)
# Training slice: the next 300 rows (skipping the 44 evaluation rows).
BQ_TABLE_TRAIN = f"{PROJECT_ID}.{BQ_DATASET}.penguins_train"
IMPORT_TRAIN = f"bq://{BQ_TABLE_TRAIN}"
LIMIT = "300 OFFSET 44"
get_data(BQ_TABLE_TRAIN, LIMIT)
"""
Explanation: Introduction to Vertex AI Model Evaluation for BigQuery ML models.
For BigQuery ML models, you can retrieve the model evaluation metrics that were obtained during training from the dataset split into train and test, using the BigQuery ML service.
Additionally, you can evaluate a BigQuery ML model with custom evaluation slices using the combination of the
BatchPredictionOp and ModelEvaluationOp components, as follows:
- The custom evaluation slice data contains the label values (ground truths).
- Perform a batch prediction on the custom evaluation slice.
- Perform a model evaluation with the batch prediction results and label values.
End of explanation
"""
# Root Cloud Storage prefix for this pipeline's artifacts.
PIPELINE_ROOT = f"{BUCKET_NAME}/bq_query"
@dsl.pipeline(name="bq-hello-world", pipeline_root=PIPELINE_ROOT)
def pipeline(
    bq_train_table: str,
    bq_eval_table: str,
    label: str,
    class_names: list,
    dataset: str,
    model: str,
    artifact_uri: str,
    # num_trials: int,
    deploy_image: str,
    machine_type: str,
    min_replica_count: int,
    max_replica_count: int,
    display_name: str,
    bucket: str,
    accelerator_type: str = "",
    accelerator_count: int = 0,
    project: str = PROJECT_ID,
    location: str = "US",
    region: str = "us-central1",
):
    """BigQuery ML train/export/evaluate pipeline.

    Tasks: create a BQ dataset, train a DNN classifier in BigQuery ML,
    retrieve its training-time evaluation, export the model to Cloud Storage,
    upload it as a Vertex AI Model, batch-predict on the evaluation table,
    and evaluate those predictions.
    """
    from google_cloud_pipeline_components.experimental.evaluation import \
        ModelEvaluationOp
    from google_cloud_pipeline_components.v1.batch_predict_job import \
        ModelBatchPredictOp
    from google_cloud_pipeline_components.v1.bigquery import (
        BigqueryCreateModelJobOp, BigqueryEvaluateModelJobOp,
        BigqueryExportModelJobOp, BigqueryQueryJobOp)
    from google_cloud_pipeline_components.v1.model import ModelUploadOp
    # NOTE(review): location is hardcoded to "US" here instead of the
    # `location` parameter -- presumably because the public penguins table
    # lives in the US multi-region; confirm before parameterizing.
    bq_dataset = BigqueryQueryJobOp(
        project=project, location="US", query=f"CREATE SCHEMA {dataset}"
    )
    bq_model = BigqueryCreateModelJobOp(
        project=project,
        location=location,
        query=f"CREATE OR REPLACE MODEL {dataset}.{model} OPTIONS (model_type='dnn_classifier', labels=['{label}']) AS SELECT * FROM `{bq_train_table}` WHERE body_mass_g IS NOT NULL AND sex IS NOT NULL",
    ).after(bq_dataset)
    # NOTE(review): uses the PROJECT_ID global and literal "US" rather than
    # the `project`/`location` parameters -- verify this is intentional.
    bq_eval = BigqueryEvaluateModelJobOp(
        project=PROJECT_ID, location="US", model=bq_model.outputs["model"]
    ).after(bq_model)
    bq_export = BigqueryExportModelJobOp(
        project=project,
        location=location,
        model=bq_model.outputs["model"],
        model_destination_path=artifact_uri,
    ).after(bq_model)
    model_upload = ModelUploadOp(
        display_name=display_name,
        artifact_uri=artifact_uri,
        serving_container_image_uri=deploy_image,
        project=project,
        location=region,
    ).after(bq_export)
    batch_predict = ModelBatchPredictOp(
        project=project,
        job_display_name="batch_predict_job",
        model=model_upload.outputs["model"],
        bigquery_source_input_uri=bq_eval_table,
        bigquery_destination_output_uri=f"bq://{project}",
        instances_format="bigquery",
        predictions_format="bigquery",
        model_parameters={},
        # NOTE(review): the `machine_type` parameter is unused; the
        # DEPLOY_COMPUTE notebook global is captured at compile time instead.
        machine_type=DEPLOY_COMPUTE,
        starting_replica_count=min_replica_count,
        max_replica_count=max_replica_count,
        accelerator_type=accelerator_type,
        accelerator_count=accelerator_count,
    ).after(model_upload)
    batch_eval = ModelEvaluationOp(
        project=project,
        root_dir=bucket,
        problem_type="classification",
        classification_type="multiclass",
        ground_truth_column=label,
        class_names=class_names,
        predictions_format="jsonl",
        batch_prediction_job=batch_predict.outputs["batchpredictionjob"],
    )
"""
Explanation: Construct pipeline for BigQuery ML training, and batch model evaluation
Next, construct the pipeline with the following tasks:
Create a BigQuery ML Dataset resource.
Train a BigQuery ML tabular classification model.
Retrieve the BigQuery ML evaluation statistics.
Make a batch prediction with the BigQuery ML model, using an evaluation slice that was not used during training.
Evaluate the BigQuery ML model using the results from the batch prediction.
End of explanation
"""
# Cloud Storage location where the BigQuery model is exported.
MODEL_DIR = BUCKET_NAME + "/bqmodel"
# Compile and submit the BQML pipeline (run() blocks until completion).
compiler.Compiler().compile(pipeline_func=pipeline, package_path="bqml.json")
pipeline = aip.PipelineJob(
    display_name="bqml",
    template_path="bqml.json",
    pipeline_root=PIPELINE_ROOT,
    parameter_values={
        "bq_train_table": BQ_TABLE_TRAIN,
        "bq_eval_table": IMPORT_EVAL,
        "label": "species",
        "class_names": [
            "Adelie Penguin (Pygoscelis adeliae)",
            "Chinstrap penguin (Pygoscelis antarctica)",
            "Gentoo penguin (Pygoscelis papua)",
        ],
        "dataset": "bqml_tutorial",
        "model": "penguins_model",
        "artifact_uri": MODEL_DIR,
        #'num_trials': 1,
        "deploy_image": DEPLOY_IMAGE,
        "display_name": "penguins",
        "machine_type": DEPLOY_COMPUTE,
        "min_replica_count": 1,
        "max_replica_count": 1,
        "accelerator_type": DEPLOY_GPU.name,
        "accelerator_count": 1,
        "bucket": BUCKET_NAME,
        "project": PROJECT_ID,
        "location": "US",
    },
    # enable_caching=False
)
pipeline.run()
! rm -rf bqml.json
"""
Explanation: Compile and execute the BigQuery ML training, and batch model evaluation pipeline
Next, you compile the pipeline and then execute it. The pipeline takes the following parameters, which are passed as the dictionary parameter_values:
bq_train_table: The BigQuery table containing the training data.
bq_eval_table: The BigQuery table containing the evaluation data.
label: The corresponding label for the BigQuery dataset.
dataset: The BigQuery dataset component name.
model: The BigQuery model component name.
artifact_uri: The Cloud Storage location to export the BigQuery model artifacts.
num_trials: If greater than one, will perform hyperparameter tuning for the specified number of trials using the Vertex AI Vizier service.
deploy_image: The container image for serving predictions.
machine_type: The VM for serving predictions.
min_replica_count/max_replica_count: The number of virtual machines for auto-scaling predictions.
display_name: Display name for Vertex AI Model resource.
project: The project ID.
region: The region.
End of explanation
"""
# Extract the project number from projects/<number>/... in the resource name;
# it appears in the artifact paths under PIPELINE_ROOT.
PROJECT_NUMBER = pipeline.gca_resource.name.split("/")[1]
print(PROJECT_NUMBER)
def print_pipeline_output(job, output_task_name):
JOB_ID = job.name
print(JOB_ID)
for _ in range(len(job.gca_resource.job_detail.task_details)):
TASK_ID = job.gca_resource.job_detail.task_details[_].task_id
EXECUTE_OUTPUT = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/executor_output.json"
)
GCP_RESOURCES = (
PIPELINE_ROOT
+ "/"
+ PROJECT_NUMBER
+ "/"
+ JOB_ID
+ "/"
+ output_task_name
+ "_"
+ str(TASK_ID)
+ "/gcp_resources"
)
if tf.io.gfile.exists(EXECUTE_OUTPUT):
! gsutil cat $EXECUTE_OUTPUT
break
elif tf.io.gfile.exists(GCP_RESOURCES):
! gsutil cat $GCP_RESOURCES
break
return EXECUTE_OUTPUT
# Dump the stored outputs of each BQML pipeline task in turn.
print("bigquery-query-job")
artifacts = print_pipeline_output(pipeline, "bigquery-query-job")
print("\n\n")
print("bigquery-create-model-job")
artifacts = print_pipeline_output(pipeline, "bigquery-create-model-job")
print("\n\n")
print("bigquery-evaluate-model-job")
artifacts = print_pipeline_output(pipeline, "bigquery-evaluate-model-job")
print("\n\n")
print("bigquery-export-model-job")
artifacts = print_pipeline_output(pipeline, "bigquery-export-model-job")
print("\n\n")
print("model-upload")
artifacts = print_pipeline_output(pipeline, "model-upload")
print("\n\n")
print("model-batch-predict")
artifacts = print_pipeline_output(pipeline, "model-batch-predict")
output = !gsutil cat $artifacts
output = json.loads(output[0])
print("\n\n")
# Cloud Storage directory that holds the batch prediction results.
print(
    output["artifacts"]["batchpredictionjob"]["artifacts"][0]["metadata"][
        "gcsOutputDirectory"
    ]
)
print("model-evaluation")
artifacts = print_pipeline_output(pipeline, "model-evaluation")
"""
Explanation: View the BigQuery ML training and batch evaluation pipeline results
End of explanation
"""
# Delete the completed PipelineJob (use .cancel() instead if still running).
pipeline.delete()
"""
Explanation: Delete a pipeline job
After a pipeline job is completed, you can delete the pipeline job with the method delete(). Prior to completion, a pipeline job can be canceled with the method cancel().
End of explanation
"""
# Best-effort cleanup: the model may already have been removed, so ignore
# lookup failures -- but catch Exception rather than a bare `except:`, which
# would also swallow KeyboardInterrupt/SystemExit.
try:
    job = bqclient.delete_model("bqml_tutorial.penguins_model")
except Exception:
    pass
# Delete the dataset along with any remaining tables.
job = bqclient.delete_dataset("bqml_tutorial", delete_contents=True)
"""
Explanation: Delete the BigQuery model and dataset
Next, delete the BigQuery model and dataset.
End of explanation
"""
# Tear down every resource the tutorial may have created. Each deletion is
# wrapped in its own try/except so one failure does not stop the rest; the
# globals() checks skip resources that were never created in this session.
delete_all = True
if delete_all:
    # Delete the dataset using the Vertex dataset object
    try:
        if "dataset" in globals():
            dataset.delete()
    except Exception as e:
        print(e)
    # Delete the model using the Vertex model object
    try:
        if "model" in globals():
            model.delete()
    except Exception as e:
        print(e)
    # Delete the endpoint using the Vertex endpoint object
    try:
        if "endpoint" in globals():
            endpoint.undeploy_all()
            endpoint.delete()
    except Exception as e:
        print(e)
    # Delete the AutoML or Pipeline training job
    try:
        if "dag" in globals():
            dag.delete()
    except Exception as e:
        print(e)
    # Delete the custom training job
    try:
        if "job" in globals():
            job.delete()
    except Exception as e:
        print(e)
    # Delete the batch prediction job using the Vertex batch prediction object
    try:
        if "batch_predict_job" in globals():
            batch_predict_job.delete()
    except Exception as e:
        print(e)
    # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
    try:
        if "hpt_job" in globals():
            hpt_job.delete()
    except Exception as e:
        print(e)
    # Finally remove the staging bucket and everything in it.
    if "BUCKET_NAME" in globals():
        ! gsutil rm -r $BUCKET_NAME
"""
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial.
Dataset
Pipeline
Model
Endpoint
AutoML Training Job
Batch Job
Custom Job
Hyperparameter Tuning Job
Cloud Storage Bucket
End of explanation
"""
|
mjbrodzik/ipython_notebooks | modice/sii_monthly_for_modice.ipynb | apache-2.0 | monthly.shape
monthly = monthly[monthly['hemisphere'] == 'N']
monthly.shape
monthly.loc[:,'date'] = pd.to_datetime(monthly['month'])
# Set the month column to the DataFrame index
monthly.set_index('date', inplace=True, verify_integrity=True, drop=True)
monthly = monthly[monthly.index > '1998-12-31']
monthly.columns
monthly.shape
for column in monthly.columns:
matched = re.search(r"missing_km2", column)
if matched:
print("%s: " % (column))
print(monthly[column].min(), monthly[column].max())
del monthly[column]
monthly.shape
monthly['meier2007_laptev_area_km2'].plot()
fig, ax = plt.subplots(15, figsize=(8,25))
i = 0
for column in monthly.columns:
matched = re.search(r"area_km2", column)
if matched:
print("%s: %d" % (column, i))
monthly[column].plot(ax=ax[i], sharey=True, title=column)
i = i + 1
fig.tight_layout()
fig.savefig("nsidc0051_area_by_region.png")
"""
Explanation: <h2> Slicing monthly files</h2>
<ol>
<li> only keep hemisphere='N'
<li> only keep dates since Jan 1999
<li> validate that "missing" columns are zeroes and drop them
</ol>
Then set index to date.
End of explanation
"""
monthly
def convert_column_to_matrix(df, column, start_year=1999, nyears=17):
    """Pivot one monthly time-series column into a year-by-month matrix.

    Parameters
    ----------
    df : pandas.DataFrame
        Monthly data indexed by a DatetimeIndex (one row per month).
    column : str
        Column to pivot; a leading "meier2007_" prefix is stripped from the
        generated column names.
    start_year : int, optional
        First year of the output matrix (default 1999, matching the original
        hard-coded range).
    nyears : int, optional
        Number of consecutive years to extract (default 17).

    Returns
    -------
    pandas.DataFrame
        Indexed by year, with 12 columns named "MM_<short_column>".
    """
    short_column = re.sub("meier2007_", "", column)
    years = np.arange(nyears) + start_year
    months = np.arange(12) + 1
    column_names = ["%02d_%s" % (month, short_column) for month in months]
    data = pd.DataFrame(index=years, columns=column_names)
    for year in years:
        for month in months:
            # .at replaces the long-deprecated DataFrame.get_value().
            stamp = pd.to_datetime("%4d-%02d" % (year, month))
            data.loc[year, column_names[month - 1]] = df.at[stamp, column]
    return data
# Concatenate the per-column year-by-month matrices side by side.
# NOTE(review): `all` shadows the builtin of the same name.
start=True
for column in monthly.columns:
    matched = re.search(r"area_km2|extent_km2", column)
    if matched:
        print("%s: " % (column))
        new = convert_column_to_matrix(monthly, column)
        if start:
            all = new.copy()
            start=False
        else:
            all = pd.concat([all, new], axis=1)
all.shape
all
# Spot-check one cell against the original monthly series.
# NOTE(review): DataFrame.get_value() is deprecated/removed in modern pandas;
# .at[...] is the current equivalent.
col = 'beaufort_extent_km2'
print("from monthly: %f" % monthly.get_value(index=pd.to_datetime('2008-06-01'), col='meier2007_'+col))
print("from all : %f" % all.get_value(index=2008, col='06_' + col))
# Export both layouts to CSV.
del monthly['month']
monthly.to_csv('nsidc0051_monthly_tseries.csv', sep='\t')
all.to_csv('nsidc0051_year_by_month.csv', index_label='Year')
%pwd
%more nsidc0051_year_by_month.csv
all.columns
"""
Explanation: <h2>Convert from monthly time series to years time series</h2>
End of explanation
"""
|
xoolive/scientificpython | labs/02_making_maps.ipynb | mit | shapefile_path = "./data/CNTR_2014_03M_SH/Data/CNTR_RG_03M_2014.shp"
"""
Explanation: Cartes du monde, cartes de France
Planisphères et projections
L'objectif de cette séance est de se familiariser avec un format courant de description de contours, le format shapefile, et avec différentes projections couramment utilisées.
Ce notebook est certainement (trop) long. Il n'est pas attendu de vous de tout faire dans le temps de la séance et il ne faut pas se décourager si de nombreux points restent non traités ; les nombreux « bonus » à la fin occuperont les meilleurs.
Plan du notebook:
1. Le format shapefile
2. Coordonnées et projections
3. Notions de géodésie
Il est indispensable pour la suite d'avoir :
- coder la projection Mercator
- coder la projection Lambert 93
C'est le minimum requis pour les séances suivantes.
Une fois ce minimum accompli, si vous êtes curieux, il est préférable de laisser dans un premier temps les bonus de côté pour voir les notions de géodésie.
Le format shapefile
Le site de la commission européenne met à disposition un certain nombre de données géographiques. On y trouve notamment une page permettant de télécharger les contours d'entités administratives, notamment les pays du monde.
La version actuelle en téléchargement est celle de 2014. On peut alors télécharger le fichier shapefile au 1/3000000. Le format shapefile est un format géographique standard qui permet de décrire la géométrie d'objets d'écrits, à base de points, de lignes et de polygones.
On peut alors récupérer sur la page indiquée le fichier CNTR_2014_03M_SH.zip dans lequel, on trouvera un fichier à l'extension .shp contenant les contours recherchés. Ces données sont déjà accessibles pour vous dans le dossier data.
End of explanation
"""
import fiona
# Each element yielded by fiona.open() is one geographic feature (a dict).
items = [p for p in fiona.open(shapefile_path)]
items[0]
"""
Explanation: La bibliothèque fiona permet de déchiffrer les fichiers binaires au format shapefile .shp.
Observons la structure des données:
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
from shapely.geometry import MultiPolygon, Polygon, shape
s = shape(items[0]['geometry'])
print (type (s))
s
"""
Explanation: Chaque élément produit par fiona.open() est un élément graphique.
Les données sont présentées sous la forme d'un dictionnaire qui respecte l'arborescence suivante:
geometry
coordinates
type
id
properties
type
Note:
geometry.type précise la forme de la donnée géométrique (point, ligne, polygone) parmi un certain nombre de format standards. On recense notamment Point, Polygon, LineString et leur version en liste MultiPoint, MultiPolygon, MultiLineString.
properties contient également un dictionnaire pour lequel le modèle de données est libre. Chaque éditeur fournit les informations qu'il souhaite.
Parmi les propriétés fournies ici, on trouve un champ CNTR_ID, qui évoque country id. Ici, AD est le code ISO-3166-1 pour Andorre.
Le module shapely permet de construire des données géométriques à partir du dictionnaire rendu par fiona. La fonction shape produit un objet qui est rendu par sa représentation graphique dans le notebook.
End of explanation
"""
# Exercise stub: the `....` placeholder must be replaced by the expression
# giving the feature's country code, so Antarctica ('AQ') is filtered out.
shapes = [shape(i['geometry']) for i in items if .... != 'AQ']
print (set([s.geom_type for s in shapes]))
"""
Explanation: <div class="alert alert-warning">
**Exercice:** Afficher les codes pays des données fournies dans le fichier shapefile donné.
</div>
Consigne: Utiliser la forme en compréhension de liste. Il est « interdit » d'utiliser plus d'une ligne de code pour cet exercice.
<div class="alert alert-warning">
**Exercice:** Trouver l'élément géométrique relatif à la Suisse dans les données téléchargées et l'afficher.
</div>
Note: Pour les plus patriotes qui auraient voulu afficher la France, les territoires d'Outre-Mer rendent l'aperçu peu lisible à l'échelle mondiale. Le choix s'est alors porté sur un pays réputé « neutre ».
On peut utiliser la forme en compréhension:
python
[i for i in item if i ...]
Pour la suite, nous allons toutefois afficher manuellement les données avec matplotlib.
Nous allons devoir manipuler deux types de données:
End of explanation
"""
from descartes import PolygonPatch
fig = plt.figure()
ax = fig.gca()
for s in shapes:
    # Wrap single Polygons so every shape iterates the same way.
    if s.geom_type == "Polygon":
        s = MultiPolygon([s])
    for idx, p in enumerate(s):
        ax.add_patch(PolygonPatch(p, fc='#6699cc', ec='#6699cc', alpha=0.5, zorder=2))
# Finitions
ax.axis('scaled')
fig.set_size_inches(20, 10)
ax.set_frame_on(False)
"""
Explanation: Pour cela, nous allons utiliser les fonctions add_patch de matplotlib et l'objet PolygonPatch qui transforme un polygone shapefile en objet manipulable par matplotlib.
On notera :
l'accès à l'attribut geom_type;
les attributs de PolygonPatch: fc pour facecolor, ec pour edgecolor, alpha pour la transparence, et zorder pour le niveau de superposition des données;
les finitions en fin de code.
End of explanation
"""
from descartes import PolygonPatch
fig = plt.figure()
ax = fig.gca()
for s in shapes:
    if s.geom_type == "Polygon":
        s = MultiPolygon([s])
    for idx, p in enumerate(s):
        # Extract the exterior ring's longitudes and latitudes as arrays.
        lon = np.array([lon for (lon, _) in list(p.exterior.coords)])
        lat = np.array([lat for (_, lat) in list(p.exterior.coords)])
        # Exercise stubs: fill in the Mercator projection of (lon, lat).
        x =
        y =
        # Rebuild the polygon from the projected coordinates.
        p = Polygon([a for a in zip(x, y)])
        ax.add_patch(PolygonPatch(p, fc='#6699cc', ec='#6699cc', alpha=0.5, zorder=2))
# Finitions
ax.axis('scaled')
fig.set_size_inches(20, 10)
ax.set_frame_on(False)
"""
Explanation: Coordonnées et projections
La carte présentée ci-dessus est déformée/dilatée par rapport à l'image qui nous est familière. En réalité, c'est aussi une projection, équirectangulaire, appelée plate carrée. Sa seule propriété notable est qu'elle permet de retrouver facilement latitude et longitude à partir des coordonnées sur le plan. (sic!)
Les données que nous avons récupérées sont fournies en latitude/longitude dans le référentiel ETRS89. La Terre est modélisée au premier ordre par une sphère, mais les modélisations plus précises font appel à un ellipsoïde de référence. Historiquement, chaque pays maintient son système de référence; en effet, la dérive des continents complique l'utilisation d'un système de référence mondial.
Pour les systèmes GPS notamment, un référentiel normalisé mondial a été proposé: le WGS84 (World Geodetic System 1984, après 1972, 1964, 1960).
Celui-ci définit pour l'ellipsoïde de référence:
- un demi-grand axe $a = 6\,378\,137\,m$
- un aplatissement $f = 1\,/\,298,257\,223\,563$
Pour les précisions qui nous intéressent dans notre exemple, les systèmes ETRS89 et WGS84 sont compatibles. On va même, pour la suite, pousser jusqu'à considérer que les coordonnées manipulées sont compatibles avec des coordonnées sphériques.
Pour afficher une carte à l'écran, on choisit systématiquement une projection, c'est-à-dire une manière de représenter l'information de la surface d'une sphère sur une surface plane.
La projection de Mercator
La projection la plus connue est celle de Mercator, qui date du XVIe siècle, utilisée par les marins. C'est une projection conforme, qui respecte les angles et les formes. Les méridiens et les parallèles sont des droites perpendiculaires, et la déformation Est-Ouest inhérente à la projection sur un cône est compensée par une déformation Nord-Sud de même ampleur : l'échelle Est-Ouest est toujours de l'ordre de l'échelle Nord-Sud.
On peut calculer la projection $(x,y)$ des coordonnées en latitude $\varphi$ et longitude $\lambda$ avec les formules suivantes.
$$
x = \lambda\
y = \ln \left( \tan \varphi + \sec \varphi \right)
$$
<div class="alert alert-warning">
**Exercice:** Afficher la carte précédente en projection de Mercator.
</div>
Consigne: N'utiliser que des fonctions numpy pour garder des temps de calcul raisonnables.
À partir de p, l'argument de type Polygon passé à PolygonPatch, vous pouvez extraire les coordonnées en latitude et en longitude du polygone de la manière suivante:
python
lon = np.array([lon for (lon, _) in list(p.exterior.coords)])
lat = np.array([lat for (_, lat) in list(p.exterior.coords)])
Une fois votre polygone reconstruit dans le système de coordonnées qui vous convient, vous pouvez reconstruire un Polygon à passer en paramètre à PolygonPatch:
python
p = Polygon([a for a in zip(x, y)])
Pour éviter les ennuis près des pôles, éliminez l'Antartique (ISO 3166-1: AQ) de vos données.
End of explanation
"""
import geodesy.sphere as geo
# Originally, the nautical mile was defined as the distance between two minutes of arc at the equator.
# Nowadays, however, the conversion is fixed at 1 nm = 1852 meters.
geo.distance((0, 0), (0, 1./60))
"""
Explanation: La projection Lambert 93
Beaucoup de projections ne permettent pas d'afficher le globe terrestre dans son intégralité, au moins à cause des discontinuités aux pôles. On trouve notamment des projections qui sont élégantes localement mais beaucoup moins à l'échelle mondiale.
Les pilotes aiment cette projection parce qu'à l'échelle où l'on trace ces cartes, une ligne droite entre deux points est proche du grand cercle qui passe par ces deux points. Dans ce système de projection conforme, les méridiens sont des droites concourantes, et les parallèles des arcs de cercle centrés sur le point de convergence des méridiens.
La projection Lambert 93 est la projection officielle utilisée pour les cartes de France métropolitaine. Elle utilise deux parallèles sécants $\varphi_1$ à 44°N, $\varphi_2$ à 49°N, le méridien de référence $\lambda_0$ à 3°E et le parallèle d'origine $\varphi_0$ à 46°30'.
On peut calculer la projection $(x,y)$ des coordonnées en latitude $\varphi$ et longitude $\lambda$ avec les formules suivantes.
$$
x = x_0 + \rho \sin(n (\lambda - \lambda_0))\
y = y_0 + \rho_0 - \rho \cos(n (\lambda - \lambda_0))
$$
On choisit ici de rester en modèle sphérique pour ne pas trop compliquer l'expression de $n$.
$$
n = \frac{\ln(\cos \varphi_1 \sec \varphi_2)}{\ln (\tan (\frac14 \pi + \frac12 \varphi_2) \cot (\frac14 \pi + \frac12\varphi_1))}
$$
Les expressions manquantes sont alors exprimées comme suit.
$$
\rho = F \cot^{n} (\tfrac14 \pi + \tfrac12 \varphi)\
\rho_0 = F \cot^{n} (\tfrac14 \pi + \tfrac12 \varphi_0)\
F = R_T \cdot \frac{\cos \varphi_1 \tan^{n} (\frac14 \pi + \frac12 \varphi_1)}{n}
$$
Les coordonnées initiales sont de $x_0,y_0$ valent respectivement 700000m et 6600000m. Le rayon de la Terre $R_T$ mesure 6371000 m.
<div class="alert alert-warning">
**Exercice:** Afficher une carte de France (métropole et Corse) et de ses pays frontaliers en projection de Lambert 93.
</div>
Consigne: N'utiliser que des fonctions numpy pour garder des temps de calcul raisonnables.
Petits bonus
Afficher la France dans une couleur différente de ses pays frontaliers;
Ajouter un graticule (méridiens et parallèles multiples de 5° par exemple);
Gros bonus
Les projections présentées ici sont conformes: elles conservent les angles et les formes. D'autres projections sont équivalentes, c'est-à-dire qu'elles conservent localement les surfaces, mais beaucoup des projections les plus utilisées sont finalement ni conformes, ni équivalentes, mais des compromis.
À cet égard, la National Geographic Society a longtemps utilisé la projection Winkel-Tripel, conçue pour minimiser les distorsions de surface, de direction et de distance (d'où le terme allemand de tripel). Cette projection est très esthétique et harmonieuse, mais cette harmonie vient à un prix : il n'existe pas de formule exacte pour repasser des coordonnées cartésiennes en $(x,y)$ à des coordonnées en latitude et longitude.
<div class="alert alert-warning">
**Bonus:** Afficher une carte du monde en projection Winkel-Tripel
</div>
Notions de géodésie
Les calculs de distance, de cap, de chemin le plus court entre deux points ne sont pas immédiats en géométrie sphérique (et a fortiori, sur le modèle ellipsoïdal WGS84).
On sait notamment que la route la plus courte entre deux points à la surface d'une sphère est située sur l'intersection d'un plan qui passe par le centre de la Terre et nos deux points. On appelle ce(tte) (segment de) droite un « grand cercle ».
Nous avons à disposition une bibliothèque de calcul de géodésie sphérique. Un autre aspect de la bibliothèque propose les mêmes interfaces en WGS84, mais les calculs sont sensiblement plus lourds: nous nous contenterons de la géodésie sphérique.
End of explanation
"""
# (latitude, longitude) of the two airports.
orly = (48.725278, 2.359444)
blagnac = (43.629075, 1.363819)
print("Distance de %.2f km" % (geo.distance(orly, blagnac)/1000))
"""
Explanation: <div class="alert alert-warning">
**Exercice**: Calculer la distance entre les aéroports de Paris Orly et de Toulouse Blagnac.
</div>
End of explanation
"""
# (latitude, longitude) of Paris CDG and Tokyo Narita.
cdg = (49.012779, 2.55)
tokyo = (35.764722, 140.386389)
gc = geo.greatcircle(cdg, tokyo)
"""
Explanation: <div class="alert alert-warning">
**Exercice**: Tracer un grand cercle entre les aéroports de Paris Charles de Gaulle et Tokyo Narita (projection de Mercator ou Winkel-Tripel)
</div>
End of explanation
"""
|
yugangzhang/CHX_Pipelines | 2019_1/Template/XPCS_Single_2019_V2.ipynb | bsd-3-clause | from pyCHX.chx_packages import *
%matplotlib notebook
plt.rcParams.update({'figure.max_open_warning': 0})
plt.rcParams.update({ 'image.origin': 'lower' })
plt.rcParams.update({ 'image.interpolation': 'none' })
import pickle as cpk
from pyCHX.chx_xpcs_xsvs_jupyter_V1 import *
import itertools
#from pyCHX.XPCS_SAXS import get_QrQw_From_RoiMask
%run /home/yuzhang/pyCHX_link/pyCHX/chx_generic_functions.py
#%matplotlib notebook
%matplotlib inline
"""
Explanation: XPCS&XSVS Pipeline for Single-(Gi)-SAXS Run
"This notebook corresponds to version {{ version }} of the pipeline tool: https://github.com/NSLS-II/pipelines"
This notebook begins with a raw time-series of images and ends with $g_2(t)$ for a range of $q$, fit to an exponential or stretched exponential, and a two-time correlation functoin.
Overview
Setup: load packages/setup path
Load Metadata & Image Data
Apply Mask
Clean Data: shutter open/bad frames
Get Q-Map
Get 1D curve
Define Q-ROI (qr, qz)
Check beam damage
One-time Correlation
Fitting
Two-time Correlation
The important scientific code is imported from the chxanalys and scikit-beam project. Refer to chxanalys and scikit-beam for additional documentation and citation information.
DEV
V8: Update visbility error bar calculation using pi = his/N +/- sqrt(his_i)/N
Update normlization in g2 calculation uing 2D-savitzky golay (SG ) smooth
CHX Olog NoteBook
CHX Olog (https://logbook.nsls2.bnl.gov/11-ID/)
Setup
Import packages for I/O, visualization, and analysis.
End of explanation
"""
scat_geometry = 'saxs' #suport 'saxs', 'gi_saxs', 'ang_saxs' (for anisotropics saxs or flow-xpcs)
#scat_geometry = 'ang_saxs'
#scat_geometry = 'gi_waxs'
#scat_geometry = 'gi_saxs'
analysis_type_auto = True #if True, will take "analysis type" option from data acquisition func series
qphi_analysis = False #if True, will do q-phi (anisotropic analysis for transmission saxs)
isotropic_Q_mask = 'normal' #'wide' # 'normal' # 'wide' ## select wich Q-mask to use for rings: 'normal' or 'wide'
phi_Q_mask = 'phi_4x_20deg' ## select wich Q-mask to use for phi analysis
q_mask_name = ''
force_compress = False #True #force to compress data
bin_frame = False #generally make bin_frame as False
para_compress = True #parallel compress
run_fit_form = False #run fit form factor
run_waterfall = False #True #run waterfall analysis
run_profile_plot = False #run prolfile plot for gi-saxs
run_t_ROI_Inten = True #run ROI intensity as a function of time
run_get_mass_center = False # Analysis for mass center of reflective beam center
run_invariant_analysis = False
run_one_time = True #run one-time
cal_g2_error = False #True #calculate g2 signal to noise
#run_fit_g2 = True #run fit one-time, the default function is "stretched exponential"
fit_g2_func = 'stretched'
run_two_time = True #run two-time
run_four_time = False #True #True #False #run four-time
run_xsvs= False #False #run visibility analysis
att_pdf_report = True #attach the pdf report to CHX olog
qth_interest = 1 #the intested single qth
use_sqnorm = True #if True, use sq to normalize intensity
use_SG = True # False #if True, use the Sawitzky-Golay filter for <I(pix)>
use_imgsum_norm= True #if True use imgsum to normalize intensity for one-time calculatoin
pdf_version='_%s'%get_today_date() #for pdf report name
run_dose = True #True # True #False #run dose_depend analysis
if scat_geometry == 'gi_saxs':run_xsvs= False;use_sqnorm=False
if scat_geometry == 'gi_waxs':use_sqnorm = False
if scat_geometry != 'saxs':qphi_analysis = False;scat_geometry_ = scat_geometry
else:scat_geometry_ = ['','ang_'][qphi_analysis]+ scat_geometry
if scat_geometry != 'gi_saxs':run_profile_plot = False
scat_geometry
taus=None;g2=None;tausb=None;g2b=None;g12b=None;taus4=None;g4=None;times_xsv=None;contrast_factorL=None; lag_steps = None
"""
Explanation: Control Runs Here
End of explanation
"""
# Select the beamtime cycle and create/select the per-user results folder.
CYCLE= '2019_1' #change cycle here (beamtime cycle, e.g. '2019_1')
path = '/XF11ID/analysis/%s/masks/'%CYCLE  # base path for this cycle's mask files
# NOTE(review): the auto-detected login is immediately overridden below;
# the last assignment wins, so all results go under user 'petrash'.
username = getpass.getuser()
username = 'commisionning'
username = 'petrash'
data_dir0 = create_user_folder(CYCLE, username)  # returns the created/selected directory
print( data_dir0 )
"""
Explanation: Make a directory for saving results
End of explanation
"""
uid = 'd099ce48' #(scan num: 3567 (Measurement: 500k, 9kHz 5k CoralPor
uid = '0587b05b' #(scan num: 3570 (Measurement: 4M, 100Hz, 200 testing data processing CoralPor
uid = 'ad658cdf' #(scan num: 3571 (Measurement: 4M, 100Hz, 200 testing data processing CoralPor
uid = '9f849990' #(scan num: 3573 (Measurement: 500k, 9 kHz, 2000 testing data processing CoralPor
uid = '25171c35-ce50-450b-85a0-ba9e116651e3'
uid = uid[:8]
print('The current uid for analysis is: %s...'%uid)
#get_last_uids( -1)
sud = get_sid_filenames(db[uid])
for pa in sud[2]:
if 'master.h5' in pa:
data_fullpath = pa
print ('scan_id, full-uid, data path are: %s--%s--%s'%(sud[0], sud[1], data_fullpath ))
#start_time, stop_time = '2017-2-24 12:23:00', '2017-2-24 13:42:00'
#sids, uids, fuids = find_uids(start_time, stop_time)
data_dir = os.path.join(data_dir0, '%s/'%(sud[1]))
os.makedirs(data_dir, exist_ok=True)
print('Results from this analysis will be stashed in the directory %s' % data_dir)
uidstr = 'uid=%s'%uid
"""
Explanation: Load Metadata & Image Data
Change this line to give a uid
End of explanation
"""
md = get_meta_data( uid )
md_blue = md.copy()
#md_blue
#md_blue['detectors'][0]
#if md_blue['OAV_mode'] != 'none':
# cx , cy = md_blue[md_blue['detectors'][0]+'_beam_center_x'], md_blue[md_blue['detectors'][0]+'_beam_center_x']
#else:
# cx , cy = md_blue['beam_center_x'], md_blue['beam_center_y']
#print(cx,cy)
detectors = sorted(get_detectors(db[uid]))
print('The detectors are:%s'%detectors)
if len(detectors) >1:
md['detector'] = detectors[1]
print( md['detector'])
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image':
reverse= True
rot90= False
elif md['detector'] =='eiger500K_single_image':
reverse= True
rot90=True
elif md['detector'] =='eiger1m_single_image':
reverse= True
rot90=False
print('Image reverse: %s\nImage rotate 90: %s'%(reverse, rot90))
try:
cx , cy = md_blue['beam_center_x'], md_blue['beam_center_y']
print(cx,cy)
except:
print('Will find cx,cy later.')
"""
Explanation: Don't Change the lines below here
get metadata
End of explanation
"""
if analysis_type_auto:#if True, will take "analysis type" option from data acquisition func series
try:
qphi_analysis_ = md['analysis'] #if True, will do q-phi (anisotropic analysis for transmission saxs)
print(md['analysis'])
if qphi_analysis_ == 'iso':
qphi_analysis = False
elif qphi_analysis_ == '':
qphi_analysis = False
else:
qphi_analysis = True
except:
print('There is no analysis in metadata.')
print('Will %s qphis analysis.'%['NOT DO','DO'][qphi_analysis])
if scat_geometry != 'saxs':qphi_analysis = False;scat_geometry_ = scat_geometry
else:scat_geometry_ = ['','ang_'][qphi_analysis]+ scat_geometry
if scat_geometry != 'gi_saxs':run_profile_plot = False
print(scat_geometry_)
#isotropic_Q_mask
scat_geometry
"""
Explanation: Load ROI defined by "XPCS_Setup" Pipeline
Define data analysis type
End of explanation
"""
##For SAXS
roi_path = '/XF11ID/analysis/2019_1/masks/'
roi_date = 'Feb6'
if scat_geometry =='saxs':
if qphi_analysis == False:
if isotropic_Q_mask == 'normal':
#print('Here')
q_mask_name='rings'
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image': #for 4M
fp = roi_path + 'roi_mask_%s_4M_norm.pkl'%roi_date
elif md['detector'] =='eiger500K_single_image': #for 500K
fp = roi_path + 'roi_mask_%s_500K_norm.pkl'%roi_date
elif isotropic_Q_mask == 'wide':
q_mask_name='wide_rings'
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image': #for 4M
fp = roi_path + 'roi_mask_%s_4M_wide.pkl'%roi_date
elif md['detector'] =='eiger500K_single_image': #for 500K
fp = roi_path + 'roi_mask_%s_500K_wide.pkl'%roi_date
elif qphi_analysis:
if phi_Q_mask =='phi_4x_20deg':
q_mask_name='phi_4x_20deg'
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image': #for 4M
fp = roi_path + 'roi_mask_%s_4M_phi_4x_20deg.pkl'%roi_date
elif md['detector'] =='eiger500K_single_image': #for 500K
fp = roi_path + 'roi_mask_%s_500K_phi_4x_20deg.pkl'%roi_date
#fp = 'XXXXXXX.pkl'
roi_mask,qval_dict = cpk.load( open(fp, 'rb' ) ) #for load the saved roi data
#print(fp)
## Gi_SAXS
elif scat_geometry =='gi_saxs':
# dynamics mask
fp = '/XF11ID/analysis/2018_2/masks/uid=460a2a3a_roi_mask.pkl'
roi_mask,qval_dict = cpk.load( open(fp, 'rb' ) ) #for load the saved roi data
print('The dynamic mask is: %s.'%fp)
# static mask
fp = '/XF11ID/analysis/2018_2/masks/uid=460a2a3a_roi_masks.pkl'
roi_masks,qval_dicts = cpk.load( open(fp, 'rb' ) ) #for load the saved roi data
print('The static mask is: %s.'%fp)
# q-map
fp = '/XF11ID/analysis/2018_2/masks/uid=460a2a3a_qmap.pkl'
#print(fp)
qr_map, qz_map, ticks, Qrs, Qzs, Qr, Qz, inc_x0,refl_x0, refl_y0 = cpk.load( open(fp, 'rb' ) )
print('The qmap is: %s.'%fp)
## WAXS
elif scat_geometry =='gi_waxs':
fp = '/XF11ID/analysis/2018_2/masks/uid=db5149a1_roi_mask.pkl'
roi_mask,qval_dict = cpk.load( open(fp, 'rb' ) ) #for load the saved roi data
print(roi_mask.shape)
#qval_dict
#roi_mask = shift_mask(roi_mask, 10,30) #if shift mask to get new mask
show_img(roi_mask, aspect=1.0, image_name = fp)#, center=center[::-1])
#%run /home/yuzhang/pyCHX_link/pyCHX/chx_generic_functions.py
"""
Explanation: Load ROI mask depending on data analysis type
End of explanation
"""
# Load the raw image series for this uid, applying the detector-specific
# orientation flags (reverse / rot90) determined earlier.
imgs = load_data( uid, md['detector'], reverse= reverse, rot90=rot90 )
md.update( imgs.md );Nimg = len(imgs);
#md['beam_center_x'], md['beam_center_y'] = cx, cy
#if 'number of images' not in list(md.keys()):
md['number of images'] = Nimg
# pixel_mask: 1 for good pixels, 0 for detector-flagged bad pixels
# (the raw 'pixel_mask' marks bad pixels as nonzero, hence the inversion).
pixel_mask = 1- np.int_( np.array( imgs.md['pixel_mask'], dtype= bool) )
print( 'The data are: %s' %imgs )
#md['acquire period' ] = md['cam_acquire_period']
#md['exposure time'] = md['cam_acquire_time']
mdn = md.copy()  # keep a snapshot of the metadata before further mutation
"""
Explanation: get data
End of explanation
"""
if md['detector'] =='eiger1m_single_image':
Chip_Mask=np.load( '/XF11ID/analysis/2017_1/masks/Eiger1M_Chip_Mask.npy')
elif md['detector'] =='eiger4m_single_image' or md['detector'] == 'image':
Chip_Mask= np.array(np.load( '/XF11ID/analysis/2017_1/masks/Eiger4M_chip_mask.npy'), dtype=bool)
BadPix = np.load('/XF11ID/analysis/2018_1/BadPix_4M.npy' )
Chip_Mask.ravel()[BadPix] = 0
elif md['detector'] =='eiger500K_single_image':
#print('here')
Chip_Mask= np.load( '/XF11ID/analysis/2017_1/masks/Eiger500K_Chip_Mask.npy') #to be defined the chip mask
Chip_Mask = np.rot90(Chip_Mask)
pixel_mask = np.rot90( 1- np.int_( np.array( imgs.md['pixel_mask'], dtype= bool)) )
else:
Chip_Mask = 1
#show_img(Chip_Mask)
print(Chip_Mask.shape, pixel_mask.shape)
use_local_disk = True
import shutil,glob
save_oavs = False
if len(detectors)==2:
if '_image' in md['detector']:
pref = md['detector'][:-5]
else:
pref=md['detector']
for k in [ 'beam_center_x', 'beam_center_y','cam_acquire_time','cam_acquire_period','cam_num_images',
'wavelength', 'det_distance', 'photon_energy']:
md[k] = md[ pref + '%s'%k]
if 'OAV_image' in detectors:
try:
#tifs = list( db[uid].data( 'OAV_image') )[0]
#print(len(tifs))
save_oavs_tifs( uid, data_dir )
save_oavs = True
## show all images
#fig, ax = show_tif_series( tifs, Nx = None, vmin=1.0, vmax=20, logs=False,
# cmap= cm.gray, figsize=[4,6] )
##show one image
#show_img(tifs[0],cmap= cm.gray,)
except:
pass
print_dict( md, ['suid', 'number of images', 'uid', 'scan_id', 'start_time', 'stop_time', 'sample', 'Measurement',
'acquire period', 'exposure time',
'det_distance', 'beam_center_x', 'beam_center_y', ] )
"""
Explanation: Load Chip mask depeding on detector
End of explanation
"""
if scat_geometry =='gi_saxs':
inc_x0 = md['beam_center_x']
inc_y0 = imgs[0].shape[0] - md['beam_center_y']
refl_x0 = md['beam_center_x']
refl_y0 = 1000 #imgs[0].shape[0] - 1758
print( "inc_x0, inc_y0, ref_x0,ref_y0 are: %s %s %s %s."%(inc_x0, inc_y0, refl_x0, refl_y0) )
else:
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image' or md['detector']=='eiger1m_single_image':
inc_x0 = imgs[0].shape[0] - md['beam_center_y']
inc_y0= md['beam_center_x']
elif md['detector'] =='eiger500K_single_image':
inc_y0 = imgs[0].shape[1] - md['beam_center_y']
inc_x0 = imgs[0].shape[0] - md['beam_center_x']
print(inc_x0, inc_y0)
###for this particular uid, manually give x0/y0
#inc_x0 = 1041
#inc_y0 = 1085
# Recover geometry/timing parameters that may be missing from the metadata.
# Bug fix: the fallback pixel size was written as 7.5*10*(-5) (= -375 m)
# instead of 7.5*10**(-5) (= 75 um, the Eiger detector pixel pitch).
dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata(
    md, Nimg, inc_x0 = inc_x0, inc_y0= inc_y0, pixelsize = 7.5*10**(-5) )
if scat_geometry =='gi_saxs':center=center[::-1]  # gi-saxs expects (x, y) order
# Bundle the experiment setup consumed by the downstream pyCHX analysis calls.
setup_pargs=dict(uid=uidstr, dpix= dpix, Ldet=Ldet, lambda_= lambda_, exposuretime=exposuretime,
        timeperframe=timeperframe, center=center, path= data_dir)
print_dict( setup_pargs )
setup_pargs
"""
Explanation: Overwrite Some Metadata if Wrong Input
Define incident beam center (also define reflection beam center for gisaxs)
End of explanation
"""
if scat_geometry == 'gi_saxs':
mask_path = '/XF11ID/analysis/2018_2/masks/'
mask_name = 'July13_2018_4M.npy'
elif scat_geometry == 'saxs':
mask_path = '/XF11ID/analysis/2019_1/masks/'
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image':
mask_name = 'Feb6_2019_4M_SAXS.npy'
elif md['detector'] =='eiger500K_single_image':
mask_name = 'Feb6_2019_500K_SAXS.npy'
elif scat_geometry == 'gi_waxs':
mask_path = '/XF11ID/analysis/2018_2/masks/'
mask_name = 'July20_2018_1M_WAXS.npy'
mask = load_mask(mask_path, mask_name, plot_ = False, image_name = uidstr + '_mask', reverse= reverse, rot90=rot90 )
mask = mask * pixel_mask * Chip_Mask
show_img(mask,image_name = uidstr + '_mask', save=True, path=data_dir, aspect=1, center=center[::-1])
mask_load=mask.copy()
imgsa = apply_mask( imgs, mask )
"""
Explanation: Apply Mask
load and plot mask if exist
otherwise create a mask using Mask pipeline
Reverse the mask in y-direction due to the coordination difference between python and Eiger software
Reverse images in y-direction
Apply the mask
Change the lines below to give mask filename
End of explanation
"""
# Quick sanity check: average a few randomly sampled frames and verify that
# the detector actually recorded photons for this uid.
img_choice_N = 3  # number of randomly chosen frames to average
img_samp_index = random.sample( range(len(imgs)), img_choice_N)
avg_img = get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uidstr)
if avg_img.max() == 0:
    # An all-zero average means no photons at all; the analysis cannot proceed.
    print('There are no photons recorded for this uid: %s'%uid)
    print('The data analysis should be terminated! Please try another uid.')
#show_img( imgsa[1000], vmin=.1, vmax= 1e1, logs=True, aspect=1,
# image_name= uidstr + '_img_avg', save=True, path=data_dir, cmap = cmap_albula )
print(center[::-1])
# Display (and save) one representative frame with the beam center overlaid.
show_img( imgsa[ 5], vmin = -1, vmax = 20, logs=False, aspect=1, #save_format='tif',
image_name= uidstr + '_img_avg', save=True, path=data_dir, cmap=cmap_albula,center=center[::-1])
# select subregion, hard coded center beam location
#show_img( imgsa[180+40*3/0.05][110:110+840*2, 370:370+840*2], vmin = 0.01, vmax = 20, logs=False, aspect=1, #save_format='tif',
# image_name= uidstr + '_img_avg', save=True, path=data_dir, cmap=cmap_albula,center=[845,839])
"""
Explanation: Check several frames average intensity
End of explanation
"""
compress=True
photon_occ = len( np.where(avg_img)[0] ) / ( imgsa[0].size)
#compress = photon_occ < .4 #if the photon ocupation < 0.5, do compress
print ("The non-zeros photon occupation is %s."%( photon_occ))
print("Will " + 'Always ' + ['NOT', 'DO'][compress] + " apply compress process.")
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image':
good_start = 5 #make the good_start at least 0
elif md['detector'] =='eiger500K_single_image':
good_start = 100 #5 #make the good_start at least 0
elif md['detector'] =='eiger1m_single_image' or md['detector'] == 'image':
good_start = 5
bin_frame = False # True #generally make bin_frame as False
if bin_frame:
bin_frame_number=4
acquisition_period = md['acquire period']
timeperframe = acquisition_period * bin_frame_number
else:
bin_frame_number =1
force_compress = False
#force_compress = True
import time
t0= time.time()
if not use_local_disk:
cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data'
else:
cmp_path = '/tmp_data/compressed'
cmp_path = '/nsls2/xf11id1/analysis/Compressed_Data'
if bin_frame_number==1:
cmp_file = '/uid_%s.cmp'%md['uid']
else:
cmp_file = '/uid_%s_bined--%s.cmp'%(md['uid'],bin_frame_number)
filename = cmp_path + cmp_file
mask2, avg_img, imgsum, bad_frame_list = compress_eigerdata(imgs, mask, md, filename,
force_compress= force_compress, para_compress= para_compress, bad_pixel_threshold = 1e14,
reverse=reverse, rot90=rot90,
bins=bin_frame_number, num_sub= 100, num_max_para_process= 500, with_pickle=True,
direct_load_data =use_local_disk, data_path = data_fullpath, )
# Find the first frame with meaningful signal (shutter open) and open the
# compressed-file reader (FD) over the good frame range.
min_inten = 10  # minimum total counts for a frame to count as "shutter open"
good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] )
print ('The good_start frame number is: %s '%good_start)
FD = Multifile(filename, good_start, len(imgs)//bin_frame_number )
#FD = Multifile(filename, good_start, 100)
uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end)
print( uid_ )
# Plot total intensity vs frame for the good frames only; use a set for the
# bad-frame membership test (the list version is O(n) per frame).
_bad_set = set(bad_frame_list)
plot1D( y = imgsum[ np.array( [i for i in np.arange(good_start, len(imgsum)) if i not in _bad_set])],
       title =uidstr + '_imgsum', xlabel='Frame', ylabel='Total_Intensity', legend='imgsum' )
# Bug fix: keep the frame count an integer -- true division produced a float,
# which breaks later uses of Nimg as a frame count / tau cutoff (max_taus).
Nimg = Nimg//bin_frame_number
run_time(t0)
mask = mask * pixel_mask * Chip_Mask
mask_copy = mask.copy()
mask_copy2 = mask.copy()
#%run ~/pyCHX_link/pyCHX/chx_generic_functions.py
try:
if md['experiment']=='printing':
#p = md['printing'] #if have this printing key, will do error function fitting to find t_print0
find_tp0 = True
t_print0 = ps( y = imgsum[:400] ) * timeperframe
print( 'The start time of print: %s.' %(t_print0 ) )
else:
find_tp0 = False
print('md[experiment] is not "printing" -> not going to look for t_0')
t_print0 = None
except:
find_tp0 = False
print('md[experiment] is not "printing" -> not going to look for t_0')
t_print0 = None
show_img( avg_img, vmin=1e-3, vmax= 1e1, logs=True, aspect=1, #save_format='tif',
image_name= uidstr + '_img_avg', save=True,
path=data_dir, center=center[::-1], cmap = cmap_albula )
"""
Explanation: Compress Data
Generate a compressed data with filename
Replace old mask with a new mask with removed hot pixels
Do average image
Do each image sum
Find badframe_list for where image sum above bad_pixel_threshold
Check shutter open frame to get good time series
End of explanation
"""
# Optionally restrict the frame range, then detect bad frames (intensity
# outliers) by fitting the frame-sum time series.
good_end= None # 2000  # set to an int to truncate the series at that frame
if good_end is not None:
    FD = Multifile(filename, good_start, min( len(imgs)//bin_frame_number, good_end) )
    uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end)
    print( uid_ )
re_define_good_start =False  # flip to True to hand-pick the frame window below
if re_define_good_start:
    good_start = 180
    #good_end = 19700
    good_end = len(imgs)
    FD = Multifile(filename, good_start, good_end)
    uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end)
    print( FD.beg, FD.end)
# Frames deviating by more than `scale` sigma from a polynomial fit of the
# total-intensity trace are flagged as bad.
bad_frame_list = get_bad_frame_list( imgsum, fit='both', plot=True,polyfit_order = 30,
                        scale= 3.5, good_start = good_start, good_end=good_end, uid= uidstr, path=data_dir)
print( 'The bad frame list length is: %s'%len(bad_frame_list) )
"""
Explanation: Get bad frame list by a polynominal fit
End of explanation
"""
imgsum_y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])]
imgsum_x = np.arange( len( imgsum_y))
save_lists( [imgsum_x, imgsum_y], label=['Frame', 'Total_Intensity'],
filename=uidstr + '_img_sum_t', path= data_dir )
"""
Explanation: Create a new mask by masking the bad pixels and get a new avg_img
End of explanation
"""
plot1D( y = imgsum_y, title = uidstr + '_img_sum_t', xlabel='Frame', c='b',
ylabel='Total_Intensity', legend='imgsum', save=True, path=data_dir)
"""
Explanation: Plot time~ total intensity of each frame
End of explanation
"""
#%run /home/yuzhang/pyCHX_link/pyCHX/chx_packages.py
if md['detector'] =='eiger4m_single_image' or md['detector'] == 'image':
pass
elif md['detector'] =='eiger500K_single_image':
#if md['cam_acquire_period'] <= 0.00015: #will check this logic
if imgs[0].dtype == 'uint16':
print('Create dynamic mask for 500K due to 9K data acquistion!!!')
bdp = find_bad_pixels_FD( bad_frame_list, FD, img_shape = avg_img.shape, threshold=20 )
mask = mask_copy2.copy()
mask *=bdp
mask_copy = mask.copy()
show_img( mask, image_name='New Mask_uid=%s'%uid )
"""
Explanation: Get Dynamic Mask (currently designed for 500K)
End of explanation
"""
setup_pargs
#%run ~/pyCHX_link/pyCHX/chx_generic_functions.py
%run ~/pyCHX_link/pyCHX/XPCS_SAXS.py
if scat_geometry =='saxs':
## Get circular average| * Do plot and save q~iq
mask = mask_copy.copy()
hmask = create_hot_pixel_mask( avg_img, threshold = 1e8, center=center, center_radius= 10)
qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask , mask * hmask, pargs=setup_pargs )
plot_circular_average( qp_saxs, iq_saxs, q_saxs, pargs=setup_pargs, show_pixel=True,
xlim=[qp_saxs.min(), qp_saxs.max()*1.0], ylim = [iq_saxs.min(), iq_saxs.max()*2] )
mask =np.array( mask * hmask, dtype=bool)
if scat_geometry =='saxs':
if run_fit_form:
form_res = fit_form_factor( q_saxs,iq_saxs, guess_values={'radius': 2500, 'sigma':0.05,
'delta_rho':1E-10 }, fit_range=[0.0001, 0.015], fit_variables={'radius': T, 'sigma':T,
'delta_rho':T}, res_pargs=setup_pargs, xlim=[0.0001, 0.015])
qr = np.array( [qval_dict[k][0] for k in sorted( qval_dict.keys())] )
if qphi_analysis == False:
try:
qr_cal, qr_wid = get_QrQw_From_RoiMask( roi_mask, setup_pargs )
print(len(qr))
if (qr_cal - qr).sum() >=1e-3:
print( 'The loaded ROI mask might not be applicable to this UID: %s.'%uid)
print('Please check the loaded roi mask file.')
except:
print('Something is wrong with the roi-mask. Please check the loaded roi mask file.')
show_ROI_on_image( avg_img*roi_mask, roi_mask, center, label_on = False, rwidth = 840, alpha=.9,
save=True, path=data_dir, uid=uidstr, vmin= 1e-3,
vmax= 1e-1, #np.max(avg_img),
aspect=1,
show_roi_edge=True,
show_ang_cor = True)
plot_qIq_with_ROI( q_saxs, iq_saxs, np.unique(qr), logs=True, uid=uidstr,
xlim=[q_saxs.min(), q_saxs.max()*1.02],#[0.0001,0.08],
ylim = [iq_saxs.min(), iq_saxs.max()*1.02], save=True, path=data_dir)
roi_mask = roi_mask * mask
"""
Explanation: Static Analysis
SAXS Scattering Geometry
End of explanation
"""
if scat_geometry =='saxs':
Nimg = FD.end - FD.beg
time_edge = create_time_slice( Nimg, slice_num= 10, slice_width= 1, edges = None )
time_edge = np.array( time_edge ) + good_start
#print( time_edge )
qpt, iqst, qt = get_t_iqc( FD, time_edge, mask*Chip_Mask, pargs=setup_pargs, nx=1500, show_progress= False )
plot_t_iqc( qt, iqst, time_edge, pargs=setup_pargs, xlim=[qt.min(), qt.max()],
ylim = [iqst.min(), iqst.max()], save=True )
if run_invariant_analysis:
if scat_geometry =='saxs':
invariant = get_iq_invariant( qt, iqst )
time_stamp = time_edge[:,0] * timeperframe
if scat_geometry =='saxs':
plot_q2_iq( qt, iqst, time_stamp,pargs=setup_pargs,ylim=[ -0.001, 0.01] ,
xlim=[0.007,0.2],legend_size= 6 )
if scat_geometry =='saxs':
plot_time_iq_invariant( time_stamp, invariant, pargs=setup_pargs, )
if False:
iq_int = np.zeros( len(iqst) )
fig, ax = plt.subplots()
q = qt
for i in range(iqst.shape[0]):
yi = iqst[i] * q**2
iq_int[i] = yi.sum()
time_labeli = 'time_%s s'%( round( time_edge[i][0] * timeperframe, 3) )
plot1D( x = q, y = yi, legend= time_labeli, xlabel='Q (A-1)', ylabel='I(q)*Q^2', title='I(q)*Q^2 ~ time',
m=markers[i], c = colors[i], ax=ax, ylim=[ -0.001, 0.01] , xlim=[0.007,0.2],
legend_size=4)
#print( iq_int )
"""
Explanation: Time Dependent I(q) Analysis
End of explanation
"""
if scat_geometry =='gi_saxs':
plot_qzr_map( qr_map, qz_map, inc_x0, ticks = ticks, data= avg_img, uid= uidstr, path = data_dir )
"""
Explanation: GiSAXS Scattering Geometry
End of explanation
"""
if scat_geometry =='gi_saxs':
#roi_masks, qval_dicts = get_gisaxs_roi( Qrs, Qzs, qr_map, qz_map, mask= mask )
show_qzr_roi( avg_img, roi_masks, inc_x0, ticks[:4], alpha=0.5, save=True, path=data_dir, uid=uidstr )
if scat_geometry =='gi_saxs':
Nimg = FD.end - FD.beg
time_edge = create_time_slice( N= Nimg, slice_num= 3, slice_width= 2, edges = None )
time_edge = np.array( time_edge ) + good_start
print( time_edge )
qrt_pds = get_t_qrc( FD, time_edge, Qrs, Qzs, qr_map, qz_map, mask=mask, path=data_dir, uid = uidstr )
plot_qrt_pds( qrt_pds, time_edge, qz_index = 0, uid = uidstr, path = data_dir )
"""
Explanation: Static Analysis for gisaxs
End of explanation
"""
if scat_geometry =='gi_saxs':
if run_profile_plot:
xcorners= [ 1100, 1250, 1250, 1100 ]
ycorners= [ 850, 850, 950, 950 ]
waterfall_roi_size = [ xcorners[1] - xcorners[0], ycorners[2] - ycorners[1] ]
waterfall_roi = create_rectangle_mask( avg_img, xcorners, ycorners )
#show_img( waterfall_roi * avg_img, aspect=1,vmin=.001, vmax=1, logs=True, )
wat = cal_waterfallc( FD, waterfall_roi, qindex= 1, bin_waterfall=True,
waterfall_roi_size = waterfall_roi_size,save =True, path=data_dir, uid=uidstr)
if scat_geometry =='gi_saxs':
if run_profile_plot:
plot_waterfallc( wat, qindex=1, aspect=None, vmin=1, vmax= np.max( wat), uid=uidstr, save =True,
path=data_dir, beg= FD.beg)
"""
Explanation: Make a Profile Plot
End of explanation
"""
if scat_geometry =='gi_saxs':
show_qzr_roi( avg_img, roi_mask, inc_x0, ticks[:4], alpha=0.5, save=True, path=data_dir, uid=uidstr )
## Get 1D Curve (Q||-intensity¶)
qr_1d_pds = cal_1d_qr( avg_img, Qr, Qz, qr_map, qz_map, inc_x0= None, mask=mask, setup_pargs=setup_pargs )
plot_qr_1d_with_ROI( qr_1d_pds, qr_center=np.unique( np.array(list( qval_dict.values() ) )[:,0] ),
loglog=True, save=True, uid=uidstr, path = data_dir)
"""
Explanation: Dynamic Analysis for gi_saxs
End of explanation
"""
if scat_geometry =='gi_waxs':
#badpixel = np.where( avg_img[:600,:] >=300 )
#roi_mask[badpixel] = 0
show_ROI_on_image( avg_img, roi_mask, label_on = True, alpha=.5,
save=True, path=data_dir, uid=uidstr, vmin=0.1, vmax=5)
"""
Explanation: GiWAXS Scattering Geometry
End of explanation
"""
# Decompose the labeled ROI mask into a per-pixel label array (qind) and the
# flat pixel indices (pixelist); noqs is the number of distinct ROIs.
qind, pixelist = roi.extract_label_indices(roi_mask)
noqs = len(np.unique(qind))
print(noqs)
"""
Explanation: Extract the labeled array
End of explanation
"""
# Pixels per ROI: count label occurrences, then drop bin 0 (ROI labels start at 1).
label_counts = np.bincount(qind, minlength=noqs + 1)
nopr = label_counts[1:]
nopr
"""
Explanation: Number of pixels in each q box
End of explanation
"""
roi_inten = check_ROI_intensity( avg_img, roi_mask, ring_number= 2, uid =uidstr ) #roi starting from 1
"""
Explanation: Check one ROI intensity
End of explanation
"""
qth_interest = 2 #the second ring. #qth_interest starting from 1
if scat_geometry =='saxs' or scat_geometry =='gi_waxs':
if run_waterfall:
wat = cal_waterfallc( FD, roi_mask, qindex= qth_interest, save =True, path=data_dir, uid=uidstr)
plot_waterfallc( wat, qth_interest, aspect= None, vmin=1e-1, vmax= wat.max(), uid=uidstr, save =True,
path=data_dir, beg= FD.beg, cmap = cmap_vge )
q_mask_name
ring_avg = None
if run_t_ROI_Inten:
times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, multi_cor=True )
plot_each_ring_mean_intensityc( times_roi, mean_int_sets, uid = uidstr, save=True, path=data_dir )
roi_avg = np.average( mean_int_sets, axis=0)
"""
Explanation: Do a waterfall analysis
End of explanation
"""
if run_get_mass_center:
cx, cy = get_mass_center_one_roi(FD, roi_mask, roi_ind=25)
if run_get_mass_center:
fig,ax=plt.subplots(2)
plot1D( cx, m='o', c='b',ax=ax[0], legend='mass center-refl_X',
ylim=[940, 960], ylabel='posX (pixel)')
plot1D( cy, m='s', c='r',ax=ax[1], legend='mass center-refl_Y',
ylim=[1540, 1544], xlabel='frames',ylabel='posY (pixel)')
"""
Explanation: Analysis for mass center of reflective beam center
End of explanation
"""
define_good_series = False
#define_good_series = True
if define_good_series:
good_start = 200
FD = Multifile(filename, beg = good_start, end = 600) #end=1000)
uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end)
print( uid_ )
# Choose the per-pixel normalization used in the g2 calculation, in order of
# preference: circular average (SAXS), Savitzky-Golay-smoothed image, or none.
if use_sqnorm:#for transmission SAXS
    # interpolate the 1D circular average back onto the ROI pixels
    norm = get_pixelist_interp_iq( qp_saxs, iq_saxs, roi_mask, center)
    print('Using circular average in the normalization of G2 for SAXS scattering.')
elif use_SG:#for Gi-SAXS or WAXS
    # 2D Savitzky-Golay smoothing of the average image, restricted to ROI pixels
    avg_imgf = sgolay2d( avg_img, window_size= 11, order= 5) * mask
    norm=np.ravel(avg_imgf)[pixelist]
    print('Using smoothed image by SavitzkyGolay filter in the normalization of G2.')
else:
    norm= None
    print('Using simple (average) normalization of G2.')
if use_imgsum_norm:
    # also normalize each frame by its total intensity to cancel beam fluctuations
    imgsum_ = imgsum
    print('Using frame total intensity for intensity normalization in g2 calculation.')
else:
    imgsum_ = None
import time
if run_one_time:
t0 = time.time()
if cal_g2_error:
g2,lag_steps,g2_err = cal_g2p(FD,roi_mask,bad_frame_list,good_start, num_buf = 8,
num_lev= None,imgsum= imgsum_, norm=norm, cal_error= True )
else:
g2,lag_steps = cal_g2p(FD,roi_mask,bad_frame_list,good_start, num_buf = 8,
num_lev= None,imgsum= imgsum_, norm=norm, cal_error= False )
run_time(t0)
#g2_err.shape, g2.shape
lag_steps = lag_steps[:g2.shape[0]]
g2.shape[1]
if run_one_time:
taus = lag_steps * timeperframe
try:
g2_pds = save_g2_general( g2, taus=taus,qr= np.array( list( qval_dict.values() ) )[:g2.shape[1],0],
qz = np.array( list( qval_dict.values() ) )[:g2.shape[1],1],
uid=uid_+'_g2.csv', path= data_dir, return_res=True )
except:
g2_pds = save_g2_general( g2, taus=taus,qr= np.array( list( qval_dict.values() ) )[:g2.shape[1],0],
uid=uid_+'_'+q_mask_name+'_g2.csv', path= data_dir, return_res=True )
if cal_g2_error:
try:
g2_err_pds = save_g2_general( g2_err, taus=taus,qr= np.array( list( qval_dict.values() ) )[:g2.shape[1],0],
qz = np.array( list( qval_dict.values() ) )[:g2.shape[1],1],
uid=uid_+'_g2_err.csv', path= data_dir, return_res=True )
except:
g2_err_pds = save_g2_general( g2_err, taus=taus,qr= np.array( list( qval_dict.values() ) )[:g2.shape[1],0],
uid=uid_+'_'+q_mask_name+'_g2_err.csv', path= data_dir, return_res=True )
#g2.shape
"""
Explanation: One time Correlation
Note: Enter the number of buffers for multi-tau one-time correlation
number of buffers has to be even. More details in https://github.com/scikit-beam/scikit-beam/blob/master/skbeam/core/correlation.py
if define another good_series
End of explanation
"""
if run_one_time:
g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus,
function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None,
fit_variables={'baseline':False, 'beta': True, 'alpha':True,'relaxation_rate':True,},
guess_values={'baseline':1.0,'beta': 0.03,'alpha':1.0,'relaxation_rate':0.0005},
guess_limits = dict( baseline =[.9, 1.3], alpha=[0, 2],
beta = [0, 1], relaxation_rate= [1e-7, 1000]) ,)
g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_'+q_mask_name +'_g2_fit_paras.csv', path=data_dir )
scat_geometry_
if run_one_time:
if cal_g2_error:
g2_fit_err = np.zeros_like(g2_fit)
plot_g2_general( g2_dict={1:g2, 2:g2_fit}, taus_dict={1:taus, 2:taus_fit},
vlim=[0.95, 1.05], g2_err_dict= {1:g2_err, 2: g2_fit_err},
qval_dict = dict(itertools.islice(qval_dict.items(),g2.shape[1])), fit_res= g2_fit_result, geometry= scat_geometry_,filename= uid_+'_g2',
path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_fit')
else:
plot_g2_general( g2_dict={1:g2, 2:g2_fit}, taus_dict={1:taus, 2:taus_fit}, vlim=[0.95, 1.05],
qval_dict = dict(itertools.islice(qval_dict.items(),g2.shape[1])), fit_res= g2_fit_result, geometry= scat_geometry_,filename= uid_+'_g2',
path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_fit')
if run_one_time:
if True:
fs, fe = 0, 8
#fs,fe=0, 6
qval_dict_ = {k:qval_dict[k] for k in list(qval_dict.keys())[fs:fe] }
D0, qrate_fit_res = get_q_rate_fit_general( qval_dict_, g2_fit_paras['relaxation_rate'][fs:fe],
geometry= scat_geometry_ )
plot_q_rate_fit_general( qval_dict_, g2_fit_paras['relaxation_rate'][fs:fe], qrate_fit_res,
geometry= scat_geometry_,uid=uid_ , path= data_dir )
else:
D0, qrate_fit_res = get_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'],
fit_range=[0, 26], geometry= scat_geometry_ )
plot_q_rate_fit_general( qval_dict, g2_fit_paras['relaxation_rate'], qrate_fit_res,
geometry= scat_geometry_,uid=uid_ ,
show_fit=False, path= data_dir, plot_all_range=False)
#plot1D( x= qr, y=g2_fit_paras['beta'], ls='-', m = 'o', c='b', ylabel=r'$\beta$', xlabel=r'$Q( \AA^{-1} ) $' )
"""
Explanation: Fit g2
End of explanation
"""
define_good_series = False
#define_good_series = True
if define_good_series:
good_start = 5
FD = Multifile(filename, beg = good_start, end = 1000)
uid_ = uidstr + '_fra_%s_%s'%(FD.beg, FD.end)
print( uid_ )
data_pixel = None
if run_two_time:
data_pixel = Get_Pixel_Arrayc( FD, pixelist, norm= norm ).get_data()
import time
t0=time.time()
g12b=None
if run_two_time:
g12b = auto_two_Arrayc( data_pixel, roi_mask, index = None )
if run_dose:
np.save( data_dir + 'uid=%s_g12b'%uid, g12b)
run_time( t0 )
if run_two_time:
show_C12(g12b, q_ind= 2, qlabel=dict(itertools.islice(qval_dict.items(),g2.shape[1])),N1= FD.beg,logs=False, N2=min( FD.end,10000), vmin= 1.0, vmax=1.18,timeperframe=timeperframe,save=True, path= data_dir, uid = uid_ ,cmap=plt.cm.jet)#cmap=cmap_albula)
multi_tau_steps = True
if run_two_time:
if lag_steps is None:
num_bufs=8
noframes = FD.end - FD.beg
num_levels = int(np.log( noframes/(num_bufs-1))/np.log(2) +1) +1
tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs)
max_taus= lag_steps.max()
#max_taus= lag_steps.max()
max_taus = Nimg
t0=time.time()
#tausb = np.arange( g2b.shape[0])[:max_taus] *timeperframe
if multi_tau_steps:
lag_steps_ = lag_steps[ lag_steps <= g12b.shape[0] ]
g2b = get_one_time_from_two_time(g12b)[lag_steps_]
tausb = lag_steps_ *timeperframe
else:
tausb = (np.arange( g12b.shape[0]) *timeperframe)[:-200]
g2b = (get_one_time_from_two_time(g12b))[:-200]
run_time(t0)
g2b_pds = save_g2_general( g2b, taus=tausb, qr= np.array( list( qval_dict.values() ) )[:g2.shape[1],0],
qz=None, uid=uid_+'_'+q_mask_name+'_g2b.csv', path= data_dir, return_res=True )
if run_two_time:
g2b_fit_result, tausb_fit, g2b_fit = get_g2_fit_general( g2b, tausb,
function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None,
fit_variables={'baseline':False, 'beta': True, 'alpha':True,'relaxation_rate':True},
guess_values={'baseline':1.0,'beta': 0.15,'alpha':1.0,'relaxation_rate':1e-3,},
guess_limits = dict( baseline =[1, 1.8], alpha=[0, 2],
beta = [0, 1], relaxation_rate= [1e-8, 5000]) )
g2b_fit_paras = save_g2_fit_para_tocsv(g2b_fit_result, filename= uid_ +'_'+q_mask_name+'_g2b_fit_paras.csv', path=data_dir )
#plot1D( x = tausb[1:], y =g2b[1:,0], ylim=[0.95, 1.46], xlim = [0.0001, 10], m='', c='r', ls = '-',
# logx=True, title='one_time_corelation', xlabel = r"$\tau $ $(s)$", )
if run_two_time:
plot_g2_general( g2_dict={1:g2b, 2:g2b_fit}, taus_dict={1:tausb, 2:tausb_fit}, vlim=[0.95, 1.05],
qval_dict=dict(itertools.islice(qval_dict.items(),g2.shape[1])), fit_res= g2b_fit_result, geometry=scat_geometry_,filename=uid_+'_g2',
path= data_dir, function= fit_g2_func, ylabel='g2', append_name= '_b_fit')
if run_two_time:
D0b, qrate_fit_resb = get_q_rate_fit_general( dict(itertools.islice(qval_dict.items(),g2.shape[1])), g2b_fit_paras['relaxation_rate'],
fit_range=[0, 10], geometry= scat_geometry_ )
#qval_dict, g2b_fit_paras['relaxation_rate']
if run_two_time:
if True:
fs, fe = 0,8
#fs, fe = 0,12
qval_dict_ = {k:qval_dict[k] for k in list(qval_dict.keys())[fs:fe] }
D0b, qrate_fit_resb = get_q_rate_fit_general( qval_dict_, g2b_fit_paras['relaxation_rate'][fs:fe], geometry= scat_geometry_ )
plot_q_rate_fit_general( qval_dict_, g2b_fit_paras['relaxation_rate'][fs:fe], qrate_fit_resb,
geometry= scat_geometry_,uid=uid_ +'_two_time' , path= data_dir )
else:
D0b, qrate_fit_resb = get_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'],
fit_range=[0, 10], geometry= scat_geometry_ )
plot_q_rate_fit_general( qval_dict, g2b_fit_paras['relaxation_rate'], qrate_fit_resb,
geometry= scat_geometry_,uid=uid_ +'_two_time', show_fit=False,path= data_dir, plot_all_range= True )
if run_two_time and run_one_time:
plot_g2_general( g2_dict={1:g2, 2:g2b}, taus_dict={1:taus, 2:tausb},vlim=[0.99, 1.007],
qval_dict=dict(itertools.islice(qval_dict.items(),g2.shape[1])), g2_labels=['from_one_time', 'from_two_time'],
geometry=scat_geometry_,filename=uid_+'_g2_two_g2', path= data_dir, ylabel='g2', )
"""
Explanation: For two-time
End of explanation
"""
#run_dose = True
if run_dose:
get_two_time_mulit_uids( [uid], roi_mask, norm= norm, bin_frame_number=1,
path= data_dir0, force_generate=False, compress_path = cmp_path + '/' )
# Ensure the metadata has a transmission value; default to full
# transmission (1) when the key is missing.  Catch only KeyError so
# unrelated errors are not silently swallowed (the original used a
# bare `except:`).
try:
    print(md['transmission'])
except KeyError:
    md['transmission'] = 1
exposuretime
if run_dose:
N = len(imgs)
print(N)
#exposure_dose = md['transmission'] * exposuretime* np.int_([ N/16, N/8, N/4 ,N/2, 3*N/4, N*0.99 ])
exposure_dose = md['transmission'] * exposuretime* np.int_([ N/8, N/4 ,N/2, 3*N/4, N*0.99 ])
print( exposure_dose )
if run_dose:
taus_uids, g2_uids = get_series_one_time_mulit_uids( [ uid ], qval_dict, good_start=good_start,
path= data_dir0, exposure_dose = exposure_dose, num_bufs =8, save_g2= False,
dead_time = 0, trans = [ md['transmission'] ] )
if run_dose:
plot_dose_g2( taus_uids, g2_uids, ylim=[1.0, 1.2], vshift= 0.00,
qval_dict = qval_dict, fit_res= None, geometry= scat_geometry_,
filename= '%s_dose_analysis'%uid_,
path= data_dir, function= None, ylabel='g2_Dose', g2_labels= None, append_name= '' )
if run_dose:
qth_interest = 1
plot_dose_g2( taus_uids, g2_uids, qth_interest= qth_interest, ylim=[0.98, 1.2], vshift= 0.00,
qval_dict = qval_dict, fit_res= None, geometry= scat_geometry_,
filename= '%s_dose_analysis'%uidstr,
path= data_dir, function= None, ylabel='g2_Dose', g2_labels= None, append_name= '' )
0.33/0.00134
"""
Explanation: Run Dose dependent analysis
End of explanation
"""
if run_four_time:
t0=time.time()
g4 = get_four_time_from_two_time(g12b, g2=g2b)[:int(max_taus)]
run_time(t0)
if run_four_time:
taus4 = np.arange( g4.shape[0])*timeperframe
g4_pds = save_g2_general( g4, taus=taus4, qr=np.array( list( qval_dict.values() ) )[:,0],
qz=None, uid=uid_ +'_g4.csv', path= data_dir, return_res=True )
if run_four_time:
plot_g2_general( g2_dict={1:g4}, taus_dict={1:taus4},vlim=[0.95, 1.05], qval_dict=qval_dict, fit_res= None,
geometry=scat_geometry_,filename=uid_+'_g4',path= data_dir, ylabel='g4')
"""
Explanation: Four Time Correlation
End of explanation
"""
#run_xsvs =True
if run_xsvs:
max_cts = get_max_countc(FD, roi_mask )
#max_cts = 15 #for eiger 500 K
qind, pixelist = roi.extract_label_indices( roi_mask )
noqs = len( np.unique(qind) )
nopr = np.bincount(qind, minlength=(noqs+1))[1:]
#time_steps = np.array( utils.geometric_series(2, len(imgs) ) )
time_steps = [0,1] #only run the first two levels
num_times = len(time_steps)
times_xsvs = exposuretime + (2**( np.arange( len(time_steps) ) ) -1 ) * timeperframe
print( 'The max counts are: %s'%max_cts )
"""
Explanation: Speckle Visiblity
End of explanation
"""
if run_xsvs:
if roi_avg is None:
times_roi, mean_int_sets = cal_each_ring_mean_intensityc(FD, roi_mask, timeperframe = None, )
roi_avg = np.average( mean_int_sets, axis=0)
t0=time.time()
spec_bins, spec_his, spec_std, spec_sum = xsvsp( FD, np.int_(roi_mask), norm=None,
max_cts=int(max_cts+2), bad_images=bad_frame_list, only_two_levels=True )
spec_kmean = np.array( [roi_avg * 2**j for j in range( spec_his.shape[0] )] )
run_time(t0)
spec_pds = save_bin_his_std( spec_bins, spec_his, spec_std, filename=uid_+'_spec_res.csv', path=data_dir )
"""
Explanation: Do histogram
End of explanation
"""
if run_xsvs:
ML_val, KL_val,K_ = get_xsvs_fit( spec_his, spec_sum, spec_kmean,
spec_std, max_bins=2, fit_range=[1,60], varyK= False )
#print( 'The observed average photon counts are: %s'%np.round(K_mean,4))
#print( 'The fitted average photon counts are: %s'%np.round(K_,4))
print( 'The difference sum of average photon counts between fit and data are: %s'%np.round(
abs(np.sum( spec_kmean[0,:] - K_ )),4))
print( '#'*30)
qth= 0
print( 'The fitted M for Qth= %s are: %s'%(qth, ML_val[qth]) )
print( K_[qth])
print( '#'*30)
"""
Explanation: Do histogram fit with a negative binomial function using the maximum likelihood method
End of explanation
"""
if run_xsvs:
qr = [qval_dict[k][0] for k in list(qval_dict.keys()) ]
plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std=spec_std,
xlim = [0,10], vlim =[.9, 1.1],
uid=uid_, qth= qth_interest, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir)
plot_xsvs_fit( spec_his, ML_val, KL_val, K_mean = spec_kmean, spec_std = spec_std,
xlim = [0,15], vlim =[.9, 1.1],
uid=uid_, qth= None, logy= True, times= times_xsvs, q_ring_center=qr, path=data_dir )
"""
Explanation: Plot fit results
End of explanation
"""
if run_xsvs:
contrast_factorL = get_contrast( ML_val)
spec_km_pds = save_KM( spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_, path = data_dir )
#spec_km_pds
"""
Explanation: Get contrast
End of explanation
"""
if run_xsvs:
plot_g2_contrast( contrast_factorL, g2b, times_xsvs, tausb, qr,
vlim=[0.8,1.2], qth = qth_interest, uid=uid_,path = data_dir, legend_size=14)
plot_g2_contrast( contrast_factorL, g2b, times_xsvs, tausb, qr,
vlim=[0.8,1.2], qth = None, uid=uid_,path = data_dir, legend_size=4)
#from chxanalys.chx_libs import cmap_vge, cmap_albula, Javascript
"""
Explanation: Plot contrast with g2 results
End of explanation
"""
md['mask_file']= mask_path + mask_name
md['roi_mask_file']= fp
md['mask'] = mask
#md['NOTEBOOK_FULL_PATH'] = data_dir + get_current_pipeline_fullpath(NFP).split('/')[-1]
md['good_start'] = good_start
md['bad_frame_list'] = bad_frame_list
md['avg_img'] = avg_img
md['roi_mask'] = roi_mask
md['setup_pargs'] = setup_pargs
if scat_geometry == 'gi_saxs':
md['Qr'] = Qr
md['Qz'] = Qz
md['qval_dict'] = qval_dict
md['beam_center_x'] = inc_x0
md['beam_center_y']= inc_y0
md['beam_refl_center_x'] = refl_x0
md['beam_refl_center_y'] = refl_y0
elif scat_geometry == 'gi_waxs':
md['beam_center_x'] = center[1]
md['beam_center_y']= center[0]
else:
md['qr']= qr
#md['qr_edge'] = qr_edge
md['qval_dict'] = qval_dict
md['beam_center_x'] = center[1]
md['beam_center_y']= center[0]
md['beg'] = FD.beg
md['end'] = FD.end
md['t_print0'] = t_print0
md['qth_interest'] = qth_interest
md['metadata_file'] = data_dir + 'uid=%s_md.pkl'%uid
psave_obj( md, data_dir + 'uid=%s_md.pkl'%uid ) #save the setup parameters
save_dict_csv( md, data_dir + 'uid=%s_md.csv'%uid, 'w')
Exdt = {}
if scat_geometry == 'gi_saxs':
for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list', 'qr_1d_pds'],
[md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list, qr_1d_pds] ):
Exdt[ k ] = v
elif scat_geometry == 'saxs':
for k,v in zip( ['md', 'q_saxs', 'iq_saxs','iqst','qt','roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'],
[md, q_saxs, iq_saxs, iqst, qt,roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ):
Exdt[ k ] = v
elif scat_geometry == 'gi_waxs':
for k,v in zip( ['md', 'roi_mask','qval_dict','avg_img','mask','pixel_mask', 'imgsum', 'bad_frame_list'],
[md, roi_mask, qval_dict, avg_img,mask,pixel_mask, imgsum, bad_frame_list] ):
Exdt[ k ] = v
if run_waterfall:Exdt['wat'] = wat
if run_t_ROI_Inten:Exdt['times_roi'] = times_roi;Exdt['mean_int_sets']=mean_int_sets
if run_one_time:
if run_invariant_analysis:
for k,v in zip( ['taus','g2','g2_fit_paras', 'time_stamp','invariant'], [taus,g2,g2_fit_paras,time_stamp,invariant] ):Exdt[ k ] = v
else:
for k,v in zip( ['taus','g2','g2_fit_paras' ], [taus,g2,g2_fit_paras ] ):Exdt[ k ] = v
if run_two_time:
for k,v in zip( ['tausb','g2b','g2b_fit_paras', 'g12b'], [tausb,g2b,g2b_fit_paras,g12b] ):Exdt[ k ] = v
#for k,v in zip( ['tausb','g2b','g2b_fit_paras', ], [tausb,g2b,g2b_fit_paras] ):Exdt[ k ] = v
if run_dose:
for k,v in zip( [ 'taus_uids', 'g2_uids' ], [taus_uids, g2_uids] ):Exdt[ k ] = v
if run_four_time:
for k,v in zip( ['taus4','g4'], [taus4,g4] ):Exdt[ k ] = v
if run_xsvs:
for k,v in zip( ['spec_kmean','spec_pds','times_xsvs','spec_km_pds','contrast_factorL'],
[ spec_kmean,spec_pds,times_xsvs,spec_km_pds,contrast_factorL] ):Exdt[ k ] = v
#%run chxanalys_link/chxanalys/Create_Report.py
export_xpcs_results_to_h5( 'uid=%s_%s_Res.h5'%(md['uid'],q_mask_name), data_dir, export_dict = Exdt )
#extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir )
#g2npy_filename = data_dir + '/' + 'uid=%s_g12b.npy'%uid
#print(g2npy_filename)
#if os.path.exists( g2npy_filename):
# print('Will delete this file=%s.'%g2npy_filename)
# os.remove( g2npy_filename )
#extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir )
#extract_dict = extract_xpcs_results_from_h5( filename = 'uid=%s_Res.h5'%md['uid'], import_dir = data_dir )
"""
Explanation: Export Results to a HDF5 File
End of explanation
"""
pdf_out_dir = os.path.join('/XF11ID/analysis/', CYCLE, username, 'Results/')
pdf_filename = "XPCS_Analysis_Report2_for_uid=%s%s%s.pdf"%(uid,pdf_version,q_mask_name)
if run_xsvs:
pdf_filename = "XPCS_XSVS_Analysis_Report_for_uid=%s%s%s.pdf"%(uid,pdf_version,q_mask_name)
#%run /home/yuzhang/chxanalys_link/chxanalys/Create_Report.py
data_dir
make_pdf_report( data_dir, uid, pdf_out_dir, pdf_filename, username,
run_fit_form,run_one_time, run_two_time, run_four_time, run_xsvs, run_dose,
report_type= scat_geometry, report_invariant= run_invariant_analysis,
md = md )
"""
Explanation: Create PDF Report
End of explanation
"""
#%run /home/yuzhang/chxanalys_link/chxanalys/chx_olog.py
if att_pdf_report:
os.environ['HTTPS_PROXY'] = 'https://proxy:8888'
os.environ['no_proxy'] = 'cs.nsls2.local,localhost,127.0.0.1'
update_olog_uid_with_file( uid[:6], text='Add XPCS Analysis PDF Report',
filename=pdf_out_dir + pdf_filename, append_name='_R1' )
"""
Explanation: Attach the PDF report to Olog
End of explanation
"""
if save_oavs:
os.environ['HTTPS_PROXY'] = 'https://proxy:8888'
os.environ['no_proxy'] = 'cs.nsls2.local,localhost,127.0.0.1'
update_olog_uid_with_file( uid[:6], text='Add OVA images',
filename= data_dir + 'uid=%s_OVA_images.png'%uid, append_name='_img' )
# except:
"""
Explanation: Save the OVA image
End of explanation
"""
uid
"""
Explanation: The End!
End of explanation
"""
#save_current_pipeline( NFP, data_dir)
#get_current_pipeline_fullpath(NFP)
"""
Explanation: Save the current pipeline in Results folder
End of explanation
"""
|
JohannesEH/time-series-analysis | Fremont Bridge Analysis.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt;
from jubiiworkflow.data import get_data
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture
plt.style.use('seaborn');
"""
Explanation: Analysis of Seattle Fremont Bridge Bike Traffic
End of explanation
"""
data = get_data()
p = data.resample('W').sum().plot()
p.set_ylim(0, None);
"""
Explanation: Get Data
End of explanation
"""
pivoted = data.pivot_table('Total', index=data.index.time, columns=data.index.date)
pivoted.plot(legend=False, alpha=0.01);
"""
Explanation: Shows a graph of data on a weekly basis. Let's investigate what the pattern is when we look at hourly rates on individual days...
Pivot
Plot a hourly traffic rates for all days in data.
End of explanation
"""
X = pivoted.fillna(0).T.values
X.shape
X2 = PCA(2, svd_solver='full').fit_transform(X)
X2.shape
plt.scatter(X2[:, 0], X2[:, 1]);
"""
Explanation: We can see two types of lines in this graph... One type with two peaks, and another type that have a peak in the middle of the day. We hypothesize that this is a difference between weekdays and weekends. Let's investigate further.
Principal Component Analysis
End of explanation
"""
gmm = GaussianMixture(2).fit(X)
labels = gmm.predict(X)
plt.scatter(X2[:, 0], X2[:, 1], c=labels, cmap='rainbow')
plt.colorbar();
fig, ax = plt.subplots(1, 2, figsize=(14, 6))
pivoted.T[labels == 0].T.plot(legend=False, alpha=0.1, ax=ax[0]);
pivoted.T[labels == 1].T.plot(legend=False, alpha=0.1, ax=ax[1]);
ax[0].set_title('Purple Cluster')
ax[1].set_title('Red Cluster');
"""
Explanation: Unsupervised Clustering
End of explanation
"""
dayofweek = pd.DatetimeIndex(pivoted.columns).dayofweek
plt.scatter(X2[:, 0], X2[:, 1], c=dayofweek, cmap='rainbow')
plt.colorbar();
"""
Explanation: Comparing with Day of Week
End of explanation
"""
dates = pd.DatetimeIndex(pivoted.columns)
dates[(labels == 0) & (dayofweek < 5)]
"""
Explanation: Analyzing Outliers
The following points are weekdays in the "weekend" cluster
End of explanation
"""
|
ozorich/phys202-2015-work | assignments/assignment05/InteractEx03.ipynb | mit | %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
"""
Explanation: Interact Exercise 3
Imports
End of explanation
"""
def soliton(x, t, c, a):
    """Return the soliton profile phi(x, t) for velocity c and offset a.

    Evaluates phi = (c/2) * sech^2( sqrt(c)/2 * (x - c*t - a) ).
    Works element-wise when x (or t) is a NumPy array, and always
    returns a NumPy array.
    """
    arg = 0.5 * np.sqrt(c) * (x - c * t - a)
    return np.array(0.5 * c / np.cosh(arg) ** 2)
x=np.array([1,2,3,4,5])
t=np.array([6,7,8,9,10])
soliton(x,t,1,2)
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
"""
Explanation: Using interact for animation with data
A soliton is a constant velocity wave that maintains its shape as it propagates. They arise from non-linear wave equations, such as the Korteweg–de Vries equation, which has the following analytical solution:
$$
\phi(x,t) = \frac{1}{2} c \mathrm{sech}^2 \left[ \frac{\sqrt{c}}{2} \left(x - ct - a \right) \right]
$$
The constant c is the velocity and the constant a is the initial location of the soliton.
Define a soliton(x, t, c, a) function that computes the value of the soliton wave for the given arguments. Your function should work when the position x or time t are NumPy arrays, in which case it should return a NumPy array itself.
End of explanation
"""
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)
c = 1.0
a = 0.0
"""
Explanation: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays:
End of explanation
"""
# Precompute phi[i, j] = soliton(x[i], t[j], c, a) on the full grid.
# NOTE: the original looped `for i in x` / `for j in t`, which iterates
# over the float *values* of x and t and then uses them as array
# indices -- floats are not valid indices, so loop over the integer
# positions instead.  (collaborated with Jack Porter)
phi = np.empty((xpoints, tpoints), dtype=float)
for i in range(xpoints):
    for j in range(tpoints):
        phi[i, j] = soliton(x[i], t[j], c, a)
phi
assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
"""
Explanation: Compute a 2d NumPy array called phi:
It should have a dtype of float.
It should have a shape of (xpoints, tpoints).
phi[i,j] should contain the value $\phi(x[i],t[j])$.
End of explanation
"""
def plot_soliton_data(i=0):
    """Plot the soliton wave phi(x, t[i]) versus x.

    Parameters
    ----------
    i : int
        Index into the precomputed time array ``t`` (0 <= i < tpoints).
    """
    # Plot against x (the original plotted against the sample index,
    # so the horizontal axis had no physical meaning) and label the axes.
    plt.plot(x, soliton(x, t[i], c, a))
    plt.xlabel('$x$')
    plt.ylabel(r'$\phi(x, t)$')
    plt.title('Soliton wave at t = {:.2f}'.format(t[i]))
plot_soliton_data(0)
assert True # leave this for grading the plot_soliton_data function
"""
Explanation: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
End of explanation
"""
interact(plot_soliton_data,i=(0,100,10))
assert True # leave this for grading the interact with plot_soliton_data cell
"""
Explanation: Use interact to animate the plot_soliton_data function versus time.
End of explanation
"""
|
materialsvirtuallab/matgenb | notebooks/2017-03-02-Getting data from Materials Project.ipynb | bsd-3-clause | from pymatgen.ext.matproj import MPRester
from pymatgen.core import Composition
import re
import pprint
# Make sure that you have the Materials API key. Put the key in the call to
# MPRester if needed, e.g, MPRester("MY_API_KEY")
mpr = MPRester()
"""
Explanation: Introduction
This notebook demonstrates how you can obtain various data from the Materials Project using pymatgen's interface to the Materials API.
End of explanation
"""
comp = Composition("Fe2O3")
# anonymized_formula is a placeholder string such as "A2B3" -- one
# capital-letter symbol per distinct element followed by its count
# (presumably; confirm against the pymatgen docs).
anon_formula = comp.anonymized_formula
# We need to convert the formula to the dict form used in the database.
# Each regex match pairs a placeholder symbol with its integer count,
# e.g. "A2B3" -> {"A": 2, "B": 3}.
anon_formula = {m.group(1): int(m.group(2))
                for m in re.finditer(r"([A-Z]+)(\d+)", anon_formula)}
data = mpr.query({"anonymous_formula": anon_formula},
                 properties=["task_id", "pretty_formula", "structure"])
print(len(data)) # Should show ~600 results.
# data now contains a list of dict. This shows you what each dict has.
# Note that the mp id is named "task_id" in the database itself.
pprint.pprint(data[0])
"""
Explanation: Getting structures with material ids
Let's say you want to find all structures with similar stoichiometry to Fe2O3.
End of explanation
"""
bs = mpr.get_bandstructure_by_material_id("mp-20470")
from pymatgen.electronic_structure.plotter import BSPlotter
%matplotlib inline
plotter = BSPlotter(bs)
plotter.show()
"""
Explanation: Getting band structures
Band structures are fairly large objects. It is not recommended that you download large quantities of bandstructures in one shot, but rather just download the ones you need.
End of explanation
"""
elastic_data = mpr.query({"elasticity": {"$exists": True}},
properties=["task_id", "pretty_formula", "elasticity"])
print(len(elastic_data))
pprint.pprint(elastic_data[0])
"""
Explanation: Getting elastic constants
We have 5000 elastic constants and growing. You can easily get all the elastic data with materials ids as follows.
End of explanation
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher() # You can customize tolerances etc., but the defaults usually work fine.
s1 = data[0]["structure"]
print(s1)
s2 = s1.copy()
s2.apply_strain(0.1)
print(s2)
print(m.fit(s1, s2))
"""
Explanation: More resources
In general, almost any data can be obtained from MP using the MPRester, either via the high-level functions or the very powerful "query" method.
For more complex queries, you can refer to the documentation for the Materials API at https://github.com/materialsproject/mapidoc.
Fitting structures
Pymatgen has its own structure matching algorithm, which we have used to effectively reduce the 130,000 structures in ICSD to ~60,000 - 70,000 structures. It is fast and accurate. Here's an example of how it works.
End of explanation
"""
matches = []
for d in data:
if m.fit_anonymous(d["structure"], s1):
matches.append(d)
# The above fitting took a few seconds. We have 32 similar structures.
print(len(matches))
# Let's see a few of the matches.
pprint.pprint(matches[0])
pprint.pprint(matches[1])
pprint.pprint(matches[2])
"""
Explanation: For something more challenging, let's see how many structures are similar to Gd2O3
End of explanation
"""
|
besser82/shogun | doc/ipython-notebooks/ica/bss_image.ipynb | bsd-3-clause | # change to the shogun-data directory
import os
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
os.chdir(os.path.join(SHOGUN_DATA_DIR, 'ica'))
from PIL import Image
import numpy as np
# Load Images as grayscale images and convert to numpy arrays
s1 = np.asarray(Image.open("lena.jpg").convert('L'))
s2 = np.asarray(Image.open("monalisa.jpg").convert('L'))
# Save Image Dimensions
# we'll need these later for reshaping the images
rows = s1.shape[0]
cols = s1.shape[1]
"""
Explanation: Blind Source Separation on Images with Shogun
by Kevin Hughes
This notebook illustrates <a href="http://en.wikipedia.org/wiki/Blind_signal_separation">Blind Source Separation</a> (BSS) on images using <a href="http://en.wikipedia.org/wiki/Independent_component_analysis">Independent Component Analysis</a> (ICA) in Shogun. This is very similar to the <a href="http://www.shogun-toolbox.org/static/notebook/current/bss_audio.html">BSS audio notebook</a> except that here we have used images instead of audio signals.
The first step is to load 2 images from the Shogun data repository:
End of explanation
"""
%matplotlib inline
import pylab as pl
# Show Images
f,(ax1,ax2) = pl.subplots(1,2)
ax1.imshow(s1, cmap=pl.gray()) # set the color map to gray, only needs to be done once!
ax2.imshow(s2)
"""
Explanation: Displaying the images using pylab:
End of explanation
"""
# Convert Images to row vectors
# and stack into a Data Matrix
S = np.c_[s1.flatten(), s2.flatten()].T
"""
Explanation: In our previous ICA examples the input data or source signals were already 1D but these images are obviously 2D. One common way to handle this case is to simply "flatten" the 2D image matrix into a 1D row vector. The same idea can also be applied to 3D data, for example a 3 channel RGB image can be converted a row vector by reshaping each 2D channel into a row vector and then placing them after each other length wise.
Lets prep the data:
End of explanation
"""
# Mixing Matrix
A = np.array([[1, 0.5], [0.5, 1]])
# Mix Signals
X = np.dot(A,S)
# Show Images
f,(ax1,ax2) = pl.subplots(1,2)
ax1.imshow(X[0,:].reshape(rows,cols))
ax2.imshow(X[1,:].reshape(rows,cols))
"""
Explanation: It is pretty easy using a nice library like numpy.
Next we need to mix our source signals together. We do this exactly the same way we handled the audio data - take a look!
End of explanation
"""
import shogun as sg
mixed_signals = sg.features(X)
# Separating
jade = sg.transformer('Jade')
jade.fit(mixed_signals)
signals = jade.transform(mixed_signals)
S_ = signals.get('feature_matrix')
# Show Images
f,(ax1,ax2) = pl.subplots(1,2)
ax1.imshow(S_[0,:].reshape(rows,cols) *-1)
ax2.imshow(S_[1,:].reshape(rows,cols))
"""
Explanation: Notice how we had to reshape from a 1D row vector back into a 2D matrix of the correct shape. There is also another nuance that I would like to mention here: pylab is actually doing quite a lot for us here that you might not be aware of. It does a pretty good job determining the value range of the image to be shown and then it applies the color map. Many other libraries (for example OpenCV's highgui) won't be this helpful and you'll need to remember to scale the image appropriately on your own before trying to display it.
Now onto the exciting step, unmixing the images using ICA! Again this step is the same as when using Audio data. Again we need to reshape the images before viewing them and an additional nuance was to add the *-1 to the first separated signal. I did this after viewing the result the first time, as the image was clearly inverted; this can happen because ICA can't necessarily capture the correct phase.
End of explanation
"""
|
amitkaps/machine-learning | time_series/4-Explore.ipynb | mit | # Import the library we need, which is Pandas
import pandas as pd
"""
Explanation: 4. Explore the Data
"I don't know, what I don't know"
We want to first visually explore the data to see if we can confirm some of our initial hypotheses as well as make new hypothesis about the problem we are trying to solve.
For this we will start by loading the data and understanding the data structure of the dataframe we have.
Lets read the data
End of explanation
"""
# Read the csv file of Monthwise Quantity and Price csv file we have.
df = pd.read_csv('MonthWiseMarketArrivals_clean.csv')
"""
Explanation: You will find the variable df used quite often to store a dataframe
End of explanation
"""
df.shape
df.head()
"""
Explanation: Understand Data Structure and Types
End of explanation
"""
# Get the typeof each column
df.dtypes
"""
Explanation: Data Structure
So we have ten columns in our dataset. Let us understand what each one is.
Three are about the location of the Wholesale Market where Onion where sold.
- state: This is the 2/3 letter abbreviation for the state in India (PB is Punjab and so on)
- city: This is the city in India (ABOHAR, BANGALORE and so on)
- market: This is a string with the combination of the state and city
Three are related to the
- month: Month in January, February and so on.
- year: Year in YYYY representation
- date: The combination of the two above.
Four are about quantity and price in these wholesale market.
- quantity: The quantity of Onion arriving in the market in that month in quintals (100 kg)
- priceMin: The minimum price in the month in Rs./quintal
- priceMax: The maximum price in the month in Rs./quintal
- priceMod: The modal price in the month in Rs./quintal
We would expect the following the columns to be of the following type
- CATEGORICAL: state, city, market
- TIME INTERVAL: month, year, date
- QUANTITATIVE: quantity, priceMin, priceMax, priceModal
Let us see what pandas dataframe is reading these columns as.
End of explanation
"""
# Changing the date column to a Time Interval columnn
df.date = pd.DatetimeIndex(df.date)
df.shape
# Now checking for type of each column
df.dtypes
# Let us see the dataframe again now
df.head()
# df.city.unique()
# Functional Approach
pd.unique(df.city)
"""
Explanation: So we are getting the quantitive columns are correctly being shown as integers and the categorical columns are showing as objects(strings) which is fine.
However, the date columns is being read as an object and not a Time-Interval. Let us at least fix the date column and make it into a datetime object
End of explanation
"""
df2010 = df[df.year == 2010]
df2010.head()
# We can also subset on multiple criterias
df2010Bang = df[(df.year == 2010) & (df.city == 'BANGALORE')]
df2010Bang.head()
"""
Explanation: Question 1 - How big is the Bangalore onion market compared to other cities in India?
Let us try to do this examination for one of the year only. So we want to reduce our dataframe for only where the year = 2010. This process is called subset.
PRINCIPLE: Subset a Dataframe
How do you subset a dataframe on a given criteria
newDataframe = df[ <subset condition> ]
End of explanation
"""
# Group by using city
df2010City = df2010.groupby(['city']).sum()
df2010City.head()
type(df2010City)
# If we only want to apply the sum function on quantity, then we specify the quantity column
df2010City = df2010.groupby(['city']).quantity.sum()
# Let us see this dataframe
df2010City.head()
type(df2010City)
# To create a dataframe again, it is best to specify index as false
df2010City = df2010.groupby(['city'], as_index=False).quantity.sum()
df2010City.head()
sorted(df2010City.quantity)
# Sort the Dataframe by Quantity to see which one is on top
df2010City = df2010City.sort_values(by = "quantity", ascending = False)
df2010City.head()
%timeit sorted(df2010City.quantity)
%timeit df2010City.quantity.sort_values()
%timeit df2010City.sort_values(by = "quantity", ascending = False)
"""
Explanation: Principle: Split Apply Combine
How do we get the sum of quantity for each city.
We need to SPLIT the data by each city, APPLY the sum to the quantity row and then COMBINE the data again
In pandas, we use the groupby function to do this.
End of explanation
"""
# Load the visualisation libraries - Matplotlib
import matplotlib.pyplot as plt
# Let us see the output plots in the notebook itself
%matplotlib inline
# Set some parameters to get good visuals - style to ggplot and size to 15,10
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (15, 10)
# Plot the Data
df2010City.plot(kind ="barh", x = 'city', y = 'quantity')
"""
Explanation: PRINCIPLE: Visual Exploration
Lets load the libraries required for plotting in python
End of explanation
"""
df2015 = df[]
"""
Explanation: Exercise: Find the State with Highest Quantity Sales in 2015?
End of explanation
"""
df.head()
dfBang = df[df.city == 'BANGALORE']
dfBang.head()
dfBang.describe()
# Reduce the precision of numbers - so that it is easy to read
pd.set_option('precision', 0)
dfBang.describe()
"""
Explanation: Exercise: Plot the State and Cities with Quantity Sales in 2015
Question 2 - Have the price variation in Onion prices in Bangalore really gone up over the years?
End of explanation
"""
dfBang.head()
dfBang.index
# Set the index as date
dfBang = dfBang.sort_values(by = "date")
dfBang.head()
# Set the Index for the Dataframe
dfBang.index = pd.PeriodIndex(dfBang.date, freq='M')
dfBang.head()
dfBang.priceMod.plot()
dfBang.plot(kind = "line", y = ['priceMin', 'priceMod', 'priceMax'])
"""
Explanation: PRINCIPLE: Setting Index
End of explanation
"""
dfBang['priceDiff'] = dfBang['priceMax'] - dfBang['priceMin']
dfBang.head()
dfBang.plot(kind = 'line', y = 'priceDiff')
"""
Explanation: To calculate the range of change, we will create a new price difference variable - which is the difference between the priceMin and priceMax
End of explanation
"""
# Create new variable for Integer Month
dfBang['monthVal'] = pd.DatetimeIndex(dfBang['date']).month
dfBang.head()
dfBangPivot = pd.pivot_table(dfBang, values = "priceDiff",
columns = "year", index = "monthVal")
dfBangPivot
dfBangPivot.plot()
dfBangPivot.plot(subplots = True, figsize=(15, 15), layout=(3, 5), sharey=True)
"""
Explanation: PRINCIPLE: Pivot Table
Pivot table is a way to summarize data frame data into index (rows), columns and value
End of explanation
"""
dfBangPivot.plot(subplots = True, figsize=(15, 15), layout=(3, 5),
sharey=False)
"""
Explanation: Exercise: Find the price variation for LASALGAON city?
End of explanation
"""
|
Caranarq/01_Dmine | Datasets/SEPOMEX/SEPOMEX.ipynb | gpl-3.0 | # Librerias utilizadas
import pandas as pd
import sys
import os
import csv
import urllib
# Download the raw CSV to a local folder, then load it with pandas.
# NOTE: in Python 3, `import urllib` alone does NOT make the `request`
# submodule available; import it explicitly before using urlretrieve.
import urllib.request

fuente = r'https://github.com/redrbrt/sepomex-zip-codes/raw/master/sepomex_abril-2016.csv'
destino = r'D:\PCCS\00_RawData\01_CSV\SEPOMEX\sepomex_abril-2016.csv'
urllib.request.urlretrieve(fuente, destino)
# Import the dataset into Python
dataset = pd.read_csv(destino)
dataset.head(3)
# Normalise the key columns to fixed-width, zero-padded strings, then build
# the standard 5-digit municipality key (state id + municipality id).
dataset['idEstado'] = dataset.idEstado.astype(int).astype(str).str.zfill(2)       # 2 digits (00)
dataset['idMunicipio'] = dataset.idMunicipio.astype(int).astype(str).str.zfill(3) # 3 digits (000)
dataset['cp'] = dataset.cp.astype(int).astype(str).str.zfill(5)                   # 5 digits (00000)
dataset['CVE_MUN'] = dataset['idEstado'] + dataset['idMunicipio']                 # standard CVE_MUN
dataset = dataset.rename(columns={'cp': 'CP'})
dataset.head(3)
# Create an independent working copy ("copia de trabajo"): plain assignment
# (`SEPOMEX1 = dataset`) would only alias the frame, so the column deletions
# below would silently mutate the original `dataset` as well.
SEPOMEX1 = dataset.copy()
"""
Explanation: Base de datos de códigos postales del Servicio Postal Mexicano
Introduccion
Tuve la fortuna de encontrar la base de datos en un repositorio de Github que alguien ya había minado previamente.
la URL al repositorio es https://github.com/redrbrt/sepomex-zip-codes
Revision al dataset
End of explanation
"""
# Drop the columns not needed for parameter construction (state/municipality
# are recoverable from CVE_MUN), then keep only CP and CVE_MUN, in that order.
cols_to_drop = ['idEstado', 'estado', 'idMunicipio', 'municipio', 'ciudad', 'zona', 'asentamiento', 'tipo']
SEPOMEX1.drop(cols_to_drop, axis=1, inplace=True)
SEPOMEX1 = SEPOMEX1[['CP', 'CVE_MUN']]
SEPOMEX1.head(3)
"""
Explanation: Debido a que el estado y municipio pueden ser derivados desde la CVE_MUN de 5 dígitos, voy a eliminar las columnas:
* idEstado
* estado
* idMunicipio
* municipio
Tambien eliminaré otras columnas por diferentes motivos:
Ciudad, pues esta será asignada acorde a la Clave SUN
Zona, porque 'rural/urbano' no se utiliza para la construccion de parametros
Asentamiento, porque es un nivel de desagregación que no se utiliza
tipo, porque no se utiliza para la construccion de parametros
End of explanation
"""
# Report the row count before and after removing duplicate postal codes
# (the raw SEPOMEX table has one row per "colonia", hence repeated CPs).
n_before = len(SEPOMEX1)
print('[{}] El dataset inició con {} filas, incluyendo repetidas'.format(n_before, n_before))
SEPOMEX1 = SEPOMEX1.drop_duplicates(keep='first', subset=['CP'])
n_after = len(SEPOMEX1)
print('[{}] Al quitar las repetidas, el dataset queda con {} filas'.format(n_after, n_after))
SEPOMEX1.head()
# Save the cleaned CP -> CVE_MUN table as an Excel workbook.
file = r'D:\PCCS\01_Dmine\Datasets\SEPOMEX\sepomex_CP_CVEMUN.xlsx'
# A context manager guarantees the workbook is flushed and the handle closed
# even on error; ExcelWriter.save() was deprecated and removed in pandas 2.0.
with pd.ExcelWriter(file) as writer:
    SEPOMEX1.to_excel(writer, sheet_name = 'DATOS')
print('---------------TERMINADO---------------')
"""
Explanation: De este dataset hay que eliminar renglones duplicados (como ejemplo, los renglones 1 y 2 impresos arriba), que existen porque la base de datos del SEPOMEX contiene un renglón individual cada colonia de las que integran un código postal.
End of explanation
"""
|
SunPower/pvfactors | docs/tutorials/Create_discretized_pvarray.ipynb | bsd-3-clause | # Import external libraries
import matplotlib.pyplot as plt
# Settings
%matplotlib inline
"""
Explanation: Discretize PV row sides and indexing
In this section, we will learn how to:
create a PV array with discretized PV row sides
understand the indices of the timeseries surfaces of a PV array
plot a PV array with indices shown on plot
Imports and settings
End of explanation
"""
# Geometry and sun-position parameters for a 3-row ordered PV array.
# Angles are in degrees; lengths share one arbitrary unit; gcr is the
# ratio of PV row width to row-to-row spacing.
pvarray_parameters = {
    'n_pvrows': 3, # number of pv rows
    'pvrow_height': 1, # height of pvrows (measured at center / torque tube)
    'pvrow_width': 1, # width of pvrows
    'axis_azimuth': 0., # azimuth angle of rotation axis
    'surface_tilt': 20., # tilt of the pv rows
    'surface_azimuth': 270., # azimuth of the pv rows front surface
    'solar_zenith': 40., # solar zenith angle
    'solar_azimuth': 150., # solar azimuth angle
    'gcr': 0.5, # ground coverage ratio
}
"""
Explanation: Prepare PV array parameters
End of explanation
"""
# Discretization scheme: 'cut' maps a PV row index to {side: n_segments}.
discretization = {'cut':{
    0: {'back': 5},  # discretize the back side of the leftmost PV row into 5 segments
    1: {'front': 3}  # discretize the front side of the center PV row into 3 segments
}}
pvarray_parameters.update(discretization)
"""
Explanation: Create discretization scheme
End of explanation
"""
from pvfactors.geometry import OrderedPVArray
# Create pv array
# Fitting builds ALL timeseries surfaces up front (including shaded ones
# that may have zero length at a given timestamp -- see below).
pvarray = OrderedPVArray.fit_from_dict_of_scalars(pvarray_parameters)
"""
Explanation: Create a PV array
Import the OrderedPVArray class and create a PV array object using the parameters above
End of explanation
"""
# Plot pvarray shapely geometries
# Render the PV array geometry at timestamp index 0.
f, ax = plt.subplots(figsize=(10, 3))
pvarray.plot_at_idx(0, ax)
plt.show()
"""
Explanation: Plot the PV array at index 0
End of explanation
"""
# Confirm the discretization was applied: count segments per discretized side.
# (pvrow_left / pvrow_center are reused by later cells, so keep these names.)
pvrow_left = pvarray.ts_pvrows[0]
print("Back side of leftmost PV row has {} segments".format(
    len(pvrow_left.back.list_segments)))
pvrow_center = pvarray.ts_pvrows[1]
print("Front side of center PV row has {} segments".format(
    len(pvrow_center.front.list_segments)))
"""
Explanation: As we can see, there is some discretization on the leftmost and the center PV rows.
We can check that it was correctly done using the pvarray object.
End of explanation
"""
# List the index of every timeseries surface on the center row's front side.
# (ts_surface_list is reused by the next cell, so keep this name.)
ts_surface_list = pvrow_center.front.all_ts_surfaces
print("Indices of surfaces on front side of center PV row")
for surface in ts_surface_list:
    print("... surface index: {}".format(surface.index))
"""
Explanation: Indexing the timeseries surfaces in a PV array
In order to perform some calculations on PV array surfaces, it is often important to index them.
pvfactors takes care of this.
We can for instance check the index of the timeseries surfaces on the front side of the center PV row
End of explanation
"""
# Show that the shaded timeseries surfaces exist but have zero length at
# this timestamp (they were still created at fitting time).
for surface in ts_surface_list:
    print("Surface with index: '{}' has shading status '{}' and length {} m".format(
        surface.index, surface.shaded, surface.length))
"""
Explanation: Intuitively, one could have expected only 3 timeseries surfaces because that's what the previous plot at index 0 was showing.
But it is important to understand that ALL timeseries surfaces are created at PV array fitting time, even the ones that don't exist for the given timestamps.
So in this example:
- we have 3 illuminated timeseries surfaces, which do exist at timestamp 0
- and 3 shaded timeseries surfaces, which do NOT exist at timestamp 0 (so they have zero length).
Let's check that.
End of explanation
"""
# Plot pvarray shapely geometries with surface indices
# Only surfaces with non-zero length at timestamp 0 are labeled.
f, ax = plt.subplots(figsize=(10, 4))
pvarray.plot_at_idx(0, ax, with_surface_index=True)
ax.set_xlim(-3, 5)
plt.show()
"""
Explanation: As expected, all shaded timeseries surfaces on the front side of the PV row have length zero.
Plot PV array with indices
It is possible also to visualize the PV surface indices of all the non-zero surfaces when plotting a PV array, for a given timestamp (here at the first timestamp, so 0).
End of explanation
"""
|
AMICI-developer/AMICI | documentation/GettingStarted.ipynb | bsd-2-clause | import amici
sbml_importer = amici.SbmlImporter('model_steadystate_scaled.xml')
"""
Explanation: Getting Started in AMICI
This notebook is a brief tutorial for new users that explains the first steps necessary for model simulation in AMICI, including pointers to documentation and more advanced notebooks.
Model Compilation
Before simulations can be run, the model must be imported and compiled. In this process, AMICI performs all symbolic manipulations that later enable scalable simulations and efficient sensitivity computation. The first towards model compilation is the creation of an SbmlImporter instance, which requires an SBML Document that specifies the model using the Systems Biology Markup Language (SBML).
For the purpose of this tutorial, we will use model_steadystate_scaled.xml, which is contained in the same directory as this notebook.
End of explanation
"""
# Generate C++ code and compile the model as a Python extension module.
model_name = 'model_steadystate'  # name of the generated python module
model_dir = 'model_dir'  # output directory for the generated module
sbml_importer.sbml2amici(model_name, model_dir)
"""
Explanation: Next, we will compile the model as python extension using the amici.SBMLImporter.sbml2amici method. The first two arguments of this method are the name of the model, which will also be the name of the generated python module, and the model directory, which defines the directory in which the model module will be placed. Compilation will take a couple of seconds.
End of explanation
"""
# load the model module
model_module = amici.import_model_module(model_name, model_dir)
# instantiate model
model = model_module.getModel()
# instantiate solver (matched to this model; controls SUNDIALS settings)
solver = model.getSolver()
"""
Explanation: Loading the model module
To run simulations, we need to instantiate amici.Model and amici.Solver instances. As simulations requires instances matching the imported model, they have to be imported from the generated model module.
End of explanation
"""
# Set model parameter p1 (addressed by its SBML id) to 1e-3.
model.setParameterByName('p1',1e-3)
"""
Explanation: The model allows the user to manipulate model related properties of simulations. This includes the values of model parameters that can be set by using amici.Model.setParameterByName. Here, we set the model parameter p1 to a value of 1e-3.
End of explanation
"""
# Tighten the SUNDIALS absolute integration tolerance to 1e-10.
solver.setAbsoluteTolerance(1e-10)
"""
Explanation: In contrast, the solver instance allows the specification of simulation related properties. This includes setting options for the SUNDIALS solver such as absolute tolerances via amici.Solver.setAbsoluteTolerance. Here we set the absolute integration tolerances to 1e-10.
End of explanation
"""
# set timepoints
model.setTimepoints([0,1])
# Run the forward simulation; results come back as a ReturnData instance.
rdata = amici.runAmiciSimulation(model, solver)
"""
Explanation: Running Model Simulations
Model simulations can be executed using the amici.runAmiciSimulations routine. By default the model does not not contain any timepoints for which the model is to be simulated. Here we define a simulation timecourse with two timepoints at 0 and 1 and then run the simulation.
End of explanation
"""
# Simulated state trajectories: rows = timepoints, columns = SBML species.
rdata.x
"""
Explanation: Simulation results are returned as ReturnData instance. The simulated SBML species are stored as x attribute, where rows correspond to the different timepoints and columns correspond to different species.
End of explanation
"""
# Column ordering of rdata.x follows the model's state (species) names.
model.getStateNames()
"""
Explanation: All results attributes are always ordered according to the model. For species, this means that the columns of rdata.x match the ordering of species in the model, which can be accessed as amici.Model.getStateNames
End of explanation
"""
|
bambinos/bambi | docs/notebooks/alternative_links_binary.ipynb | mit | import arviz as az
import bambi as bmb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.special import expit as invlogit
from scipy.stats import norm
# ArviZ plotting style plus a fixed RNG seed for reproducible sampling.
az.style.use("arviz-darkgrid")
np.random.seed(1234)
"""
Explanation: Regression for Binary responses: Alternative link functions
In this example we use a simple dataset to fit a Generalized Linear Model for a binary response using different link functions.
End of explanation
"""
def invcloglog(x):
    """Inverse of the complementary log-log link: 1 - exp(-exp(x))."""
    exp_x = np.exp(x)
    return 1 - np.exp(-exp_x)
# Evaluate the three inverse link functions on a common grid.
x = np.linspace(-5, 5, num=200)
# inverse of the logit function
logit = invlogit(x)
# cumulative density function of standard gaussian
probit = norm.cdf(x)
# inverse of the cloglog function
cloglog = invcloglog(x)
# Overlay the curves; reference lines at x = 0 and pi = 0.5 make the
# (a)symmetry of each link visible.
plt.plot(x, logit, color="C0", lw=2, label="Logit")
plt.plot(x, probit, color="C1", lw=2, label="Probit")
plt.plot(x, cloglog, color="C2", lw=2, label="CLogLog")
plt.axvline(0, c="k", alpha=0.5, ls="--")
plt.axhline(0.5, c="k", alpha=0.5, ls="--")
plt.xlabel(r"$x$")
plt.ylabel(r"$\pi$")
plt.legend();
"""
Explanation: Generalized linear models for binary response
First of all, let's review some concepts. A Generalized Linear Model (GLM) is made of three components.
1. Random component
A set of independent and identically distributed random variables $Y_i$. Their (conditional) probability distribution belongs to the same family $f$ with a mean given by $\mu_i$.
2. Systematic component (a.k.a linear predictor)
Constructed by a linear combination of the parameters $\beta_j$ and explanatory variables $x_j$, represented by $\eta_i$
$$
\eta_i = \mathbf{x}i^T\mathbf{\beta} = x{i1}\beta_1 + x_{i2}\beta_2 + \cdots + x_{ip}\beta_p
$$
3. Link function
A monotone and differentiable function $g$ such that
$$
g(\mu_i) = \eta_i = \mathbf{x}_i^T\mathbf{\beta}
$$
where $\mu_i = E(Y_i)$
As we can see, this function specifies the link between the random and the systematic components of the model.
An important feature of GLMs is that no matter we are modeling a function of $\mu$ (and not just $\mu$, unless $g$ is the identity function) is that we can show predictions in terms of the mean $\mu$ by using the inverse of $g$ on the linear predictor $\eta_i$
$$
g^{-1}(\eta_i) = g^{-1}(\mathbf{x}_i^T\mathbf{\beta}) = \mu_i
$$
In Bambi, we can use family="bernoulli" to tell we are modeling a binary variable that follows a Bernoulli distribution and our random component is of the form
$$
Y_i =
\left{
\begin{array}{ll}
1 & \textrm{with probability } \pi_i \
0 & \textrm{with probability } 1 - \pi_i
\end{array}
\right.
$$
that has a mean $\mu_i$ equal to the probability of success $\pi_i$.
By default, this family implies $g$ is the logit function.
$$
\begin{array}{lcr}
\displaystyle \text{logit}(\pi_i) = \log{\left( \frac{\pi_i}{1 - \pi_i} \right)} = \eta_i &
\text{ with } &
\displaystyle g^{-1}(\eta) = \frac{1}{1 + e^{-\eta}} = \pi_i
\end{array}
$$
But there are other options available, like the probit and the cloglog link functions.
The probit function is the inverse of the cumulative density function of a standard Gaussian distribution
$$
\begin{array}{lcr}
\displaystyle \text{probit}(\pi_i) = \Phi^{-1}(\pi_i) = \eta_i &
\text{ with } &
\displaystyle g^{-1}(\eta) = \Phi(\eta_i) = \pi_i
\end{array}
$$
And with the cloglog link function we have
$$
\begin{array}{lcr}
\displaystyle \text{cloglog}(\pi_i) = \log(-\log(1 - \pi)) = \eta_i &
\text{ with } &
\displaystyle g^{-1}(\eta) = 1 - \exp(-\exp(\eta_i)) = \pi_i
\end{array}
$$
cloglog stands for complementary log-log and $g^{-1}$ is the cumulative density function of the extreme minimum value distribution.
Let's plot them to better understand the implications of what we're saying.
End of explanation
"""
# Beetle mortality data (Bliss, 1935): dose, number exposed, number killed.
x = np.array([1.6907, 1.7242, 1.7552, 1.7842, 1.8113, 1.8369, 1.8610, 1.8839])
n = np.array([59, 60, 62, 56, 63, 59, 62, 60])
y = np.array([6, 13, 18, 28, 52, 53, 61, 60])
data = pd.DataFrame({"x": x, "n": n, "y": y})
"""
Explanation: In the plot above we can see both the logit and the probit links are symmetric in terms of their slopes at $-x$ and $x$. We can say the function approaches $\pi = 0.5$ at the same rate as it moves away from it. However, these two functions differ in their tails. The probit link approaches 0 and 1 faster than the logit link as we move away from $x=0$. Just see the orange line is below the blue one for $x < 0$ and it is above for $x > 0$. In other words, the logit function has heavier tails than the probit.
On the other hand, the cloglog does not present this symmetry, and we can clearly see it since the green line does not cross the point (0, 0.5). This function approaches faster the 1 than 0 as we move away from $x=0$.
Load data
We use a data set consisting of the numbers of beetles dead after five hours of exposure to gaseous carbon disulphide at various concentrations. This data can be found in An Introduction to Generalized Linear Models by A. J. Dobson and A. G. Barnett, but the original source is (Bliss, 1935).
| Dose, $x_i$ <br />($\log_{10}\text{CS}_2\text{mgl}^{-1}$)| Number of beetles, $n_i$ | Number killed, $y_i$ |
| --- | --- | --- |
| 1.6907 | 59 | 6 |
| 1.7242 | 60 | 13 |
| 1.7552 | 62 | 18 |
| 1.7842 | 56 | 28 |
| 1.8113 | 63 | 52 |
| 1.8369 | 59 | 53 |
| 1.8610 | 62 | 61 |
| 1.8839 | 60 | 60 |
We create a data frame where the data is in long format (i.e. each row is an observation with a 0-1 outcome).
End of explanation
"""
# Model the proportion of deaths y/n as a function of dose x;
# p(y, n) is Bambi's shorthand for proportion(y, n).
formula = "p(y, n) ~ x"
"""
Explanation: Build the models
Bambi has two families to model binary data: Bernoulli and Binomial. The first one can be used when each row represents a single observation with a column containing the binary outcome, while the second is used when each row represents a group of observations or realizations and there's one column for the number of successes and another column for the number of trials.
Since we have aggregated data, we're going to use the Binomial family. This family requires using the function proportion(y, n) on the left side of the model formula to indicate we want to model the proportion between two variables. This function can be replaced by any of its aliases prop(y, n) or p(y, n). Let's use the shortest one here.
End of explanation
"""
# Binomial family uses the logit link by default.
model_logit = bmb.Model(formula, data, family="binomial")
idata_logit = model_logit.fit(draws=2000)
"""
Explanation: Logit link
The logit link is the default link when we say family="binomial", so there's no need to add it.
End of explanation
"""
# Same model with the probit link (inverse of the standard normal CDF).
model_probit = bmb.Model(formula, data, family="binomial", link="probit")
idata_probit = model_probit.fit(draws=2000)
"""
Explanation: Probit link
End of explanation
"""
# Same model with the asymmetric complementary log-log link.
model_cloglog = bmb.Model(formula, data, family="binomial", link="cloglog")
idata_cloglog = model_cloglog.fit(draws=2000)
"""
Explanation: Cloglog link
End of explanation
"""
def get_predictions(model, idata, seq):
    """Posterior-mean probability of death evaluated at the dose values `seq`.

    Runs out-of-sample prediction via `model.predict`, then averages the
    posterior samples of the mean response across all chains and draws.
    """
    out_of_sample = pd.DataFrame({"x": seq})
    model.predict(idata, data=out_of_sample)
    samples = idata.posterior["p(y, n)_mean"].stack(samples=("chain", "draw"))
    return samples.mean(axis=1)
# Dose grid for out-of-sample prediction curves under each link.
x_seq = np.linspace(1.6, 2, num=200)
mu_logit = get_predictions(model_logit, idata_logit, x_seq)
mu_probit = get_predictions(model_probit, idata_probit, x_seq)
mu_cloglog = get_predictions(model_cloglog, idata_cloglog, x_seq)
# Observed death proportions (points) against the three fitted curves.
plt.scatter(x, y / n, c = "white", edgecolors = "black", s=100)
plt.plot(x_seq, mu_logit, lw=2, label="Logit")
plt.plot(x_seq, mu_probit, lw=2, label="Probit")
plt.plot(x_seq, mu_cloglog, lw=2, label="CLogLog")
plt.axhline(0.5, c="k", alpha=0.5, ls="--")
plt.xlabel(r"Dose $\log_{10}CS_2mgl^{-1}$")
plt.ylabel("Probability of death")
plt.legend();
"""
Explanation: Results
We can use the samples from the posteriors to see the mean estimate for the probability of dying at each concentration level. To do so, we use a little helper function that will help us to write less code. This function leverages the power of the new Model.predict() method that is helpful to obtain both in-sample and out-of-sample predictions.
End of explanation
"""
# Record package versions for reproducibility (IPython magics).
%load_ext watermark
%watermark -n -u -v -iv -w
"""
Explanation: In this example, we can see the models using the logit and probit link functions present very similar estimations. With these particular data, all the three link functions fit the data well and the results do not differ significantly. However, there can be scenarios where the results are more sensitive to the choice of the link function.
References
Bliss, C. I. (1935). The calculation of the dose-mortality curve. Annals of Applied Biology 22, 134–167
End of explanation
"""
|
tolaoniyangi/dmc | notebooks/week-4/02-tensorflow ANN for classification.ipynb | apache-2.0 | %matplotlib inline
import math
import random
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_boston
'''Since this is a classification problem, we will need to
represent our targets as one-hot encoding vectors (see previous lab).
To do this we will use scikit-learn's OneHotEncoder module
which we import here'''
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import tensorflow as tf
# Seaborn styling for the plots below.
sns.set(style="ticks", color_codes=True)
"""
Explanation: Assignment - part 2
Now that we have a better understanding of how to set up a basic neural network in Tensorflow, let's see if we can convert our dataset to a classification problem, and then rework our neural network to solve it. I will replicate most of our code from the previous assignment below, but leave blank spots where you should implement changes to convert our regression model into a classification one. Look for text descriptions above code blocks explaining the changes that need to be made, and #UPPERCASE COMMENTS where the new code should be written.
End of explanation
"""
# Load the Boston housing data and convert the continuous target into a
# binary category: 1 if the house value is above average, 0 otherwise.
dataset = load_boston()
houses = pd.DataFrame(dataset.data, columns=dataset.feature_names)
houses['target'] = dataset.target #target would be y

# CONVERT 'TARGET' COLUMN FROM CONTINUOUS TO CATEGORICAL.
averageValue = np.mean(dataset.target)
# A single vectorized comparison replaces the original element-by-element
# while-loop and its DataFrame.set_value() calls (set_value was deprecated
# and later removed from pandas). Result is an int column of 0/1.
houses['target'] = (dataset.target > averageValue).astype(int)
'''check your work'''
# Sanity check on the categorical target (Python 2 print statements; this
# notebook states it runs only on a python 2 kernel).
print np.max(houses['target']), "<-- should be 1"
print np.min(houses['target']), "<-- should be 0"
"""
Explanation: 1. Target data format
The first step is to change the target of the dataset from a continuous variable (the value of the house) to a categorical one. In this case we will change it to have two categories, specifying whether the value of the house is higher or lower than the average.
In the code block below, write code to change the ‘target’ column to a categorical variable instead of a continuous one. This variable should be 1 if the target is higher than the average value, and 0 if it is lower. You can use np.mean() to calculate the average value. Then, you can iterate over all entries in the column, and compare each value to the average to decide if it is higher or lower. Finally, you can use the int() function to convert the True/False values to 0 and 1.
End of explanation
"""
# Shuffle before splitting so train/test sets are random samples.
# NOTE(review): as_matrix() was removed in modern pandas (use .values);
# kept as-is since this notebook targets an older py2 stack.
houses_array = houses.as_matrix().astype(float)
np.random.shuffle(houses_array)
X = houses_array[:, :-1]
y = houses_array[:, -1]
# USE SCIKIT-LEARN'S ONE-HOT ENCODING MODULE TO
# CONVERT THE y ARRAY OF TARGETS TO ONE-HOT ENCODING.
enc = OneHotEncoder() # create an instance of the one-hot encoding function from the sci-kit learn library
y = y.reshape(-1,1) # convert the list of targets to a vertical matrix with the dimensions [1 x number of samples]
# this is necessary for later computation
enc.fit(y) # use the function to figure out how many categories exist in the data
# .toarray() densifies the sparse OHE output (equivalent to sparse=False).
y = enc.transform(y).toarray()
#code below is from danil
# Scale each feature to [0, 1] by dividing by its per-column maximum.
X = X / X.max(axis=0)
trainingSplit = int(.7 * houses_array.shape[0]) # split data into training and test sets
X_train = X[:trainingSplit]
y_train = y[:trainingSplit]
X_test = X[trainingSplit:]
y_test = y[trainingSplit:]
print('Training set', X_train.shape, y_train.shape)
print('Test set', X_test.shape, y_test.shape)
'''check your work'''
# Python 2 print statements below (py2 kernel).
print y_train.shape[1], "<-- should be 2"
print y_test.shape[1], "<-- should be 2"
print y_train[0], "<-- should be either [0. 1.] or [1. 0.]"
# helper variables
num_samples = X_train.shape[0]
num_features = X_train.shape[1]
num_outputs = y_train.shape[1]  # 2 after one-hot encoding
# Hyper-parameters
batch_size = 22 #reducing batch size
num_hidden_1 = 28 #slightly reducing number of hidden neurons
num_hidden_2 = 28
learning_rate = 0.1 #increasing
training_epochs = 500 #increasing
# keep_prob of 1 means every unit is kept, i.e. dropout is disabled.
dropout_keep_prob = 1 # 0.5 # set to no dropout by default
# variable to control the resolution at which the training results are stored
display_step = 1
"""
Explanation: 2. Target data encoding
Since we are now dealing with a classification problem, our target values need to be encoded using one-hot encoding (OHE) (see Lab 3 for a description of what this is and why it's necessary). In the code block below, use scikit-learn's OneHotEncoder() module to convert the y target array to OHE.
hint: when you create the OneHotEncoder object, pass in the argument sparse=False to give the resulting data the proper formatting. Each value in y should then be a two-part array, either [0,1] or [1,0], depending on the target value.
End of explanation
"""
def accuracy(predictions, targets):
    """Percentage of samples whose predicted class matches the target class.

    Both `predictions` and `targets` are (n_samples, n_classes) arrays
    (softmax confidences and one-hot encodings respectively); argmax along
    axis 1 reduces each row to its most likely class.
    """
    predicted_classes = np.argmax(predictions, 1)
    target_classes = np.argmax(targets, 1)
    return 100.0 * np.mean(predicted_classes == target_classes)
def weight_variable(shape):
    """TF variable of the given shape, initialized from a truncated normal (std 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """TF bias variable of the given shape, filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
"""
Explanation: 3. Perfomance measure
Instead of measuring the average error in the prediction of a continuous variable, we now want our performance measure to be the number of samples for which we guess the right category.
As before, this function takes in an array of predictions and an array of targets. This time, however, each prediction or target is represented by a two-piece array. With the predictions, the two values represent the confidence of the system for choosing either value as the category. Because these predictions are generated through the softmax function, they are guaranteed to add up to 1.0, so they can be interpreted as the percentage of confidence behind each category. In our two category example,
A prediction of [1,0] means complete confidence that the sample belongs in the first category
A prediction of [0,1] means complete confidence that the sample belongs in the second category
A prediction of [0.5,0.5] means the system is split, and cannot clearly decide which category the sample belongs to.
With the targets, the two values are the one-hot encodings generated previously. You can now see how the one-hot encoding actually represents the target values in the same format as the predictions coming from the model. This is helpful because while the model is training, all it has to do is try to match the prediction arrays to the encoded targets. In fact, this is exactly what our modified cost function will do.
For our accuracy measure, we want to take these two arrays of predictions and targets, see how many of them match (correct classification), then devide by the total number of predictions to get the ratio of accurate guesses, and multiply by 100.0 to convert it to a percentage.
hints:
numpy's np.argmax() function will give you the position of the largest value in the array along an axis, so executing np.argmax(predictions, 1) will convert the confidence measures to the single most likely category.
once you have a list of single-value predictions, you can compare them using the '==' operator to see how many match (matches result in a 'True' and mismatches result in a 'False')
you can use numpy's np.sum() function to find out the total number of 'True' statements, and divide them by the total number of predictions to get the ratio of accurate predictions.
End of explanation
"""
# Build the TF1 computation graph: placeholders, 2 hidden layers + output
# layer, softmax predictions, cross-entropy loss and a gradient-descent step.
graph = tf.Graph()
with graph.as_default():
    # Placeholders for a batch of inputs, one-hot targets and dropout keep prob.
    x = tf.placeholder(tf.float32, shape=(None, num_features))
    _y = tf.placeholder(tf.float32, shape=(None))
    keep_prob = tf.placeholder(tf.float32)
    # Full train/test sets baked into the graph as constants for evaluation.
    tf_X_test = tf.constant(X_test, dtype=tf.float32)
    tf_X_train = tf.constant(X_train, dtype=tf.float32)
    # Weights and biases: features -> hidden1 -> hidden2 -> 2 output classes.
    W_fc1 = weight_variable([num_features, num_hidden_1])
    b_fc1 = bias_variable([num_hidden_1])
    W_fc2 = weight_variable([num_hidden_1, num_hidden_2])
    b_fc2 = bias_variable([num_hidden_2])
    W_fc3 = weight_variable([num_hidden_2, num_outputs])
    b_fc3 = bias_variable([num_outputs])
    def model(data, keep):
        # Two sigmoid hidden layers with dropout; final layer returns raw logits.
        fc1 = tf.nn.sigmoid(tf.matmul(data, W_fc1) + b_fc1)
        fc1_drop = tf.nn.dropout(fc1, keep)
        fc2 = tf.nn.sigmoid(tf.matmul(fc1_drop, W_fc2) + b_fc2)
        fc2_drop = tf.nn.dropout(fc2, keep)
        fc3 = tf.matmul(fc2_drop, W_fc3) + b_fc3
        return fc3
    '''for our loss function we still want to get the raw outputs
    of the model, but since it no longer represents the actual prediction
    we rename the variable to ‘output’'''
    output = model(x, keep_prob)
    # WHEN WE CALCULATE THE PREDICTIONS, WE NEED TO WRAP EACH OUTPUT IN A
    # tf.nn.softmax() FUNCTION. THE FIRST ONE HAS BEEN DONE FOR YOU:
    prediction = tf.nn.softmax(output)
    # test/train predictions stay as raw logits (keep prob 1.0 = no dropout);
    # argmax of logits equals argmax of softmax, so accuracy() is unaffected.
    test_prediction = model(tf_X_test, 1.0)
    train_prediction = model(tf_X_train, 1.0)
    '''finally, we replace our previous MSE cost function with the
    cross-entropy function included in Tensorflow. This function takes in the
    raw output of the network and calculates the average loss with the target'''
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(output, _y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    saver = tf.train.Saver()
"""
Explanation: 4. Model definition
For the most part, our model definition will stay roughly the same. The major difference is that the final layer in our network now contains two values, which are interpreted as the confidence that the network has in classifying each input set of data as belonging to either the first or second category.
However, as the raw output of the network, these outputs can take on any value. In order to interpret them for categorization it is typical to use the softmax function, which converts a range of values to a probability distribution along a number of categories. For example, if the outputs from the network from a given input are [1,000,000 and 10], we would like to interpret that as [0.99 and 0.01], or almost full confidence that the sample belongs in the first category. Similarly, if the outputs are closer together, such as 10 and 5, we would like to interpret it as something like [0.7 and 0.3], which shows that the first category is still more likely, but it is not as confident as before. This is exactly what the softmax function does. The exact formulation of the softmax function is not so important, as long as you know that the goal is to take the raw outputs from the neural network, and convert them to a set of values that preserve the relationship between the outputs while summing up to 1.0.
To adapt our code for classification, we simply have to wrap all of our outputs in a tf.nn.softmax() function, which will convert the raw outputs to confidence measures. We will also replace the MSE error function with a cross-entropy function which performs better with classification tasks. Look for comments below for implementation details.
End of explanation
"""
# Train with mini-batch SGD, record batch/train/test accuracy per epoch,
# checkpoint the model, then plot the accuracy curves.
results = []
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print('Initialized')
    for epoch in range(training_epochs):
        # Reshuffle sample order every epoch so batches differ.
        indexes = range(num_samples)
        random.shuffle(indexes)
        for step in range(int(math.floor(num_samples/float(batch_size)))):
            offset = step * batch_size
            batch_data = X_train[indexes[offset:(offset + batch_size)]]
            batch_labels = y_train[indexes[offset:(offset + batch_size)]]
            feed_dict = {x : batch_data, _y : batch_labels, keep_prob: dropout_keep_prob}
            _, l, p = session.run([optimizer, loss, prediction], feed_dict=feed_dict)
        if (epoch % display_step == 0):
            # batch_acc uses p/batch_labels from the LAST batch of the epoch.
            batch_acc = accuracy(p, batch_labels)
            train_acc = accuracy(train_prediction.eval(session=session), y_train)
            test_acc = accuracy(test_prediction.eval(session=session), y_test)
            results.append([epoch, batch_acc, train_acc, test_acc])
    save_path = saver.save(session, "model_houses_classification.ckpt")
    print("Model saved in file: %s" % save_path)
# Plot accuracy (higher is better, unlike the error plot in the regression lab).
df = pd.DataFrame(data=results, columns = ["epoch", "batch_acc", "train_acc", "test_acc"])
df.set_index("epoch", drop=True, inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(10, 4))
ax.plot(df)
ax.set(xlabel='Epoch',
       ylabel='Error',
       title='Training result')
ax.legend(df.columns, loc=1)
print "Maximum test accuracy: %.2f%%" % np.max(df["test_acc"])
"""
Explanation: Now that we have replaced the relevant accuracy measures and loss function, our training process is exactly the same, meaning we can run the same training process and plotting code to visualize the results. The only difference is that with classificiation we are using an accuracy rather than an error measure, so the better our model is performing, the higher the graph should be (higher accuracy is better, while lower error is better).
End of explanation
"""
|
beralt85/current_cumulants | example.ipynb | mit | # inline plotting/interaction
%pylab inline
# replace the line above with the line below for command line scripts:
# from pylab import *
from sympy import * # symbolic python
init_printing() # pretty printing
import numpy as np # numeric python
import time # timing, for performance monitoring
# activate latex text rendering
from matplotlib import rc
rc('text', usetex=True)
"""
Explanation: How to analyse your favourite dynamically reversible Markov model
This notebooks contains a simple example of how to analyse fluctuating currents in dynamically reversible Markov models.
NOTE: This script runs only on a python 2 kernel.
References
[1] Fluctuating Currents in Stochastic Thermodynamics I. Gauge Invariance of Asymptotic Statistics. Wachtel, Altaner and Vollmer (2015)
[2] Fluctuating Currents in Stochastic Thermodynamics II. Energy Conversion and Nonequilibrium Response in Kinesin Models. Altaner, Wachtel and Vollmer (2015)
Set-up jupyter (ipython-notebook) and sympy libraries
End of explanation
"""
import cumulants # cumulants.py implements the algorithm presented in Ref [2]
"""
Explanation: Defining models and using the algorithm presented in Ref [2]
End of explanation
"""
# We define the transition rates as symbolic expressions...
w01, w10, w02, w20, w03, w30, w12, w21, w23, w32 = \
symbols("w_{01}, w_{10}, w_{02}, w_{20}, w_{03}, w_{30},"+\
"w_{12}, w_{21}, w_{23}, w_{32}",\
real=True, positive=True)
# ... and specify the model topology as a dictionary
model4State = {(0,1): w01, (1,0): w10,\
(0,2): w02, (2,0): w20,\
(0,3): w03, (3,0): w30,\
(1,2): w12, (2,1): w21,\
(2,3): w23, (3,2): w32 \
}
"""
Explanation: A simple model
In this script, a model is a dictionary with
keys == edges (tuples of integers)
values == transition rates (symbolic expressions)
We start with the definition of simple model on four states with the following topology:
0 – 1
| \ |
3 – 2
It features two independent cycles (cf. Ref [1]).
End of explanation
"""
# Define the symbols for parameters, place-holders, etc.
b = symbols("b", positive=True)
f, g = symbols("f, g", real=True)
x, y = symbols("x, y", real=True)
u, v, w = symbols("u, v, w", positive=True)
# symmetric-antisymmetric substitutions
onehalf = Rational(1,2)
# Generate a substitution list to parametrize the symbolic transition rates
rate_cw = exp(+onehalf*f)
rate_ccw = exp(-onehalf*f)
# Outer circle, clockwise
w01p = rate_cw
w12p = rate_cw
w23p = rate_cw
w30p = rate_cw
# Outer circle, counter-clockwise
w03p = rate_ccw
w32p = rate_ccw
w21p = rate_ccw
w10p = rate_ccw
# Center rates
w02p = b*exp(+onehalf*g)
w20p = b*exp(-onehalf*g)
# Create the substitution list that gives the parametric dependence of the rates:
rates_parametrized = [(w01,w01p),(w10,w10p),(w02,w02p),(w20,w20p),\
(w12,w12p),(w21,w21p),(w23,w23p),(w32,w32p),(w30,w30p),(w03,w03p)]
"""
Explanation: Parametrizing the transition rates
In our toy model, transitions along the outer edges occur with rate $r$ and $l$ in clockwise and counter-clockwise directions, respectively.
For convenience, we will use the substitutions
$r = k\exp{\frac{f}{2}}$ and $l=k\exp{-\frac{f}{2}}$,
which separates the anti-symmetric edge motance $f = \log{\frac{r}{l}}$ from the symmetric part $k=\sqrt{rl}$.
Transitions through the edge connecting states $0$ and $2$ have same symmetric part $b*k$, and a potentially different driving field $g$. The positive parameter $b$ can be used to turn off the center transition:
$w^0_2 = kb\exp{\frac{g}{2}}$ and $w^2_0 = kb\exp{-\frac{g}{2}}$
As a global factor of the transition matrix, $k$ can be absorbed into the time-scale of the model and we set $k\equiv 1$.
End of explanation
"""
# calculate the cumulants (this takes only a few seconds)
c, C = cumulants.getCumulants( model4State, [(0,1),(0,2)], rates_parametrized)
## WARNING: READ BEFORE EXECUTING THIS CELL
##
## Additional simplifications may help if numerical problems are encountered (e.g. for very stiff rate matrices).
## They can be very time-consuming.
## Note: The time of simplification steps strongly increases with the number of free symbolic parameters
##
## For the present example, they are NOT needed!
##
t0 = time.time() # Time the simplification steps
c = simplify(factor(c))
C = factor(simplify(C))
display(time.time() - t0)
"""
Explanation: Calculating the first and second fundamental cumulants
The function getCumulants(model, chords, rates_params) from the cumulants library takes a dictionary model and a corresponding substitution list rates_params to calculate the fundamental cumulants specified by the fundamental chords given in chords.
While some consistency checks are given, you have to be sure that removing the edges contained in chords from the graph of the model yields a spanning tree, see Ref. [1].
Here, we chose the edges $(0,1)$ and $(0,2)$ as fundamental chords. The corresponding fundamental cycles are then the outer circuit and the lower half circuit (both in clockwise direction):
```
0 –> 1
| | (0,1)
3 –– 2
0
| \
| \ (0,2)
3 – 2
```
End of explanation
"""
## Calculate cycle affinities:
# First fundamental cycle (for chord $(0,1)$): (0,1,2,3):
aff0 = simplify((log((w01*w12*w23*w30)/(w10*w21*w32*w03))).subs(rates_parametrized))
# Second fundamental cycle (for chord $(0,2)$ (0,2,3):
aff1 = simplify((log((w02*w23*w30)/(w03*w32*w20))).subs(rates_parametrized))
# affinities should be $4f$ and $2f+g$, respectively:
display((aff0,aff1))
## Define expressions for quantities of interest
topc = c[0] # average current through top edge
cenc = c[1] # average current through center edge
ep = simplify(c[0]*aff0 + c[1]*aff1) # entropy production
topd = onehalf*C[0,0] # two times variance yields diffusion constants
cend = onehalf*C[1,1]
cov = C[0,1] #co-variance of both currents
res = 2*c[0].diff(f)/C[0,0] # response of top current to the driving affinity $f$ divided by top diffusion constant
#res = simplify(res)
"""
Explanation: Observables
The fundamental cumulants regard the counting statistics along the fundamental chords. As shown in Ref. [1], this is sufficient in order to determine the counting statistics of arbitrary observables.
In the following we consider three observables:
The current flowing in the top edge (fundamental chord $(0,1)$, i.e. c[0]).
The current through the center edge (fundamental chord $(0,2)$, i.e. c[1]).
The entropy production of the whole network
The entropy production is an observable that takes the value $\log{\frac{w_{\rightarrow}}{w_{\leftarrow}}}$ on each edge, where $w_{\rightarrow}$ and $w_{\leftarrow}$ are the corresponding forward and backward rates. The generalized Schnakenberg decomposition (cf. Ref [1]) ensures that for the calculation of the entropy statistics, we only need the affinities of the fundamental cycles.
End of explanation
"""
# Show some analytical expressions for...
# ...top and center average currents
display('Average currents')
display(topc)
display(cenc)
# ...entropy production
display('Entropy production')
display(ep)
## ...diffusion constants and response (WARNING: these are longish expressions...)
#display('Diffusion constants')
#display(topd)
#display(cend)
#display('Normalized response')
#display(res)
"""
Explanation: Exploring the parameter dependence
Our model depends on three symbolic parameters: $(f,g,b)$
End of explanation
"""
## Lambdify all SymPy expressions into NumPy Expressions
topcL, cencL, epL, topdL, cendL, covL, resL\
= [ lambdify( (f,g,b), N(thing), "numpy" )\
for thing in ( topc, cenc, ep, topd, cend, cov, res\
) ]
## Prepare 2D plotting range
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
# prepare the plotting grid
[xmin, xmax, ymin, ymax] = [-10,10,.01,20] # boundaries of the grid
resolution = 400 # plot resolution
plotarea = [xmin, xmax, ymin, ymax] # full plotarea
# prepare the plotting grid for kinesin 6 figures
xpts = linspace(xmin, xmax, resolution)
ypts = linspace(ymin, ymax, resolution)
X, Y = meshgrid(xpts, ypts)
## General setup for figures
fig_size = (6,5) # in inch
fs = 22 # font size
colormap1 = cm.gist_earth # linear color gradient (blue) for densityplots
colormap2 = cm.coolwarm # color gradient (red-white-blue) for densityplots with highlighted center
# font setup
font = {'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : fs,
}
ts = 16 # tick+contour label size
figdir = "toymodel/"
def pplot(g, t, x='', y='', logplot=True, highlight=0, crop=()):
    """Density-plot a two-argument function over the module-level (X, Y) grid.

    Parameters
    ----------
    g : callable
        Lambda/function of *exactly two* array arguments, evaluated on the
        prepared meshgrid (X, Y).
    t : str
        Title string (also used as file name if saving is re-enabled below).
    x, y : str
        Axis label strings.
    logplot : bool
        If True, plot log10(|g|) with the linear colormap; otherwise plot g
        directly with the diverging (center-highlighting) colormap.
    highlight : float
        Level drawn as a thick white contour (e.g. 0 marks stalling lines).
    crop : sequence of two floats, optional
        If given as (vmin, vmax), clip the color scale to that range.
        (Default changed from a mutable ``[]`` to an immutable ``()``;
        ``len(crop) == 2`` behaves identically.)

    Returns
    -------
    The matplotlib figure containing the density plot.
    """
    fig = figure(figsize=fig_size)
    GG = g(X, Y)  # evaluate once; the original called g(X,Y) twice in log mode
    if logplot:
        G = np.log(np.abs(GG)) / np.log(10)
        ccmm = colormap1
    else:
        G = GG
        ccmm = colormap2
    # the slicing parameter [::-1] reverses the y-axis before plotting
    im = imshow(G[::-1], cmap=ccmm, extent=plotarea)  # drawing the function
    if len(crop) == 2:
        im.set_clim(vmin=crop[0], vmax=crop[1])
    # contour lines of the plotted values, plus a thick white highlight contour
    contour(X, Y, G, arange(-20, 20, 1), linewidths=1, linestyles="-", colors='black')
    contour(X, Y, GG, [highlight], linewidths=3, linestyles="-", colors='white')
    # adding the colorbar on the right
    colorbar(im)
    # latex fashion title and axis labels
    title(t, fontdict=font)
    xlabel(x, fontdict=font)
    ylabel(y, fontdict=font)
    # Set tick label size
    tick_params(axis='both', which='major', labelsize=ts)
    #savefig(figdir+t+".png")
    return fig
## NOTE: Our lambda functions take three parameters (f,g,b).
## For plotting, we need to define a new anonymous lambda function, that takes only two parameters
# Currents and EP depending on two forces f,g with same symmetric contribution on the center edge
pplot(lambda f,g: topcL(f,g,1),"Average current through top edge (log scale)",'$f$','$g$')
pplot(lambda f,g: cencL(f,g,1),"Average current through center edge (log scale)",'$f$','$g$')
pplot(lambda f,g: epL(f,g,1),"Steady state entropy production (log scale)",'$f$','$g$')
# Currents and EP depending on one force f=g while increasing strength of center edge
pplot(lambda f,b: topcL(f,f,b),"Average current through top edge (log scale)",'$f$','$b$')
pplot(lambda f,b: cencL(f,f,b),"Average current through center edge (log scale)",'$f$','$b$')
pplot(lambda f,b: epL(f,f,b),"Steady state entropy production (log scale)",'$f$','$b$')
print("Done")
# Diffusion constant and normalized response depending on two forces f,g with same symmetric contribution on the center edge
pplot(lambda f,g: topdL(f,g,1),"Diffusion top edge (log scale)",'$f$','$g$')
pplot(lambda f,g: cendL(f,g,1),"Diffusion center edge (log scale)",'$f$','$g$')
pplot(lambda f,g: resL(f,g,1),"Normalized $f$-response in top edge",'$f$','$g$', False)
# Diffusion constant and normalized response depending on one force f=g while increasing strength of center edge
pplot(lambda f,b: topdL(f,f,b),"Average current through top edge (log scale)",'$f$','$b$')
pplot(lambda f,b: cendL(f,f,b),"Average current through center edge (log scale)",'$f$','$b$')
pplot(lambda f,b: resL(f,f,b),"Normalized $f$-response in top edge",'$f$','$b$', False)
print("Done")
"""
Explanation: Plotting
Efficient (and numerically stable) plotting requires us to transform symbolic expressions into lambda functions (using the method lambdify()). The lambda functions are used to evaluate the expressions on a grid.
NOTE: lambdify() has problems with symbolic expressions that have $\LaTeX$ commands as their representations.
If you use something like
eps = symbols('\\varepsilon')
lambdify() may not work and you have to substitute the $\LaTeX$-style expressions by other (dummy) variables.
End of explanation
"""
|
jsgreenwell/teaching-python | tutorial_files/presentations/list_comp_example.ipynb | mit | class vector_math:
'''
This is the base class for vector math - which allows for initialization with two vectors.
'''
def __init__(self, vectors = [[1,2,2],[3,4,3]]):
self.vect1 = vectors[0]
self.vect2 = vectors[1]
def set_vects(self, vectors):
self.vect1 = vectors[0]
self.vect2 = vectors[1]
def sum_vects(self):
return [x + y for x, y in zip(self.vect1, self.vect2)]
def sub_vects(self):
# default should be [-2,-2,-1]
return [x - y for x, y in zip(self.vect1, self.vect2)]
# Can expand out to for x, y in zip: ... to show what it and sum do
def multi_vects(self):
#default should be [3,8,6]
return [x * y for x, y in zip(self.vect1, self.vect2)]
def multi_scalar(self, scalar, vect):
return [e * scalar for e in vect]
# Show difference between just element * number and using tuple from zip()
def multi_scalar_l(self, scalar, vect):
return lambda e: e * scalar, vect
def mean_vects(self):
mean_vect = self.sum_vects()
return self.multi_scalar(1/len(mean_vect), mean_vect)
def dot_product(self):
return sum(self.multi_vects())
vect = vector_math()
sum_vect = vect.sum_vects()
print("Sum of vectors = {}".format(sum_vect))
print("Subtraction of vectors = {}".format(vect.sub_vects()))
print("Product of vectors = {}".format(vect.multi_vects()))
print("Product of Sum of vectors and 2 = {}\n".format(vect.multi_scalar(2, sum_vect)))
# Yep can still use character returns and others in format
print("Average of vectors = {}".format(["{:.2f}".format(e) for e in vect.mean_vects()]))
# Now there are other ways to reduce the decimal places but this was just to show a nested format call
# TODO: Consider adding timeit to show difference between calling multi_scalar directly and calling mean_vect:
#print("Average of vectors through calling scalar = {}".format(
# ["{:.2f}".format(e) for e in vect.multi_scalar(1/len(sum_vect), sum_vect)]))
print("The Dot Product is {}".format(vect.dot_product()))
"""
Explanation: Example of performing Vector mathmatical function using Python List structures
Vector methods to be created:
* Sum vectors
* Add vector elements of same sized vectors
* Return resulting vector
* Subtract vectors
* Subtract vector elements of same sized vectors
* Return resulting vector
* Product of vectors
* Product of components of vectors
* Return resulting vector
* Product of vector and scalar
* Return scalar product of each element of vector
* Mean of vectors
* Sum Vector method / number of elements for each element (or 1/len scalar multiply)
* Dot Product
* Sum of component wise products
* Multiply vectors
* Sum vectors
* Return resulting vector
Teaching notes delete when finished
Remember to explain that in the real world numpy and other libraries would be used to do this
For teaching list methods
Particuliarly allows for a number of list comprehensions to be explained
Basic Class definition and issues
Start with just calling a definition directly (which will Error with a not found)
Show how adding self.function_name() works and explain
Move into using decorators
Start with a vector with a small number of elements
So students can do calculations in their heads and follow along
End of explanation
"""
from math import sqrt
# Using the vect variables showing without functions
# Sum of squares = vect1 . vect1 (dot product of a vector with itself).
sum_of_squares = sum([x * y for x, y in zip(vect.vect1, vect.vect1)])
# Magnitude (Euclidean norm) = sqrt(v . v).
magnitude = sqrt(sum_of_squares)
# Euclidean distance between vect1 and vect2.
distance = sqrt(sum([(x - y) ** 2 for x, y in zip(vect.vect1, vect.vect2)]))
print("Sum of Squares is {}".format(sum_of_squares))
print("Magnitude is {:.2f}".format(magnitude))
print("Distance is {}".format(distance))
"""
Explanation: Other vector operations that could be done
End of explanation
"""
import dis
import time
# For instruction - shows disassemble of methods and performs quick time check
# NOTE(review): the timings below measure compile() plus dis.dis() (i.e.
# printing the bytecode), NOT the execution of the list comprehension or the
# lambda themselves — treat the printed times as illustrative only.
vect = [2,3,3,3,4,5,6,6,4,3,2,1,3,4,5,6,4,3,2,1,3,4,5,6,4,3,2]
t1 = time.time()
print("list comp")
dis.dis(compile("[e * 2 for e in vect]", '<stdin>', 'exec'))
d_l = time.time() - t1
print(d_l)
t2 = time.time()
print("\n\n\nlambda")
# This compiles the *creation* of a (lambda, vect) tuple, not a mapped result.
dis.dis(compile("lambda e: e * 2, vect", '<stdin>', 'exec'))
d_lam = time.time() - t2
print(d_lam)
"""
Explanation: List Comprehensions are Powerful tools in Python
Expect to see them throughout code one has to maintain but also understand they are not always the optimal solution
When an iteration is needed to build a composite value, list comprehensions are considered the most readable or understandable way to achieve this. Loops may be used instead if one wants the "side effect" of an interation while functional tools may be used if optimization and code speed is important.
For instance, the above examples could also have been performed with an annoymous lambda or reduce, like:
def multi_scalar(self, vect, scalar):
return lambda e: e * scalar, vect
In this case, the lambda would be faster by a minimal amount and actually have one less function call - which are expensive in Python. This is not always true as the need for an increasing amount of functional methods can change both the speed and amount of function call required. code example is below
End of explanation
"""
|
moonbury/pythonanywhere | RegressionAnalysisWithPython/Chap_6 - Achieving Generalization.ipynb | gpl-3.0 | import pandas as pd
from sklearn.datasets import load_boston
boston = load_boston()
dataset = pd.DataFrame(boston.data, columns=boston.feature_names)
dataset['target'] = boston.target
observations = len(dataset)
variables = dataset.columns[:-1]
X = dataset.ix[:,:-1]
y = dataset['target'].values
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101)
print ("Train dataset sample size: %i" % len(X_train))
print ("Test dataset sample size: %i" % len(X_test))
X_train, X_out_sample, y_train, y_out_sample = train_test_split(X, y, test_size=0.40, random_state=101)
X_validation, X_test, y_validation, y_test = train_test_split(X_out_sample, y_out_sample, test_size=0.50, random_state=101)
print ("Train dataset sample size: %i" % len(X_train))
print ("Validation dataset sample size: %i" % len(X_validation))
print ("Test dataset sample size: %i" % len(X_test))
"""
Explanation: Achieving Generalization
Testing and cross-validation
Train-test split
End of explanation
"""
from sklearn.cross_validation import cross_val_score, KFold, StratifiedKFold
from sklearn.metrics import make_scorer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import numpy as np
def RMSE(y_true, y_pred):
    """Root-mean-square error between true and predicted values.

    Fixed: the original returned the raw sum of squared errors
    (np.sum((y_true - y_pred)**2)), not the root-mean-square error its
    name promises.
    """
    return np.sqrt(np.mean((y_true - y_pred) ** 2))
lm = LinearRegression()
cv_iterator = KFold(n=len(X), n_folds=10, shuffle=True, random_state=101)
edges = np.histogram(y, bins=5)[1]
binning = np.digitize(y, edges)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
second_order=PolynomialFeatures(degree=2, interaction_only=False)
third_order=PolynomialFeatures(degree=3, interaction_only=True)
over_param_X = second_order.fit_transform(X)
extra_over_param_X = third_order.fit_transform(X)
cv_score = cross_val_score(lm, over_param_X, y, cv=cv_iterator, scoring='mean_squared_error', n_jobs=1)
print (cv_score)
print ('Cv score: mean %0.3f std %0.3f' % (np.mean(np.abs(cv_score)), np.std(cv_score)))
cv_score = cross_val_score(lm, over_param_X, y, cv=stratified_cv_iterator, scoring='mean_squared_error', n_jobs=1)
print ('Cv score: mean %0.3f std %0.3f' % (np.mean(np.abs(cv_score)), np.std(cv_score)))
"""
Explanation: Cross validation
End of explanation
"""
import random
def Bootstrap(n, n_iter=3, random_state=None):
    """
    Random sampling with replacement cross-validation generator.
    For each iter a sample bootstrap of the indexes [0, n) is
    generated and the function yields the obtained sample (with
    repetitions) and a list of all the excluded indexes.

    Parameters
    ----------
    n : int
        Number of observations to sample from.
    n_iter : int
        Number of bootstrap iterations to yield.
    random_state : int or None
        Seed for reproducibility. Fixed: the original used ``if random_state:``,
        which silently ignored a seed of 0; ``is not None`` honours every seed.
    """
    if random_state is not None:
        random.seed(random_state)
    for j in range(n_iter):
        bs = [random.randint(0, n - 1) for i in range(n)]
        out_bs = list({i for i in range(n)} - set(bs))
        yield bs, out_bs
# Demonstrate the generator: each iteration yields (in-bag, out-of-bag) indexes.
boot = Bootstrap(n=10, n_iter=5, random_state=101)
for train_idx, validation_idx in boot:
    print (train_idx, validation_idx)
import numpy as np
# Bootstrap the linear-regression coefficients: refit on each of the 10
# bootstrap samples and store the 13 fitted coefficients per replicate.
boot = Bootstrap(n=len(X), n_iter=10, random_state=101)
lm = LinearRegression()
bootstrapped_coef = np.zeros((10,13))
for k, (train_idx, validation_idx) in enumerate(boot):
    lm.fit(X.ix[train_idx,:],y[train_idx])  # NOTE(review): .ix is deprecated/removed in recent pandas
    bootstrapped_coef[k,:] = lm.coef_
# Inspect the sampling variability of two coefficients (columns 10 and 6).
print(bootstrapped_coef[:,10])
print(bootstrapped_coef[:,6])
"""
Explanation: Valid options are ['accuracy', 'adjusted_rand_score', 'average_precision', 'f1', 'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted', 'log_loss', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'precision', 'precision_macro', 'precision_micro', 'precision_samples', 'precision_weighted', 'r2', 'recall', 'recall_macro', 'recall_micro', 'recall_samples', 'recall_weighted', 'roc_auc'
http://scikit-learn.org/stable/modules/model_evaluation.html
Bootstrapping
End of explanation
"""
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=3)
lm = LinearRegression()
lm.fit(X_train,y_train)
print ('Train (cases, features) = %s' % str(X_train.shape))
print ('Test (cases, features) = %s' % str(X_test.shape))
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(X_train)))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(X_test)))
from sklearn.preprocessing import PolynomialFeatures
second_order=PolynomialFeatures(degree=2, interaction_only=False)
third_order=PolynomialFeatures(degree=3, interaction_only=True)
lm.fit(second_order.fit_transform(X_train),y_train)
print ('(cases, features) = %s' % str(second_order.fit_transform(X_train).shape))
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(second_order.fit_transform(X_train))))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(second_order.fit_transform(X_test))))
lm.fit(third_order.fit_transform(X_train),y_train)
print ('(cases, features) = %s' % str(third_order.fit_transform(X_train).shape))
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(third_order.fit_transform(X_train))))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(third_order.fit_transform(X_test))))
"""
Explanation: Greedy selection of features
Controlling for over-parameterization
End of explanation
"""
try:
    import urllib.request as urllib2  # Python 3
except ImportError:  # fixed: bare except -> the only error import can raise
    import urllib2  # Python 2 fallback
import numpy as np

# UCI Madelon dataset: fixed-width text matrices of features and labels.
train_data = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.data'
validation_data = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_valid.data'
train_response = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.labels'
validation_response = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/madelon_valid.labels'
try:
    Xt = np.loadtxt(urllib2.urlopen(train_data))
    yt = np.loadtxt(urllib2.urlopen(train_response))
    Xv = np.loadtxt(urllib2.urlopen(validation_data))
    yv = np.loadtxt(urllib2.urlopen(validation_response))
except Exception:
    # Deliberate best-effort fallback: if downloading fails (no network,
    # moved URLs, ...), load manually-downloaded copies from the working
    # directory. Fixed: `except Exception` replaces the original bare
    # `except:`, which would also have swallowed KeyboardInterrupt/SystemExit.
    Xt = np.loadtxt('madelon_train.data')
    yt = np.loadtxt('madelon_train.labels')
    Xv = np.loadtxt('madelon_valid.data')
    yv = np.loadtxt('madelon_valid.labels')
print ('Training set: %i observations %i feature' % (Xt.shape))
print ('Validation set: %i observations %i feature' % (Xv.shape))
from scipy.stats import describe
print (describe(Xt))
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
def visualize_correlation_matrix(data, hurdle = 0.0):
    """Show a heatmap of the column-wise correlation matrix of *data*.

    Correlations whose absolute value falls below *hurdle* are zeroed out,
    so only the stronger associations remain visible in the plot.
    """
    # Columns are the variables (rowvar=0); suppress sub-threshold entries.
    corr = np.corrcoef(data, rowvar=0)
    corr[np.where(np.abs(corr) < hurdle)] = 0.0
    cells = plt.pcolor(corr, cmap=mpl.cm.coolwarm, alpha=0.8)
    cells.axes.set_frame_on(False)
    # Rotate the x tick labels and hide all tick marks for a cleaner look.
    plt.xticks(rotation=90)
    plt.tick_params(axis='both', which='both', bottom='off', top='off',
                    left='off', right='off')
    plt.colorbar()
    plt.show()
visualize_correlation_matrix(Xt[:,100:150], hurdle=0.0)
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression()
logit.fit(Xt,yt)
from sklearn.metrics import roc_auc_score
print ('Training area under the curve: %0.3f' % roc_auc_score(yt,logit.predict_proba(Xt)[:,1]))
print ('Validation area under the curve: %0.3f' % roc_auc_score(yv,logit.predict_proba(Xv)[:,1]))
"""
Explanation: Madelon dataset
End of explanation
"""
# Univariate feature screening with the ANOVA F-test, then a degree-2
# interaction expansion of the surviving features.
from sklearn.feature_selection import SelectPercentile, f_classif
selector = SelectPercentile(f_classif, percentile=50)
selector.fit(Xt,yt)
variable_filter = selector.get_support()
# Distribution of the F-scores, used to pick the threshold below.
plt.hist(selector.scores_, bins=50, histtype='bar')
plt.grid()
plt.show()
# NOTE(review): this overwrites the percentile-based mask above — the final
# filter keeps variables with an F-score above 10, not the top 50%.
variable_filter = selector.scores_ > 10
print ("Number of filtered variables: %i" % np.sum(variable_filter))
from sklearn.preprocessing import PolynomialFeatures
interactions = PolynomialFeatures(degree=2, interaction_only=True)
Xs = interactions.fit_transform(Xt[:,variable_filter])
print ("Number of variables and interactions: %i" % Xs.shape[1])
logit.fit(Xs,yt)
Xvs = interactions.fit_transform(Xv[:,variable_filter])
print ('Validation area Under the Curve before recursive selection: %0.3f' % roc_auc_score(yv,logit.predict_proba(Xvs)[:,1]))
"""
Explanation: Univariate selection of features
End of explanation
"""
# Execution time: 3.15 s
# Recursive feature elimination with cross-validation (RFECV) on the
# degree-2 polynomial expansion of the Boston features.
from sklearn.feature_selection import RFECV
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)
lm = LinearRegression()
cv_iterator = KFold(n=len(X_train), n_folds=10, shuffle=True, random_state=101)
recursive_selector = RFECV(estimator=lm, step=1, cv=cv_iterator, scoring='mean_squared_error')
recursive_selector.fit(second_order.fit_transform(X_train),y_train)
print ('Initial number of features : %i' % second_order.fit_transform(X_train).shape[1])
print ('Optimal number of features : %i' % recursive_selector.n_features_)
a = second_order.fit_transform(X_train)
print (a)
# Keep only the features retained by RFECV and refit / evaluate the model.
essential_X_train = recursive_selector.transform(second_order.fit_transform(X_train))
essential_X_test = recursive_selector.transform(second_order.fit_transform(X_test))
lm.fit(essential_X_train, y_train)
print ('cases = %i features = %i' % essential_X_test.shape)
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(essential_X_train)))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(essential_X_test)))
# Cross-validate the reduced model with folds stratified on binned targets.
edges = np.histogram(y, bins=5)[1]
binning = np.digitize(y, edges)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
essential_X = recursive_selector.transform(second_order.fit_transform(X))
cv_score = cross_val_score(lm, essential_X, y, cv=stratified_cv_iterator, scoring='mean_squared_error', n_jobs=1)
print ('Cv score: mean %0.3f std %0.3f' % (np.mean(np.abs(cv_score)), np.std(cv_score)))
"""
Explanation: Recursive feature selection
End of explanation
"""
from sklearn.linear_model import Ridge
ridge = Ridge(normalize=True)
# The following commented line is to show a logistic regression with L2 regularization
# lr_l2 = LogisticRegression(C=1.0, penalty='l2', tol=0.01)
ridge.fit(second_order.fit_transform(X), y)
lm.fit(second_order.fit_transform(X), y)
print ('Average coefficient: Non regularized = %0.3f Ridge = %0.3f' % (np.mean(lm.coef_), np.mean(ridge.coef_)))
print ('Min coefficient: Non regularized = %0.3f Ridge = %0.3f' % (np.min(lm.coef_), np.min(ridge.coef_)))
print ('Max coefficient: Non regularized = %0.3f Ridge = %0.3f' % (np.max(lm.coef_), np.max(ridge.coef_)))
"""
Explanation: Regularization
Ridge
End of explanation
"""
from sklearn.grid_search import GridSearchCV
edges = np.histogram(y, bins=5)[1]
binning = np.digitize(y, edges)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
search = GridSearchCV(estimator=ridge, param_grid={'alpha':np.logspace(-4,2,7)}, scoring = 'mean_squared_error',
n_jobs=1, refit=True, cv=stratified_cv_iterator)
search.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search.best_params_['alpha'])
print ('Best CV mean squared error: %0.3f' % np.abs(search.best_score_))
search.grid_scores_
# Alternative: sklearn.linear_model.RidgeCV
from sklearn.linear_model import RidgeCV
auto_ridge = RidgeCV(alphas=np.logspace(-4,2,7), normalize=True, scoring = 'mean_squared_error', cv=None)
auto_ridge.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % auto_ridge.alpha_)
"""
Explanation: Grid search for optimal parameters
End of explanation
"""
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import expon
np.random.seed(101)
search_func=RandomizedSearchCV(estimator=ridge, param_distributions={'alpha':np.logspace(-4,2,100)}, n_iter=10,
scoring='mean_squared_error', n_jobs=1, iid=False, refit=True, cv=stratified_cv_iterator)
search_func.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search_func.best_params_['alpha'])
print ('Best CV mean squared error: %0.3f' % np.abs(search_func.best_score_))
"""
Explanation: Random Search
End of explanation
"""
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=1.0, normalize=True, max_iter=2*10**5)
#The following comment shows an example of L1 logistic regression
#lr_l1 = LogisticRegression(C=1.0, penalty='l1', tol=0.01)
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import expon
np.random.seed(101)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
search_func=RandomizedSearchCV(estimator=lasso, param_distributions={'alpha':np.logspace(-5,2,100)}, n_iter=10,
scoring='mean_squared_error', n_jobs=1, iid=False, refit=True, cv=stratified_cv_iterator)
search_func.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search_func.best_params_['alpha'])
print ('Best CV mean squared error: %0.3f' % np.abs(search_func.best_score_))
print ('Zero value coefficients: %i out of %i' % (np.sum(~(search_func.best_estimator_.coef_==0.0)),
len(search_func.best_estimator_.coef_)))
# Alternative: sklearn.linear_model.LassoCV
# Execution time: 54.9 s
from sklearn.linear_model import LassoCV
auto_lasso = LassoCV(alphas=np.logspace(-5,2,100), normalize=True, n_jobs=1, cv=None, max_iter=10**6)
auto_lasso.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % auto_lasso.alpha_)
"""
Explanation: Lasso
End of explanation
"""
# Execution time: 1min 3s
from sklearn.linear_model import ElasticNet
elasticnet = ElasticNet(alpha=1.0, l1_ratio=0.15, normalize=True, max_iter=10**6, random_state=101)
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import expon
np.random.seed(101)
search_func=RandomizedSearchCV(estimator=elasticnet, param_distributions={'alpha':np.logspace(-5,2,100),
'l1_ratio':np.arange(0.0, 1.01, 0.05)}, n_iter=10,
scoring='mean_squared_error', n_jobs=1, iid=False, refit=True, cv=stratified_cv_iterator)
search_func.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search_func.best_params_['alpha'])
print ('Best l1_ratio: %0.5f' % search_func.best_params_['l1_ratio'])
print ('Best CV mean squared error: %0.3f' % np.abs(search_func.best_score_))
print ('Zero value coefficients: %i out of %i' % (np.sum(~(search_func.best_estimator_.coef_==0.0)),
len(search_func.best_estimator_.coef_)))
# Alternative: sklearn.linear_model.ElasticNetCV
from sklearn.linear_model import ElasticNetCV
auto_elastic = ElasticNetCV(alphas=np.logspace(-5,2,100), normalize=True, n_jobs=1, cv=None, max_iter=10**6)
auto_elastic.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % auto_elastic.alpha_)
print ('Best l1_ratio: %0.5f' % auto_elastic.l1_ratio_)
"""
Explanation: Elasticnet
End of explanation
"""
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import RandomizedLogisticRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# Keep variables whose stability-selection score reaches this threshold.
threshold = 0.03
# NOTE(review): RandomizedLogisticRegression was removed in scikit-learn 0.21;
# this cell only runs on older versions.
stability_selection = RandomizedLogisticRegression(n_resampling=300, n_jobs=1, random_state=101, scaling=0.15,
                                                   sample_fraction=0.50, selection_threshold=threshold)
interactions = PolynomialFeatures(degree=4, interaction_only=True)
# Pipeline: stability-based feature selection -> 4th-order interactions -> logit.
model = make_pipeline(stability_selection, interactions, logit)
model.fit(Xt,yt)
print ('Number of features picked by stability selection: %i' % np.sum(model.steps[0][1].all_scores_ >= threshold))
from sklearn.metrics import roc_auc_score
# Hold-out AUC using the predicted probability of the positive class.
print ('Area Under the Curve: %0.3f' % roc_auc_score(yv,model.predict_proba(Xv)[:,1]))
"""
Explanation: Stability selection
End of explanation
"""
|
ferasz/LCCM | Example/LCCM code example.ipynb | bsd-3-clause | import lccm
import numpy as np
import pandas as pd
import pylogit
import warnings
from collections import OrderedDict
"""
Explanation: LCCM Code Walk-through Example
The following notebook demonstrates how this latent class choice model code works. We will be using an example dataset (Qualtrics data long format.csv) to perform model specification, estimation and sample enumeration. Our objective is to understand mode choice decisions across different latent classes.
The dataset being used is a carsharing dataset collected by a UC Berkeley student as part of his/her research. The dataset, called "Qualtrics data long format.csv", can be found in the github repository. More detail on the dataset and the variables being used can be found in the github repository as well (QualtricsData.pdf).
The data set was obtained by surveying Bay Area private transit users to understand whether users of these services have different modality styles (unobserved latent lifestyles that dictate the set of modes they consider when making trips and mode choice decisions). Each respondent provided a travel diary for an entire day indicating travel time for the available modes, incurred travel cost, and several other pieces of information. Socio-demographic variables were also obtained via the survey.
The overall set of possible choices in this dataset was enumerated from 1 to 6 to denote "Auto", "Walk", "Bike", "Walk to Transit", "Drive to Transit", and "Private Transit" alternatives.
Throughout this example, we will highlight the various steps required to make the lccm package run.
Step 1: Import the LCCM Package and All other Necessary Packages
End of explanation
"""
# Load the data file
inputFilePath = 'C:/Users/Feras/Desktop/LCCM github exmaple/'
inputFileName = 'Qualtrics data long format.csv'
# Let pandas open and close the file itself; the original passed an open
# binary handle (open(..., 'rb')) that was never closed.
df = pd.read_csv(inputFilePath + inputFileName)

# Let's have a quick look at the long format data (first 5 rows)
df.head()
"""
Explanation: Step 2: Load the Dataset.
The lccm package handles data in long format (NOT wide format).
To see the difference between the two formats run the following lines of code, (make sure your excel files are in the same directory as your code file):
df_long = pd.read_csv('Qualtrics data long format.csv')
df_long.head()
df_wide = pd.read_excel('Qualtrics data wide format.xlsx')
df_wide.head()
If you do not want to convert the wide formatted data yourself, you can check the following link, which provides instructions regarding how one can convert his/her dataset from wide format to long format. https://github.com/timothyb0912/pylogit/blob/master/examples/notebooks/Main%20PyLogit%20Example.ipynb
End of explanation
"""
# Setting the number of classes in the model to 2
# (latent-class count for this specification; grown one class at a time in practice)
n_classes = 2
"""
Explanation: Step 3: Model Specification
3.1- Tips for running the latent class choice model:
1) Latent class models are mixture models and have several local maxima so their estimation is not straight forward.
2) Begin with a simple two-class model by randomly initializing starting values of the parameters.
3) Run the model several times until you get reasonable estimates.
4) Store these parameter estimates and add another class to the model.
5) Use the parameter estimates stored earlier as starting values in the new model for the first two classes, and randomize the starting value of parameter estimates for the new latent class.
6) Repeat the steps(3,4, and 5) as you increase the number of latent classes in the model specification.
End of explanation
"""
# Binary indicator: 1 if the respondent is male (Gender code 1), else 0.
df['male'] = df['Gender'].eq(1).astype(int)

# Bucket the categorical household-income code into three indicator columns:
# low (< 6), medium (6 to 11), and high (12 to 15).
df['low_income'] = df['HHIncome'].lt(6).astype(int)
df['medium_income'] = (df['HHIncome'].ge(6) & df['HHIncome'].lt(12)).astype(int)
df['high_income'] = (df['HHIncome'].ge(12) & df['HHIncome'].lt(16)).astype(int)
"""
Explanation: Modify variables, create dummy variables,and scale variables so the estimated coefficients are of similar magnitudes.
End of explanation
"""
# NOTE: Specification and variable names must be in list data structures.
# class_membership_spec defines the variable names to be used in the specification of the
# class membership model.
# class_membership_labels defines the names associated with each of the variables which
# will be displayed in the output table after estimation.
# NOTE: by specifying the word 'intercept' in the class_membership_spec, the code
# understands that this denotes an alternative specific constant.
# The class membership model constrains the utility of the first class to zero by default.
# The same socio-demographic variables will be included in the class membership
# model of each of the remaining classes (excluding the first class as it is the base).
class_membership_spec = ['intercept', 'car_ownership','low_income','high_income',
                         'male','distance']
class_membership_labels = ['Class-specific constant', 'Car ownership', 'Low Income','High Income',
                           'Male','Distance Traveled (miles)']
"""
Explanation: 3.2- Membership Model :
The specification of the class membership model used in this example is as follows:
$V_{class1} = 0 $
$V_{class2} = ASCClass2 + \beta_{CarOwnership, class2} * CarOwnerhsip $
$+ \beta_{LowIncome, class2} * LowIncome + \beta_{HighIncome, class2} * HighIncome $
$+ \beta_{Male, class2} * Male + \beta_{DistanceTraveled, class2} * DistanceTraveled $
End of explanation
"""
# Set the available alternatives for each latent class
# Each array entails the alternatives available in the choice set for each latent class
# NOTE: By default the code does not require the user to specify the choice set
# for each class. The code assumes that all alternatives are available in the choice
# set for each latent class.
# We are assuming that the bike alternative does not exist in the choice set for latent class 1
# We are assuming that all alternatives are available for latent class 2
# (Alternatives are coded 1=Auto, 2=Walk, 3=Bike, 4=Walk to Transit,
#  5=Drive to Transit, 6=Private Transit.)
avail_alts = (np.array([1,2,4,5,6]),
              np.array([1,2,3,4,5,6]))
"""
Explanation: 3.3- Defining the choice set for each latent class
End of explanation
"""
# NOTE: Specification and variable names must be in lists of ordered dictionaries.
# class_specific_specs defines the variable names to be used in the specification of the
# class specific choice model of each class.
# class_specific_labels defines the names associated with each of the variables which
# will be displayed in the output tables after estimation.
# NOTE: by specifying the word 'intercept' in the class_specific_specs, the code
# understands that this denotes an alternative specific constant.
# A single bracket (e.g. [2,4,5,6]) yields one alternative-specific coefficient per
# listed alternative; nested brackets (e.g. [[1,5],[4,6]]) yield one generic
# coefficient shared by each inner group of alternatives.
class_specific_specs = [OrderedDict([('intercept', [2,4,5,6]),
                                     ('travel_time', [[1,2,4,5,6]]),
                                     ('travel_cost', [[1,5],[4,6]])]),
                        OrderedDict([('intercept', [2,3,4,5,6]),
                                     ('travel_time', [[1,2,3,4,5,6]]),
                                     ('travel_cost', [[1,4,5,6]])])]
class_specific_labels = [OrderedDict([('ASC', ['ASC(Walk)',
                                               'ASC(Walk to Transit)','ASC(Drive to Transit)',
                                               'ASC(Private Transit)']),
                                      ('Travel Time',['Travel Time ']),
                                      ('Travel Cost',['Travel Cost Auto and Drive to Transit', 'Travel Cost WalktoTransit and PrivateTransit'])]),
                         OrderedDict([('ASC', ['ASC(Walk)','ASC(Bike)',
                                               'ASC(Walk to Transit)','ASC(Drive to Transit)',
                                               'ASC(Private Transit)']),
                                      ('Travel Time',['Travel Time']),
                                      ('Travel Cost',['Travel Cost'])])]
"""
Explanation: 3.4- Class-specific Choice Model:
You can specify your parameters as generic or alternative specific. The following example entails both types of specifications to help the modeler identify how to specify generic versus alternative specific parameters accordingly.
In this example, the intercepts are alternative specific and that is done by using only one bracket i.e:'intercept', [2,3,4,5,6]. The first alternative will be the base alternative and hence no intercept is allocated in its utility.
Note that we will be constraining the choice set also for latent class 1 whereby for this class we have the following specification: 'intercept', [2,4,5,6]. The bike alternative does not belong in the choice set for this class and hence no parameters will be estimated including the ASC.
Travel time parameters across all alternatives for both latent classes are generic. This is done by using two brackets i.e: 'travel_time', [[1,2,3,4,5,6]] according to the specification below. Note that for latent class 1, we drop travel time from alternative 3 (bike) as that alternative does not exist in the choice set.
Travel cost parameter is constrained to be the same for the auto and drive to transit alternatives for latent class 1. Also, the travel cost parameter is constrained to be the same for the remaining alternatives in latent class 1. Such a specification is done according to the following script: 'travel_cost', [[1,5],[4,6]] based on the specification below.
Travel cost parameter is generic for all alternatives for latent class 2.
The specification of the class specific choice model used in this example is as follows:
Latent Class 1:
$V_{auto} = \beta_{tt, class1} * TravelTime_{auto} + \beta_{cost_Auto-DrivetoTransit, class1} * TravelCost_{auto} $
$V_{walk} = ASCWalk_{class1} + \beta_{tt, class1} * TravelTime_{walk}$
$V_{WalkToTransit} = ASCWalkToTransit_{class1} + \beta_{tt, class1} * TravelTime_{walktotransit} + \beta_{cost_WalktoTransit-PrivateTransit, class1} * TravelCost_{walktotransit} $
$V_{DriveToTransit} = ASCDriveToTransit_{class1} + \beta_{tt, class1} * TravelTime_{drivetotransit} +
\beta_{cost_Auto-DrivetoTransit, class1} * TravelCost_{drivetotransit} $
$V_{PrivateTransit} = ASCPrivateTransit_{class1} + \beta_{tt, class1} * TravelTime_{privatetransit} + \beta_{cost_WalktoTransit-PrivateTransit, class1} * TravelCost_{privatetransit} $
Latent Class 2:
$V_{auto} = \beta_{tt, class2} * TravelTime_{auto} + \beta_{cost, class2} * TravelCost_{auto} $
$V_{walk} = ASCWalk_{class2} + \beta_{tt, class2} * TravelTime_{walk}$
$V_{bike} = ASCBike_{class2} + \beta_{tt, class2} * TravelTime_{bike}$
$V_{WalkToTransit} = ASCWalkToTransit_{class2} + \beta_{tt, class2} * TravelTime_{walktotransit} + \beta_{cost, class2} * TravelCost_{walktotransit} $
$V_{DriveToTransit} = ASCDriveToTransit_{class2} + \beta_{tt, class2} * TravelTime_{drivetotransit} + \beta_{cost, class2} * TravelCost_{drivetotransit} $
$V_{PrivateTransit} = ASCPrivateTransit_{class2} + \beta_{tt, class2} * TravelTime_{privatetransit} + \beta_{cost, class2} * TravelCost_{privatetransit} $
End of explanation
"""
# Specify starting values for model parameters. Again this is optional and the modeler does
# not have to do so for estimation.
# This section can be completely skipped.

# Class membership model parameters: one starting value per variable in
# class_membership_spec (constant, car ownership, low income, high income,
# male, distance) for the single non-base class.
paramClassMem = np.array([0, 0, 0, 0, 0, 0])

# Class specific choice model parameters: exactly one array per latent class,
# ordered to match class_specific_specs. (Appending both arrays inside a
# `for s in range(0, n_classes)` loop would duplicate them and leave
# len(paramClassSpec) != n_classes.)
paramClassSpec = [
    # Class 1: 4 ASCs, one generic travel time, two constrained travel-cost terms
    np.array([-2.14036027, -2.60680512, -2.86731413, -2.65139932,
              -0.0000189449556, -0.0489097045, -0.0489097045]),
    # Class 2: 5 ASCs, one generic travel time, one generic travel cost
    np.array([1.353, -1.1648, 1.0812, -1.9214, 1.3328,
              -1.2960, -0.0796]),
]
"""
Explanation: Step 4: Accounting for Choice-based Sampling
The code by default assumes a non-choice-based sampling method and hence all individual weights are assumed to be equal to one. However, if the sample is choice-based, then the modeler can account for this by incorporating individual weights for the log-likelihoods.
The user needs to specify a 1D numpy array of size that is equal to sample size.
Each element accounts for the associated weight for each individual in the data file to cater for the choice based sampling scheme, building off Ben-Akiva and Lerman (1983).
Step 5: Starting Values for Parameter Estimates
By default the code does not require the user to specify starting values for parameters for either the class membership or the class-specific choice models. The code will generate random starting values automatically.
However, since this is a non-convex optimization problem with multiple local maxima, starting values for parameter estimates are most likely needed as the number of latent classes increases.
End of explanation
"""
# Fit the model
# In order to better understand the various variables that are needed as input
# in the lccm_fit function, the user is encouraged to use the following command
# help(lccm.lccm_fit), which will identify the required input variables in the
# lccm_fit function below.
# Warnings raised during the numerical optimization are suppressed for a cleaner log.
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    lccm.lccm_fit(data = df,
                  ind_id_col = 'ID',
                  obs_id_col = 'custom_id',
                  alt_id_col = 'mode_id',
                  choice_col = 'choice',
                  n_classes = n_classes,
                  class_membership_spec = class_membership_spec,
                  class_membership_labels = class_membership_labels,
                  class_specific_specs = class_specific_specs,
                  class_specific_labels = class_specific_labels,
                  avail_alts = avail_alts,
                  #indWeights = indWeights,
                  outputFilePath = inputFilePath,
                  paramClassMem = paramClassMem,
                  paramClassSpec = paramClassSpec)
"""
Explanation: Step 6: Estimation of Latent Class Choice Model and Output Table
Estimation of the latent class choice model happens here via this chunk of code, which incorporates the required specification, starting values for parameter estimates if needed, the choice set for each class if needed, and choice-based sampling weights if needed.
Following that, the model outputs parameter estimates for the class membership and class-specific choice models, in addition to the standard errors, t-stats and p-values. Statistical measures of fit — rho-bar squared, AIC, BIC, fitted log-likelihood and other measures — are computed and displayed as well.
End of explanation
"""
|
garibaldu/multicauseRBM | Max/RBM-ORBM-Single-Models.ipynb | mit | from scipy.special import expit
from rbmpy.rbm import RBM
from rbmpy.sampler import VanillaSampler, PartitionedSampler, ApproximatedSampler, LayerWiseApproxSampler,ApproximatedMulDimSampler, ContinuousSampler
from rbmpy.trainer import VanillaTrainier
from rbmpy.performance import Result
import numpy as np
import rbmpy.datasets, rbmpy.performance, rbmpy.plotter, rbmpy.mnist, pickle, rbmpy.rbm, os, logging, rbmpy.sampler,math
from sklearn.linear_model import Perceptron
from sklearn.neural_network import BernoulliRBM
import rbmpy.plotter as pp
from numpy import newaxis
from collections import Counter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Root logger for the notebook.
logger = logging.getLogger()
# Set the logging level to logging.INFO (show INFO-level messages and above).
logger.setLevel(logging.INFO)
# Render matplotlib figures inline in the notebook.
%matplotlib inline
# debug_here() drops into an interactive debugger at the call site.
from IPython.core.debugger import Tracer; debug_here = Tracer()
# Helper Methods
def squash_images(imgs):
    """Flatten a sequence of 2-D images into a (n_images, n_pixels) array.

    Generalized to accept images of any dimensionality: every axis after the
    first is collapsed into one.
    """
    stacked = np.array(imgs)
    # -1 infers n_pixels, so non-square or higher-rank images also work.
    return stacked.reshape(stacked.shape[0], -1)
def inflate_images(imgs):
    """Inverse of squash_images: reshape (n_images, n_pixels) rows back into
    square images of shape (n_images, side, side).

    Each row must hold a perfect-square number of pixels.
    """
    flat = np.array(imgs)
    n_images, n_pixels = flat.shape
    # math.sqrt returns a float; numpy reshape requires integer dimensions.
    side = int(round(math.sqrt(n_pixels)))
    return flat.reshape(n_images, side, side)
def gen_square(xy, sq_shape, img_size):
    """Return an img_size array of zeros containing a block of ones of shape
    sq_shape whose top-left corner sits at position xy."""
    row, col = xy
    height, width = sq_shape
    canvas = np.zeros(img_size)
    canvas[row:row + height, col:col + width] = 1
    return canvas
def gen_training(sq_shape, img_size):
    """Generate every image that contains a single sq_shape block of ones
    fully inside img_size, sliding the block over all valid positions.

    Returns an array of shape (n_positions, img_size[0], img_size[1]).
    """
    if img_size[0] != img_size[1]:
        # inflate_images/squash_images assume square images downstream.
        logger.warning("Unsquashing will not work with none squares yet!")
    training = []
    # The original used range(img_size - 1), which equals img_size - sq + 1
    # only for 2-pixel-wide squares; this form keeps any sq_shape in-bounds.
    for x in range(img_size[0] - sq_shape[0] + 1):
        for y in range(img_size[1] - sq_shape[1] + 1):
            training.append(gen_square((x, y), sq_shape, img_size))
    return np.array(training)
def ll_score(v, v_prime):
    """Elementwise Bernoulli log-likelihood of binary pixel v under the
    predicted probability v_prime (only v in {0, 1} is supported)."""
    if v not in (0, 1):
        raise NotImplementedError()
    prob = v_prime if v == 1 else 1 - v_prime
    return np.log(prob)

# Vectorize so the scorer broadcasts over whole image arrays.
ll_score = np.vectorize(ll_score)
def evaluate_model(training, model):
    """Total Bernoulli log-likelihood of reconstructing `training` under `model`.

    Draws 5000 stochastic reconstructions with a VanillaSampler and returns a
    1-D array with one summed log-likelihood score per draw.

    Fixes: the original ignored the `training` parameter and read the global
    `train` instead, and carried dead `avg` bookkeeping that was never used.
    """
    sampler = VanillaSampler(model)
    flat = squash_images(training)  # loop-invariant; hoisted out of the loop
    scores = []
    for _ in range(5000):
        recon = sampler.reconstruction_given_visible(flat, return_sigmoid=True)
        scores.append(ll_score(flat, recon).sum())
    return np.array(scores)
def plot_eval(train,model):
    """Visual diagnostics for a trained RBM: dream samples, weight Hinton
    diagrams, the log-likelihood trace, and reconstruction residuals.

    NOTE(review): relies on the module-level sampler ``s`` rather than one
    built from ``model``, and hard-codes 5x5 images -- confirm before reuse.
    """
    # look at the reconstructions
    dreams = []
    for i in range(16):
        dreams.append(s.dream(model).reshape(5,5))
    pp.images(np.array(dreams))
    # Let's also look at its weights
    pp.images(rbmpy.rbm.weights_into_hiddens(model.weights)[:10], cmap='Greys',title= "Hinton Diagrams",filename="Results/Weights.png")
    # One summed log-likelihood score per sampling pass (see evaluate_model).
    result = evaluate_model(train,model)
    plt.plot(result)
    plt.show()
    print("mean{:.2f} Worst {:.2f} Best {:.2f}".format( np.mean(result), np.min(result), np.max(result)))
    # Residuals: training images minus their reconstructions.
    pp.images(inflate_images(squash_images(train) - s.reconstruction_given_visible(squash_images(train))))
# All 16 placements of a 2x2 square inside a 5x5 image.
train = gen_training((2,2),(5,5))
# np.random.shuffle(train)
pp.images(train, title="Training Set", filename="Results/Training.png")
"""
Explanation: 2 Dimension Square Separation
In this notebook I was able to separate two overlapping squares
Log Likelihood
We want to look for the log likelihood of producing the dataset $ \mathcal{D} $ for a given image from dreams of the image. Wait the dreams? Or should I look at the reconstructions? For looking at the RBM I should be able to get away with a reconstruction
$$ LL_{\mathcal{D}} = \sum_{i} v_i \log( \sigma_i) + (1 - v_i) \log(1 - \sigma_i) $$
$$
\log P\big(v\big|h_{a}\big) = \begin{cases}
\log( \sigma_i) & \text{if $v_i=1$}\
\log(1 - \sigma_i) & \text{if $v_i = 0$}
\end{cases}
$$
End of explanation
"""
# NOTE(review): RBM(25, 25, 16) -- presumably (num_visible, num_hidden, ...);
# confirm the third argument's meaning against rbmpy.rbm.RBM.
model = RBM(25,25,16)
s = VanillaSampler(model)
t = VanillaTrainier(model, s)
# 200 training epochs on the flattened 5x5 squares; visible bias disabled.
t.train(200, squash_images(train), learning_rate=0.05, use_visible_bias = False)
# plot the 16 centers
plot_eval(train, model)
"""
Explanation: Train and Evaluate the Traditional Model
End of explanation
"""
# Show the sampler's dream() signature/documentation.
help(s.dream)
s = VanillaSampler(model)
# Two independent fantasy ("dream") samples from the trained RBM.
dream1 = s.dream(model, num_gibbs = 500)
dream2 = s.dream(model, num_gibbs = 500)
# Visible-space pre-activations (phi) driven by each dream's hidden vector.
phi_1 = np.dot(s.visible_to_hidden(dream1), model.weights)
phi_2 = np.dot(s.visible_to_hidden(dream2), model.weights)
pp.image(expit(phi_1).reshape(5,5))
pp.image(expit(phi_2).reshape(5,5))
# pp.image((expit(phi_1) + expit(phi_2)).reshape(5,5))
# Composite image: sigmoid of the SUMMED pre-activations, not the sum of sigmoids.
comp = expit(phi_1 + phi_2)
pp.image(comp.reshape(5,5))
# ORBM sampler with both causes sharing the same weights and hidden biases.
orbm_sampler = ApproximatedSampler(model.weights,model.weights,model.hidden_bias, model.hidden_bias)
rand_h = np.random.randint(0,2,size=( model.num_hid()))
# Separate the composite back into the two underlying visible images.
left, right = orbm_sampler.v_to_v(rand_h,rand_h, comp)
plt.suptitle("ORBM")
pp.image(left.reshape(5,5))
pp.image(right.reshape(5,5))
# Baseline: a plain RBM reconstruction of the same composite.
rbm_sampler = VanillaSampler(model)
plt.suptitle("RBM")
pp.image(rbm_sampler.reconstruction_given_visible(comp).reshape(5,5))
a = ApproximatedMulDimSampler(model.weights,model.weights, model.hidden_bias,model.hidden_bias)
# Build a composite from two actual training squares via elementwise maximum.
data = model.visible.copy()
np.random.shuffle(data)
item_one = inflate_images(data)[0]
item_two = inflate_images(data)[1]
composite_v = np.maximum(item_one,item_two )
pp.image(item_one+ item_two,cmap='Paired',show_colorbar=False)
# NOTE(review): rand_h is re-drawn with length 10 here -- confirm this matches
# the model's hidden-layer size.
rand_h = np.random.randint(0,2,10)
approx= ApproximatedSampler(model.weights, model.weights, model.hidden_bias, model.hidden_bias)
reconstruction = approx.v_to_v(rand_h,rand_h, composite_v.reshape(25),num_gibbs=500)
pp.image(reconstruction[0].reshape(5,5),show_colorbar=False, title="V'_a")
pp.image(reconstruction[1].reshape(5,5), show_colorbar=False, title = "V'_b" )
pp.image(reconstruction[0].reshape(5,5) + reconstruction[1].reshape(5,5),title="Composite Recon" ,cmap ='Paired',show_colorbar=False)
pp.image(s.reconstruction_given_visible(composite_v.reshape(25)).reshape(5,5),show_colorbar=False)
"""
Explanation: Make the sampler and random Composite
End of explanation
"""
def gen_composite_training(sq_shape, img_size, static_xy):
    """Images of two overlapping sq_shape squares: one fixed at static_xy and
    one sliding over every fully in-bounds position.

    Returns an array of shape (n_positions, img_size[0], img_size[1]).
    """
    # The static square never changes, so build it once outside the loop.
    static_sq = gen_square(static_xy, sq_shape, img_size)
    training = []
    # The original used range(img_size - 1), which equals img_size - sq + 1
    # only for 2-pixel-wide squares; this form keeps any sq_shape in-bounds.
    for x in range(img_size[0] - sq_shape[0] + 1):
        for y in range(img_size[1] - sq_shape[1] + 1):
            training.append(np.maximum(gen_square((x, y), sq_shape, img_size), static_sq))
    return np.array(training)
# Composites of a sliding 2x2 square with a second square fixed at (1,1).
comp = gen_composite_training((2,2),(5,5),(1,1))
pp.images(comp)
# NOTE(review): rand_h has length 35 here -- confirm this matches the model's
# hidden-layer size.
rand_h = np.random.randint(0,2,35)
approx= ApproximatedSampler(model.weights, model.weights, model.hidden_bias, model.hidden_bias)
for current_img in comp:
    # For each composite: ORBM separation into two causes, plus a plain RBM
    # reconstruction for comparison (last panel).
    reconstruction = approx.v_to_v(rand_h,rand_h,current_img.reshape(25),num_gibbs=1000)
    pp.images(np.array([current_img,reconstruction[0].reshape(5,5), reconstruction[1].reshape(5,5), s.reconstruction_given_visible(current_img.reshape(25)).reshape(5,5)]))
"""
Explanation: Make a composite training set
End of explanation
"""
|
mne-tools/mne-tools.github.io | stable/_downloads/e41b6a898e7a75f8a9f1a6c00ca73857/20_visualize_epochs.ipynb | bsd-3-clause | import os
import mne

sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
# Crop to the first 120 s of the recording to keep memory usage down.
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False).crop(tmax=120)
"""
Explanation: Visualizing epoched data
This tutorial shows how to plot epoched data as time series, how to plot the
spectral density of epoched data, how to plot epochs as an imagemap, and how to
plot the sensor locations and projectors stored in ~mne.Epochs objects.
We'll start by importing the modules we need, loading the continuous (raw)
sample data, and cropping it to save memory:
End of explanation
"""
# Extract trigger events and map the integer IDs to readable condition labels.
events = mne.find_events(raw, stim_channel='STI 014')
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
              'visual/right': 4, 'face': 5, 'button': 32}
# Epoch from 200 ms before to 500 ms after each event; preload into memory.
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, event_id=event_dict,
                    preload=True)
del raw  # free the continuous data; only the epochs are needed from here on
"""
Explanation: To create the ~mne.Epochs data structure, we'll extract the event
IDs stored in the :term:stim channel, map those integer event IDs to more
descriptive condition labels using an event dictionary, and pass those to the
~mne.Epochs constructor, along with the ~mne.io.Raw data and the
desired temporal limits of our epochs, tmin and tmax (for a
detailed explanation of these steps, see tut-epochs-class).
End of explanation
"""
# Keep only catch trials (face, ID 5) and button presses (ID 32) for overlay.
catch_trials_and_buttonpresses = mne.pick_events(events, include=[5, 32])
epochs['face'].plot(events=catch_trials_and_buttonpresses, event_id=event_dict,
                    event_color=dict(button='red', face='blue'))
"""
Explanation: Plotting Epochs as time series
.. sidebar:: Interactivity in pipelines and scripts
To use the interactive features of the `~mne.Epochs.plot` method
when running your code non-interactively, pass the ``block=True``
parameter, which halts the Python interpreter until the figure window is
closed. That way, any channels or epochs that you mark as "bad" will be
taken into account in subsequent processing steps.
To visualize epoched data as time series (one time series per channel), the
mne.Epochs.plot method is available. It creates an interactive window
where you can scroll through epochs and channels, enable/disable any
unapplied :term:SSP projectors <projector> to see how they affect the
signal, and even manually mark bad channels (by clicking the channel name) or
bad epochs (by clicking the data) for later dropping. Channels marked "bad"
will be shown in light grey color and will be added to
epochs.info['bads']; epochs marked as bad will be indicated as 'USER'
in epochs.drop_log.
Here we'll plot only the "catch" trials from the sample dataset
<sample-dataset>, and pass in our events array so that the button press
responses also get marked (we'll plot them in red, and plot the "face" events
defining time zero for each epoch in blue). We also need to pass in
our event_dict so that the ~mne.Epochs.plot method will know what
we mean by "button" — this is because subsetting the conditions by
calling epochs['face'] automatically purges the dropped entries from
epochs.event_id:
End of explanation
"""
# Same plot in butterfly mode, channels grouped by sensor selection.
epochs['face'].plot(events=catch_trials_and_buttonpresses, event_id=event_dict,
                    event_color=dict(button='red', face='blue'),
                    group_by='selection', butterfly=True)
"""
Explanation: To see all sensors at once, we can use butterfly mode and group by selection:
End of explanation
"""
# Load precomputed ECG (heartbeat) SSP projectors and apply them to the epochs.
ecg_proj_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                             'sample_audvis_ecg-proj.fif')
ecg_projs = mne.read_proj(ecg_proj_file)
epochs.add_proj(ecg_projs)
epochs.apply_proj()
"""
Explanation: Plotting projectors from an Epochs object
In the plot above we can see heartbeat artifacts in the magnetometer
channels, so before we continue let's load ECG projectors from disk and apply
them to the data:
End of explanation
"""
# Plot all projector topomaps with colorbar limits fixed jointly across subplots.
epochs.plot_projs_topomap(vlim='joint')
"""
Explanation: Just as we saw in the tut-section-raw-plot-proj section, we can plot
the projectors present in an ~mne.Epochs object using the same
~mne.Epochs.plot_projs_topomap method. Since the original three
empty-room magnetometer projectors were inherited from the
~mne.io.Raw file, and we added two ECG projectors for each sensor
type, we should see nine projector topomaps:
End of explanation
"""
# True means every projector in the Epochs object has already been applied.
print(all(proj['active'] for proj in epochs.info['projs']))
"""
Explanation: Note that these field maps illustrate aspects of the signal that have
already been removed (because projectors in ~mne.io.Raw data are
applied by default when epoching, and because we called
~mne.Epochs.apply_proj after adding additional ECG projectors from
file). You can check this by examining the 'active' field of the
projectors:
End of explanation
"""
# Sensor positions in 3D and as a flattened 2D topomap, all channel types.
epochs.plot_sensors(kind='3d', ch_type='all')
epochs.plot_sensors(kind='topomap', ch_type='all')
"""
Explanation: Plotting sensor locations
Just like ~mne.io.Raw objects, ~mne.Epochs objects
keep track of sensor locations, which can be visualized with the
~mne.Epochs.plot_sensors method:
End of explanation
"""
# Power spectral density of the auditory epochs, EEG channels only.
epochs['auditory'].plot_psd(picks='eeg')
"""
Explanation: Plotting the power spectrum of Epochs
Again, just like ~mne.io.Raw objects, ~mne.Epochs objects
have a ~mne.Epochs.plot_psd method for plotting the spectral
density_ of the data.
End of explanation
"""
# Scalp topography of band power with default bands/magnetometers, in dB.
epochs['visual/right'].plot_psd_topomap()
"""
Explanation: It is also possible to plot spectral estimates across sensors as a scalp
topography, using ~mne.Epochs.plot_psd_topomap. The default parameters will
plot five frequency bands (δ, θ, α, β, γ), will compute power based on
magnetometer channels, and will plot the power estimates in decibels:
End of explanation
"""
# Custom frequency bands: (freq, title) or (fmin, fmax, title) tuples.
bands = [(10, '10 Hz'), (15, '15 Hz'), (20, '20 Hz'), (10, 20, '10-20 Hz')]
epochs['visual/right'].plot_psd_topomap(bands=bands, vlim='joint',
                                        ch_type='grad')
"""
Explanation: Just like ~mne.Epochs.plot_projs_topomap,
~mne.Epochs.plot_psd_topomap has a vlim='joint' option for fixing
the colorbar limits jointly across all subplots, to give a better sense of
the relative magnitude in each frequency band. You can change which channel
type is used via the ch_type parameter, and if you want to view
different frequency bands than the defaults, the bands parameter takes a
list of tuples, with each tuple containing either a single frequency and a
subplot title, or lower/upper frequency limits and a subplot title:
End of explanation
"""
# Image map of all auditory epochs, combined across magnetometers by the mean.
epochs['auditory'].plot_image(picks='mag', combine='mean')
"""
Explanation: If you prefer untransformed power estimates, you can pass dB=False. It is
also possible to normalize the power estimates by dividing by the total power
across all frequencies, by passing normalize=True. See the docstring of
~mne.Epochs.plot_psd_topomap for details.
Plotting Epochs as an image map
A convenient way to visualize many epochs simultaneously is to plot them as
an image map, with each row of pixels in the image representing a single
epoch, the horizontal axis representing time, and each pixel's color
representing the signal value at that time sample for that epoch. Of course,
this requires either a separate image map for each channel, or some way of
combining information across channels. The latter is possible using the
~mne.Epochs.plot_image method; the former can be achieved with the
~mne.Epochs.plot_image method (one channel at a time) or with the
~mne.Epochs.plot_topo_image method (all sensors at once).
By default, the image map generated by ~mne.Epochs.plot_image will be
accompanied by a scalebar indicating the range of the colormap, and a time
series showing the average signal across epochs and a bootstrapped 95%
confidence band around the mean. ~mne.Epochs.plot_image is a highly
customizable method with many parameters, including customization of the
auxiliary colorbar and averaged time series subplots. See the docstrings of
~mne.Epochs.plot_image and mne.viz.plot_compare_evokeds (which is
used to plot the average time series) for full details. Here we'll show the
mean across magnetometers for all epochs with an auditory stimulus:
End of explanation
"""
# Per-sensor image maps; combine='gfp' merges sensors via global field power.
epochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'])
epochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'], combine='gfp')
"""
Explanation: To plot image maps for individual sensors or a small group of sensors, use
the picks parameter. Passing combine=None (the default) will yield
separate plots for each sensor in picks; passing combine='gfp' will
plot the global field power (useful for combining sensors that respond with
opposite polarity).
End of explanation
"""
# Drop epochs with extreme peak-to-peak amplitudes so the image-map colour
# limits are not dominated by a few outliers.
reject_criteria = {'mag': 3000e-15,    # 3000 fT
                   'grad': 3000e-13,   # 3000 fT/cm
                   'eeg': 150e-6}      # 150 µV
epochs.drop_bad(reject=reject_criteria)

# One topo-image figure per MEG sensor type (they live on different scales).
sensor_titles = {'mag': 'Magnetometers', 'grad': 'Gradiometers'}
for ch_type, title in sensor_titles.items():
    layout = mne.channels.find_layout(epochs.info, ch_type=ch_type)
    epochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',
                                            font_color='k', title=title)
"""
Explanation: To plot an image map for all sensors, use
~mne.Epochs.plot_topo_image, which is optimized for plotting a large
number of image maps simultaneously, and (in interactive sessions) allows you
to click on each small image map to pop open a separate figure with the
full-sized image plot (as if you had called ~mne.Epochs.plot_image on
just that sensor). At the small scale shown in this tutorial it's hard to see
much useful detail in these plots; it's often best when plotting
interactively to maximize the topo image plots to fullscreen. The default is
a figure with black background, so here we specify a white background and
black foreground text. By default ~mne.Epochs.plot_topo_image will
show magnetometers and gradiometers on the same plot (and hence not show a
colorbar, since the sensors are on different scales) so we'll also pass a
~mne.channels.Layout restricting each plot to one channel type.
First, however, we'll also drop any epochs that have unusually high signal
levels, because they can cause the colormap limits to be too extreme and
therefore mask smaller signal fluctuations of interest.
End of explanation
"""
# EEG topo image; sigma smooths vertically (across epochs) -- use with caution.
layout = mne.channels.find_layout(epochs.info, ch_type='eeg')
epochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',
                                        font_color='k', sigma=1)
"""
Explanation: To plot image maps for all EEG sensors, pass an EEG layout as the layout
parameter of ~mne.Epochs.plot_topo_image. Note also here the use of
the sigma parameter, which smooths each image map along the vertical
dimension (across epochs) which can make it easier to see patterns across the
small image maps (by smearing noisy epochs onto their neighbors, while
reinforcing parts of the image where adjacent epochs are similar). However,
sigma can also disguise epochs that have persistent extreme values and
maybe should have been excluded, so it should be used with caution.
End of explanation
"""
|
keskarnitish/Recipes | examples/ImageNet Pretrained Network (VGG_S).ipynb | mit | !wget https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/vgg_cnn_s.pkl
"""
Explanation: Introduction
This example demonstrates using a network pretrained on ImageNet for classification. The model used was converted from the VGG_CNN_S model (http://arxiv.org/abs/1405.3531) in Caffe's Model Zoo.
For details of the conversion process, see the example notebook "Using a Caffe Pretrained Network - CIFAR10".
License
The model is licensed for non-commercial use only
Download the model (393 MB)
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import lasagne
from lasagne.layers import InputLayer, DenseLayer, DropoutLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers import MaxPool2DLayer as PoolLayer
from lasagne.layers import LocalResponseNormalization2DLayer as NormLayer
from lasagne.utils import floatX
"""
Explanation: Setup
End of explanation
"""
# Build VGG_CNN_S as a dictionary of named Lasagne layers.
# Input is a batch of 3x224x224 images (BGR, mean-subtracted -- see prep_image).
# flip_filters=False throughout because the weights were converted from Caffe,
# which performs cross-correlation rather than true convolution.
net = {}
net['input'] = InputLayer((None, 3, 224, 224))
net['conv1'] = ConvLayer(net['input'], num_filters=96, filter_size=7, stride=2, flip_filters=False)
net['norm1'] = NormLayer(net['conv1'], alpha=0.0001) # caffe has alpha = alpha * pool_size
# ignore_border=False matches Caffe's pooling behaviour at the image edges.
net['pool1'] = PoolLayer(net['norm1'], pool_size=3, stride=3, ignore_border=False)
net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5, flip_filters=False)
net['pool2'] = PoolLayer(net['conv2'], pool_size=2, stride=2, ignore_border=False)
net['conv3'] = ConvLayer(net['pool2'], num_filters=512, filter_size=3, pad=1, flip_filters=False)
net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3, pad=1, flip_filters=False)
net['conv5'] = ConvLayer(net['conv4'], num_filters=512, filter_size=3, pad=1, flip_filters=False)
net['pool5'] = PoolLayer(net['conv5'], pool_size=3, stride=3, ignore_border=False)
net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
# Final softmax over the 1000 ImageNet classes.
net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=lasagne.nonlinearities.softmax)
output_layer = net['fc8']
"""
Explanation: Define the network
End of explanation
"""
# Load the pickled, Caffe-converted parameters plus metadata: the human-readable
# class names ('synset words') and the training-set mean image used for
# preprocessing. Then copy the weights into the Lasagne network.
# NOTE(review): this is Python 2 style -- on Python 3 the file must be opened
# with open(..., 'rb') and loaded with encoding='latin-1'; confirm target version.
import pickle
model = pickle.load(open('vgg_cnn_s.pkl'))
CLASSES = model['synset words']
MEAN_IMAGE = model['mean image']
lasagne.layers.set_all_param_values(output_layer, model['values'])
"""
Explanation: Load the model parameters and metadata
End of explanation
"""
# Download the ILSVRC2012 validation image URL index and keep five URLs,
# sampled reproducibly via a fixed RNG seed.
# NOTE(review): urllib.urlopen is the Python 2 API; Python 3 uses
# urllib.request.urlopen.
import urllib
index = urllib.urlopen('http://www.image-net.org/challenges/LSVRC/2012/ori_urls/indexval.html').read()
image_urls = index.split('<br>')
np.random.seed(23)
np.random.shuffle(image_urls)
image_urls = image_urls[:5]
"""
Explanation: Trying it out
Get some test images
We'll download the ILSVRC2012 validation URLs and pick a few at random
End of explanation
"""
import io
import skimage.transform
def prep_image(url):
    """
    Download an image and prepare it for the VGG_CNN_S network.

    Returns a tuple (rawim, im):
    rawim -- uint8 HxWx3 central crop, suitable for display
    im    -- floatX 1x3x224x224 array: channels-first, BGR,
             mean-image subtracted, ready for the network
    """
    ext = url.split('.')[-1]
    # Decode the downloaded bytes; the URL extension hints the image format.
    im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)
    # Resize so smallest dim = 256, preserving aspect ratio
    # NOTE(review): w*256/h relies on Python 2 integer division to produce an
    # int size; on Python 3 this yields a float -- confirm target version.
    h, w, _ = im.shape
    if h < w:
        im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)
    else:
        im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)
    # Central crop to 224x224
    h, w, _ = im.shape
    im = im[h//2-112:h//2+112, w//2-112:w//2+112]
    # Keep an uint8 copy for display before further transformation.
    rawim = np.copy(im).astype('uint8')
    # Shuffle axes to c01 (channels first, as the network expects)
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    # Convert RGB to BGR to match the Caffe-trained weights
    im = im[::-1, :, :]
    # Subtract the dataset mean image loaded with the model
    im = im - MEAN_IMAGE
    return rawim, floatX(im[np.newaxis])
"""
Explanation: Helper to fetch and preprocess images
End of explanation
"""
# For each sampled URL: download and preprocess the image, run a forward pass,
# and display the image annotated with its top-5 predicted class names.
for url in image_urls:
    try:
        rawim, im = prep_image(url)
        # Forward pass; deterministic=True disables dropout at inference time.
        prob = np.array(lasagne.layers.get_output(output_layer, im, deterministic=True).eval())
        # Indices of the five highest-probability classes, best first.
        top5 = np.argsort(prob[0])[-1:-6:-1]
        plt.figure()
        plt.imshow(rawim.astype('uint8'))
        plt.axis('off')
        for n, label in enumerate(top5):
            plt.text(250, 70 + n * 20, '{}. {}'.format(n+1, CLASSES[label]), fontsize=14)
    except IOError:
        # Dead or unreachable URL -- skip it and move on.
        print('bad url: ' + url)
"""
Explanation: Process test images and print top 5 predicted labels
End of explanation
"""
|
ConnectedSystems/veneer-py | doc/training/5_Running_Iteratively.ipynb | isc | import veneer
v = veneer.Veneer(port=9876)
"""
Explanation: Session 5: Running Iteratively
Running Source models from Python becomes more compelling when you start running the model multiple times, modifying something (parameters, inputs, structure) about the model between runs.
At the same time, the number of possible actions expands, and in many cases there are multiple possible ways to achieve a particular change.
This session explores iterative runs and some of the ways to make changes to the Source model.
In this session, the changes will be 'non-structural' - which is a loose term, but here it means that we won't be adding or removing nodes, links or catchments and we won't be changing model types within the network.
The types of non-structural changes we will cover include:
Iterating over input sets
Modifying functions
Modifying time series, piecewise linear functions, etc
Modifying input sets
Modifying variables directly
Overview
Organising batch runs
Hard code in the notebook
Externalise into a separate file?
Move logic to Python modules?
Batch Runs
Over pre-defined input sets
Modifying functions
Modifying time series, piecewise linear functions, etc
Modifying input sets
Modifying variables directly
Querying and summarising results
Aside: Organising Batch Runs
The functionality covered in this session is enough to create some quite sophisticated modelling workflows.
It is worth considering how you organise such workflows.
Notebooks can be useful and important documentation regarding your modelling. The notebooks are a way to make your actions reproducible, both by others in the future and on other Source models.
When moving to more sophisticated notebooks, it is worth considering the following issues:
Where will you store data that is related to the model runs - such as the set of parameter values you iterate over, and
How much of the logic will you keep in the notebook - is it worth moving logic out into separate .py modules that can be used by multiple notebooks.
In order to be standalone, these tutorial notebooks tend to combine all the data and logic into the notebook itself, but this isn't desirable. In some cases, it is worth extracting the data into files so that the notebook can be run in other circumstances with different data. In other cases, there is enough custom logic in the workflow that this logic should be in Python modules in order to facilitate reuse.
We'll mostly take the easy route in these tutorials - and keep it all the notebook.
Which Model?
Note: This session uses ExampleProject/RiverModel2.rsproj. You are welcome to work with your own model instead, however you will need to change the notebook text at certain points to reflect the names of nodes, links and functions in your model file.
Iterating over Input Sets
Perhaps the simplest approach to iterating with Python and Source is to the run the model for a series of Scenario Input Sets, where you've separately defined those input sets - eg manually.
There are two main ways to use input sets in batch runs:
Specify an input set for each simulation when calling v.run_model. This instructs Source to use that Input Set include all data sources configured for that Input Set.
Call v.apply_input_set before v.run_model. v.apply_input_set will only execute the parameter 'instructions' in the input set.
Option 2 can be useful where you have numerous input sets that need to be combined in various ways for different scenarios.
Here, we will demonstrate option 1 (specify in v.run_model()) and then illustrate option 2 with a hypothetical example (which has a basis in reality!).
As always, initialise the Veneer client
As always, you need to have the veneer package imported and a Veneer client object created
End of explanation
"""
input_sets = v.input_sets()
input_sets
input_sets.as_dataframe()
"""
Explanation: Finding the input sets in the model
We can find the input sets in the current model with v.input_sets()
End of explanation
"""
# Tell Veneer which model outputs to record before running: end-of-system
# flow at the Lower Gauge, everything at the Crop Fields demand node, and
# the Recreational Lake storage volume.
things_to_record=[
    {'NetworkElement':'Lower Gauge','RecordingVariable':'Downstream Flow Volume'},
    {'NetworkElement':'Crop Fields'},
    {'NetworkElement':'Recreational Lake','RecordingElement':'StorageVolume'}
]
v.configure_recording(enable=things_to_record)
"""
Explanation: Note: When looking at the details of the input set from Python, you only see the instructions (under Configuration). This is because the instructions are the only thing that you can easily modify from Python at this stage. Everything else is there in Source and will be used when you run the model.
We can iterate over the input_sets list using a Python for loop, but first, we should establish a way to track the results we are interested in.
The input sets in the sample model simply change the maximum extraction rate at the supply point. The effect of this should be visible at a few points in the model - in terms of storage releases, extractions, shortfalls on the demand and flow out the bottom of the system. Lets ensure we are recording some of those
End of explanation
"""
all_results = {}
"""
Explanation: We now want to iterate over the input sets, running the model each time, and retrieving some results for each run.
We can either:
do all the runs and retrieve all the results after all the runs, OR
do one run at a time and retrieve the relevant results.
Option 2 allows us to drop each run as we go (which can be useful to save memory on big models).
Either way, we need to track information as we go - with option 1 we need to know which run relates to each input set.
We will use a Python dictionary to let us track input set -> results time series
End of explanation
"""
for i in [0,1,2,3,4]:
print(i)
"""
Explanation: A python for loop takes the form
python
for element in loop_range:
do_something_typically_using(element)
where loop_range is the thing you're looping over and is typically a list or similar. (It needs to be 'iterable')
For example:
End of explanation
"""
for i in range(10):
print(i)
"""
Explanation: There are lots of ways to support loop writing, such as the range(n) function, which returns an iterable object that goes from 0 to n-1:
End of explanation
"""
import pandas as pd

# Run the model once per scenario input set, retrieving the results of
# interest after each run and storing them keyed by input-set name.
for input_set in input_sets:
    set_name = input_set['Name']
    # Log which scenario is being run -- useful feedback on long batch runs.
    veneer.log('Running ' + set_name)
    # Run the model with the current input set
    v.run_model(SelectedInputSet=set_name)
    # Retrieve the run index so we can pass it to v.retrieve_multiple_time_series
    run_index = v.retrieve_run()
    # End-of-system flow recorded at the Lower Gauge.
    end_of_system_flow = v.retrieve_multiple_time_series(run_data=run_index,criteria={
        'NetworkElement':'Lower Gauge','RecordingVariable':'Downstream Flow Volume'
    })
    # All recorded demand-model series at the Crop Fields node (regex match).
    crop_time_series = v.retrieve_multiple_time_series(run_data=run_index,criteria={
        'NetworkElement':'Crop Fields','RecordingVariable':'.*@Demand Model@.*'
    })
    # Join the two result sets on their shared date index into one DataFrame.
    all_results[set_name] = pd.merge(end_of_system_flow,crop_time_series,left_index=True,right_index=True)
"""
Explanation: We will loop over the input_sets list, where each item in the list is a Python dictionary. Those dictionaries contain the 'Name' key, which we need to pass to Source
We need some help from the pandas core library for this one (to combine DataFrames from two calls to v.retrieve_multiple_time_series), so we'll import it here
End of explanation
"""
all_results
"""
Explanation: Now that that's run, lets look at the results
The default view won't be that friendly...
End of explanation
"""
all_results['Default Input Set'][0:10]
"""
Explanation: but we can pick the results for a given input set.
End of explanation
"""
%pylab inline
import matplotlib.pyplot as plt
for input_set in all_results:
all_results[input_set]['Lower Gauge:Downstream Flow Volume'].plot(label=input_set)
plt.legend()
"""
Explanation: Note how the results from two calls to v.retrieve_multiple_time_series have been combined into the one DataFrame
Lets enable charting now so we can plot them
(This time, we explicitly import matplotlib.pyplot in order to get the legend function)
End of explanation
"""
delta = all_results['Default Input Set']-all_results['Unrestricted Take']
delta
delta['Lower Gauge:Downstream Flow Volume'].plot()
"""
Explanation: There's obviously no visible difference there...
Lets compute the difference between the two runs and see if we can find deltas
End of explanation
"""
functions = v.functions()
f_df = functions.as_dataframe()
f_df
"""
Explanation: So there is a difference, albeit small c.f. the overall system flow.
Assembling scenarios from multiple input sets
In a recent project, there was a requirement to run 48 simulations. This was made up of 12 development scenarios x 4 climate scenarios.
The scenarios were described as input sets:
4 input sets each described one of the climate scenarios
8 input sets described the 12 scenarios, by combining the 8 input sets in different ways and using v.apply_input_sets in a particular order.
The scenario runs were organised as follows:
Each scenario was described as a list of input sets that needed to be applied, including the order
The different scenarios were grouped together in a dictionary with the overall name of the scenario:
python
scenarios={
'Baseline':['Reset Input Set'],
'Development Case 1':['Reset Input Set','Increase Crop Areas'],
'Development Case 2':['Reset Input Set','Increase Crop Areas','Increase Storage Volumes'],
}
While the climate input sets were identifed by the input set name:
python
climate_sets=['Historic','Low Rainfall','Moderate Rainfall','High Rainfall']
The model runs were then handled with a nested loop:
```python
for scenario_name,input_sets in scenarios.items:
# Run the input sets required by this scenario
for scenario_input_set in input_sets:
v.apply_input_set(scenario_input_set)
# Run the model in this configuration, once for each climate input set
for climate_set in climate_sets:
v.run_model(SelectedInputSet=climate_set)
```
Note: Source will remember the last input set used as the SelectedInputSet. If you want to use a different input set for the next run, you will need to explicitly specify it on the next call to v.run_model()
Aside: Changes (usually) persist!
In the above examples, and in most (but not all!) of what follows, when you specify a change for a particular run, that change will persist - ie it will still be there in future runs unless you change things back.
In some cases, the changes will get saved in the model file.
There is no generic 'undo' functionality in Veneer or veneer-py, so you need to consider how you reset any changes that you don't want to keep. For example:
Just don't save the model if you don't want the changes to be permanent. In many cases this is the simplest approach - simply write the script to take a working model, run some analyses and then close the software.
Query values before modifying them. This way, you can save the initial setup in a variable in Python and use it to reset later.
Have a 'reset' input set that covers all the things you'll be modifying. Execute this input set with v.apply_input_set when reset is required
The flipside is that not all changes will persist, even if you want them to. Most notably, changes to a the time series data in a data source won't be saved back to disk. Changed time series will survive as long as the project is open in the current session of Source.
Modifying functions
The Functions feature in Source was the initial way in which model parameters could be expossed to external programs, through RiverSystem.CommandLine.
You can query and modify functions in Source. While you can do this for any Function, its typical to define Functions that are scalars, and modify these, with the scalar functions being referenced by other functions.
This can be seen in the sample model.
End of explanation
"""
print(f_df)
"""
Explanation: Note: Jupyter thinks those expressions are in LaTeX format. We'll temporarily disable HTML output to look at this data frame
End of explanation
"""
v.update_function('$InflowScaling',0.5)
print(v.functions().as_dataframe())
"""
Explanation: We can see that $InflowScaling is used by three functions - $CrabSaled, $FishScaled and $ShellScaled. These functions are used to scale the respective inflow timeseries by a single scaling factor.
We can use $InflowScaling to scale all model inflows, for example to test the reliability of the system under reduced inflow.
End of explanation
"""
v.update_function('$InflowScaling',1.0)
"""
Explanation: Before proceeding, lets reset $InflowScaling to its original value
Because its just one change, its easy enough to hard code it here...
End of explanation
"""
for fn in functions:
v.update_function(fn['Name'],fn['Expression'])
"""
Explanation: Alternatively, we could reset all the functions to the values we retrieved earlier.
functions is a list of Python dictionaries, with each dictionary in the list having a 'Name' and the the expression
End of explanation
"""
import numpy as np
NUMBER_OF_SIMULATIONS=50
sampled_scaling_factors = np.random.exponential(size=NUMBER_OF_SIMULATIONS)
sampled_scaling_factors
plt.hist(sampled_scaling_factors)
"""
Explanation: Batch run using functions
We can now run a number of runs, modifying the $InflowScaling function each time and hence modifying the system inflow time series.
We'll perform a very simple (and short!) monte-carlo simulation, sampling from the exponential distribution. We'll then see what effect this has on spill volumes from the storage.
The random generators for different distributions are in the numpy package, which, by convention, is imported as np
End of explanation
"""
# Monte-carlo style batch: run the model once per sampled inflow scaling
# factor, recording the mean annual spill volume from the lake each time.
spill_results=[]
# Store our time series criteria in a variable to use it in configuring recording and retrieving results
ts_match_criteria = {'NetworkElement':'Recreational Lake','RecordingVariable':'Spill Volume'}
v.configure_recording(enable=[ts_match_criteria])
for scaling_factor in sampled_scaling_factors:
    veneer.log('Running for $InflowScaling=%f'%scaling_factor)
    # We run the model many times here, so drop any existing results first to
    # limit memory use; dropping before the run leaves the final run's full
    # results available for inspection after the loop.
    v.drop_all_runs()
    # Set $InflowScaling to the current scaling factor
    v.update_function('$InflowScaling',scaling_factor)
    v.run_model()
    # Retrieve the spill time series, as an annual sum, with the column named for the variable ('Spill Volume')
    run_results = v.retrieve_multiple_time_series(criteria=ts_match_criteria,timestep='annual',name_fn=veneer.name_for_variable)
    # Store the mean spill volume and the scaling factor we used
    spill_results.append({'ScalingFactor':scaling_factor,'SpillVolume':run_results['Spill Volume'].mean()})
# Convert the results to a Data Frame
spill_results_df = pd.DataFrame(spill_results)
spill_results_df
"""
Explanation: Now we can construct our loop and gather the results.
I only want numbers on the average spill for each run, and I want to be able to plot it as a distribution in much the same way that we've plotted the distribution of $InflowScaling
End of explanation
"""
spill_results_df['SpillVolumeGL'] = spill_results_df['SpillVolume'] * 1e-6 # Convert to GL
spill_results_df['SpillVolumeGL'].hist()
"""
Explanation: Notes:
When retrieving the time series, we retrieved annual time series. When running locally, this is probably only for convenience. If you were running Python and Source on different computers, retrieving monthly or annual results can improve performance by reducing network traffic.
The units are again m^3/year - a fact we could check if we ran a single run and looked at the attributes on the time series. For now, we'll just convert them to GL
The call to v.drop_all_runs() ensures that we don't have 50 full results sets sitting in memory. By dropping the runs at the start of the loop, before running the model, we ensure that we have one set of results at the end in case we need to investigate anything in more detail.
End of explanation
"""
variables = v.variables()
variables_df = variables.as_dataframe()
variables_df
"""
Explanation: Modifying time series and piecewise linear functions
Functions in Source can make use of numerous types of variables, which can derive values from a number of sources, including:
values within the model, such as state variables,
time series inputs (data sources),
piecewise linear functions,
time varying patterns.
You can query Source for the list of variables, and, at this stage, you can directly modify time series variables and piecewise linear variables.
End of explanation
"""
variables_df[variables_df.VeneerSupported]
"""
Explanation: This summary doesn't tell you anything about the details of the values in the variable - although in some cases there is a URL pointing to the details (in the PiecewiseFunction column or the TimeSeries column).
As a quick summary of where more details are available, filter for variables where VeneerSupported=True
End of explanation
"""
v.variable_piecewise('$PatternPW')
v.variable_time_series('$CrabTS')[::500]
"""
Explanation: We can query for either the piecewise function or the time series
End of explanation
"""
pattern = v.variable_piecewise('$PatternPW')
pattern
pattern['Result'] *= 2.0 # Multiply each value of Result column by 2
pattern
v.update_variable_piecewise('$PatternPW',pattern)
"""
Explanation: We can update a piecewise linear variable by passing an appropriate dataframe (one with two columns, each with numbers) to v.update_variable_piecewise
For example, lets double the minimum flow requirement ($PatternPW)
End of explanation
"""
v.variable_piecewise('$PatternPW')
"""
Explanation: You can check that the change has taken effect by looking in the Functions Manager in Source, or retrieving the piecewise function again
End of explanation
"""
crab_ts = v.variable_time_series('$CrabTS')
crab_ts.plot()
"""
Explanation: Updating time series variables works in much the same way - you need to pass a DataFrame with an appropriate structure. In this case, you need a date time index and a single column of values.
One approach is to retrieve the existing time series data then modify the Value column (keeping the date index in place)
End of explanation
"""
monthly_scaling=[0.6,0.75,1.0,1.0,1.1,1.20,1.20,1.1,1.0,0.8,0.6,0.5]
len(monthly_scaling)
scaling_df = pd.DataFrame(data={'Month':range(1,13),'Scale':monthly_scaling}).set_index('Month')
scaling_df
"""
Explanation: Lets generate a synthetic time series based on the existing sequence.
We'll apply a distinct monthly scaling factor. We could do this in Source through a pattern variable, but using Python is an opportunity to demonstrate some of the time series capabilities in pandas.
First, lets initialise the scaling factor data.
End of explanation
"""
plt.plot(crab_ts.index.month)
"""
Explanation: So, now we need to apply these monthly factors to every day in the time series.
In many languages, you would write a loop (eg a for loop) at this point.
In Python, and particularly with numpy and pandas, there are more convenient (and performant) options
Because we have a datetime index on the time series, we can easily get a list of the month for each timestep, using crab_ts.index.month:
End of explanation
"""
scaling_df.Scale[12]
"""
Explanation: So, now we need to take that sequence of values and find the corresponding scaling factors.
We'll use the indexing capabilities of data frames to help. To understand the next step, see what happens when we index scaling_df by a month number (starting at 1)
End of explanation
"""
scaling_df.Scale[[2,5,7,12,12,1]]
"""
Explanation: Now, see what happens when we provide a series of months (including duplicates)
End of explanation
"""
scaling_for_timesteps = scaling_df.Scale[crab_ts.index.month].values
plot(scaling_for_timesteps)
"""
Explanation: Extending this, and using the list of months for each timestep, we can find the scaling factor for each timestep
End of explanation
"""
crab_ts['ValueScaled'] = crab_ts.Value * scaling_for_timesteps
# Lets plot the first year to see the effect
crab_ts[0:365].plot()
# That's hard to see, so lets look at the difference:
delta = crab_ts.Value-crab_ts.ValueScaled
delta[0:365].plot()
"""
Explanation: Now, we can multiple the values in the time series by the scaling factors
End of explanation
"""
crab_ts.columns
new_ts = crab_ts[['ValueScaled']]
new_ts[0::500]
v.update_variable_time_series('$CrabTS',new_ts)
"""
Explanation: We now have an extra column in our DataFrame. Source is expecting one column for our time series
End of explanation
"""
template='Nodes.Lake Release.Monthly Pattern=[%s]{ML/d}'
"""
Explanation: Modifying input sets
At the beginning of this session, we ran a series of scenarios by iterating through each scenario input set and running once for each one.
It is also possible to modify the commands in the input sets themselves.
Modifying input sets gives you control over a great many aspects of the model, although scripting the modification can get a bit fiddly, for two main reasons:
You need to manipulate and create Python strings that contain a variable reference (eg 'Nodes.Supply Point 13.Maximum Extraction Rate', a value (eg '100000') and units that can be understood by Source (eg 'ML/d'), and
It's hard to work out the variable reference text a priori.
Much of what can be accomplished with input sets can be handled directly using the functions under the v.model. namespace. However input sets can be a convenient option for certain parameters that aren't yet well supported under v.model. For example, there is currently no way to directly modify the monthly pattern on a minimum flow node, using the functions in veneer-py. However, you can modify the pattern via input sets - and the user interface in Source will give you hints as to how.
In the main Source user interface, select Edit|Scenario Input Sets.
In the text area where you type commands for the Source model, start typing:
Nodes.Lake Release.Monthly Pattern=
When you pressed =, it should show you the current value of the pattern, expressed in the scenario input set syntax. If you select this text and press enter, it should be added to the text editor:
Nodes.Lake Release.Monthly Pattern=[0 0 0 0 0 0 0 0 0 0 0 0]{ML/d}
You now have a working example of how to set the monthly pattern using an input set - and you can use this, along with Python's string manipulation routines, to set the monthly pattern from Python.
In the following blocks, we will use Python's string handling functionality. A Python tutorial will cover this in more depth.
Lets start by setting up a template for the command to change the monthly pattern. We'll add a string substitution command into the template:
End of explanation
"""
monthly_values=[5,3,4,6,6,7,7,6,5,4,4,5]
"""
Explanation: As you can see, the 0s (separated by spaces) have been replaced with a single %s. When used with the string substitution functionality, this tells Python to expect another string that should be inserted at this point.
We'll make that other string contain 12 numbers, separated by spaces.
Lets define the values we want
End of explanation
"""
list_as_string= ' '.join([str(v) for v in monthly_values])
list_as_string
"""
Explanation: Now, we want to convert our list of numbers (monthly_values) to a string, separated by space. We can use the join method, available on all strings, to do what we want.
The only trick is that join wants a list of strings, not a list of numbers. We'll use a list comprehension to convert each number to a string
End of explanation
"""
command = template%(list_as_string)
command
"""
Explanation: We can now combine this list into the template to create an input set command
End of explanation
"""
input_sets = v.input_sets()
input_sets.as_dataframe()
"""
Explanation: We can now update an input set of choice by adding this command.
First, retrieve the input sets:
End of explanation
"""
the_input_set = input_sets[0]
the_input_set['Configuration']
"""
Explanation: We'll modify the first one in the list
End of explanation
"""
the_input_set['Configuration'].append(command)
the_input_set['Configuration']
"""
Explanation: We can use .append to add a command to the 'Configuration'
End of explanation
"""
v.update_input_set(the_input_set['Name'],the_input_set)
"""
Explanation: Now, we can update the input set within Source
End of explanation
"""
|
chengsoonong/mclass-sky | projects/david/lab/experiment_log_regression.ipynb | bsd-3-clause | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt
from scipy.special import expit # The logistic sigmoid function
%matplotlib inline
"""
Explanation: Classification
COMP4670/8600 - Introduction to Statistical Machine Learning - Tutorial 3
$\newcommand{\trace}[1]{\operatorname{tr}\left{#1\right}}$
$\newcommand{\Norm}[1]{\lVert#1\rVert}$
$\newcommand{\RR}{\mathbb{R}}$
$\newcommand{\inner}[2]{\langle #1, #2 \rangle}$
$\newcommand{\DD}{\mathscr{D}}$
$\newcommand{\grad}[1]{\operatorname{grad}#1}$
$\DeclareMathOperator*{\argmin}{arg\,min}$
Setting up the environment
We use the SciPy implementation of the logistic sigmoid function, rather than (naively) implementing it ourselves, to avoid issues relating to numerical computation.
End of explanation
"""
names = ['diabetes', 'num preg', 'plasma', 'bp', 'skin fold', 'insulin', 'bmi', 'pedigree', 'age']
data = pd.read_csv('diabetes_scale.csv', header=None, names=names)
data['diabetes'].replace(-1, 0, inplace=True) # The target variable need be 1 or 0, not 1 or -1
data.head()
"""
Explanation: The data set
We will predict the incidence of diabetes based on various measurements (see description). Instead of directly using the raw data, we use a normalised version, where the label to be predicted (the incidence of diabetes) is in the first column. Download the data from mldata.org.
Read in the data using pandas.
End of explanation
"""
data['ones'] = np.ones((data.shape[0], 1)) # Add a column of ones
data.head()
data.shape
"""
Explanation: Classification via Logistic Regression
Implement binary classification using logistic regression for a data set with two classes. Make sure you use appropriate python style and docstrings.
Use scipy.optimize.fmin_bfgs to optimise your cost function. fmin_bfgs requires the cost function to be optimised, and the gradient of this cost function. Implement these two functions as cost and grad by following the equations in the lectures.
Implement the function train that takes a matrix of examples, and a vector of labels, and returns the maximum likelihood weight vector for logistic regression. Also implement a function test that takes this maximum likelihood weight vector and a matrix of examples, and returns the predictions. See the section Putting everything together below for expected usage.
We add an extra column of ones to represent the constant basis.
End of explanation
"""
def cost(w, X, y, c=0):
    """
    Compute the cross-entropy error with an optional sum-of-squares penalty.

    w -- parameter vector
    X -- feature matrix, one sample per row
    y -- 0/1 label vector, one label per row
    c -- regularization coefficient (default = 0)
    """
    # Model outputs sigma(X w), one probability per sample.
    sigma = expit(X.dot(w))
    # Cross-entropy of the predicted probabilities against the 0/1 labels.
    cross_entropy = -(y.transpose().dot(np.log(sigma)) + (1 - y).transpose().dot(np.log(1 - sigma)))
    # Quadratic weight penalty, scaled by the regularization coefficient.
    penalty = 0.5 * c * w.dot(w)
    return cross_entropy + penalty
def grad(w, X, y, c=0):
    """
    Gradient of the cross-entropy error with optional sum-of-squares term.

    Returns X^T (sigma(X w) - y) + c w.
    """
    # Per-sample prediction error: predicted probability minus true label.
    residual = expit(X.dot(w)) - y
    return X.transpose().dot(residual) + c * w
def train(X, y, c=0):
    """
    Fit logistic-regression weights by minimizing the cross-entropy error.

    Runs BFGS from the all-zero starting vector and returns the optimized
    parameter vector.
    """
    # Start from zero weights -- very large starting values can overflow the
    # sigmoid/log terms during the line search.
    w0 = np.zeros(X.shape[1])
    return opt.fmin_bfgs(cost, w0, fprime=grad, args=(X, y, c))
def predict(w, X):
    """
    Return the predicted probability of the positive class for each sample.
    """
    # Linear scores, one per sample, mapped through the logistic sigmoid.
    activations = X.dot(w)
    return expit(activations)
"""
Explanation: The Set-up
We have 9 input variables $x_0, \dots, x_8$ where $x_0$ is the dummy input variable fixed at 1. (The fixed dummy input variable could easily be $x_5$ or $x_8$; its index is unimportant.) We set the basis functions to the simplest choice $\phi_0(\mathbf{x}) = x_0, \dots, \phi_8(\mathbf{x}) = x_8$. Our model then has the form
$$
y(\mathbf{x}) = \sigma\left(\sum_{j=0}^{8} w_j x_j\right) = \sigma(\mathbf{w}^T \mathbf{x}).
$$
Here we have a dataset, $\{(\mathbf{x}_n, t_n)\}_{n=1}^{N}$ where $t_n \in \{0, 1\}$, with $N=768$ examples. We train our model by finding the parameter vector $\mathbf{w}$ which minimizes the (data-dependent) cross-entropy error function
$$
E_D(\mathbf{w}) = - \sum_{n=1}^{N} \{t_n \ln \sigma(\mathbf{w}^T \mathbf{x}_n) + (1 - t_n)\ln(1 - \sigma(\mathbf{w}^T \mathbf{x}_n))\}.
$$
The gradient of this function is given by
$$
\nabla E_D(\mathbf{w}) = \sum_{n=1}^{N} (\sigma(\mathbf{w}^T \mathbf{x}_n) - t_n)\mathbf{x}_n.
$$
End of explanation
"""
def confusion_matrix(predictions, y):
    """
    Return the confusion matrix [[tp, fp], [fn, tn]].

    predictions -- dataset of predictions (outputs) from a model
    y -- dataset of labels, one entry per sample
    """
    preds = predictions.round().values  # pandas Series -> numpy.ndarray
    labels = y.values
    agree = preds == labels
    positive = preds == 1
    tp = int(np.sum(agree & positive))
    tn = int(np.sum(agree & ~positive))
    fp = int(np.sum(~agree & positive))
    fn = int(np.sum(~agree & ~positive))
    return np.array([[tp, fp], [fn, tn]])
def accuracy(cm):
    """
    Return the overall accuracy, (tp + tn) / (tp + fp + fn + tn).
    """
    correct = cm[0, 0] + cm[1, 1]  # diagonal of the confusion matrix
    return correct / cm.sum()
def positive_pred_value(cm):
    """
    Return the positive predictive value, tp / (tp + fp).
    """
    tp, fp = cm[0, 0], cm[0, 1]
    return tp / (tp + fp)
def negative_pred_value(cm):
    """
    Return the negative predictive value, tn / (fn + tn).
    """
    fn, tn = cm[1, 0], cm[1, 1]
    return tn / (fn + tn)
def balanced_accuracy(cm):
    """
    Return the balanced accuracy: the mean of the row-wise predictive
    values, (tp/(tp+fp) + tn/(fn+tn)) / 2.
    """
    top_row = cm[0, 0] / (cm[0, 0] + cm[0, 1])
    bottom_row = cm[1, 1] / (cm[1, 0] + cm[1, 1])
    return (top_row + bottom_row) / 2
"""
Explanation: Performance measure
There are many ways to compute the performance of a binary classifier. The key concept is the idea of a confusion matrix or contingency table:
| | | Label | |
|:-------------|:--:|:-----:|:--:|
| | | +1 | -1 |
|Prediction| +1 | TP | FP |
| | -1 | FN | TN |
where
* TP - true positive
* FP - false positive
* FN - false negative
* TN - true negative
Implement three functions, the first one which returns the confusion matrix for comparing two lists (one set of predictions, and one set of labels). Then implement two functions that take the confusion matrix as input and returns the accuracy and balanced accuracy respectively. The balanced accuracy is the average accuracy of each class.
End of explanation
"""
y = data['diabetes']  # binary labels
X = data[['num preg', 'plasma', 'bp', 'skin fold', 'insulin', 'bmi', 'pedigree', 'age', 'ones']]  # features, incl. the dummy 'ones' input
# Train and evaluate on the same full dataset (no hold-out split).
theta_best = train(X, y)
print(theta_best)
pred = predict(theta_best, X)
cmatrix = confusion_matrix(pred, y)
[accuracy(cmatrix), balanced_accuracy(cmatrix)]  # displayed by the notebook
"""
Explanation: Putting everything together
Consider the following code, which trains on all the examples, and predicts on the training set. Discuss the results.
End of explanation
"""
# Positive and negative predictive values for the same confusion matrix.
[positive_pred_value(cmatrix), negative_pred_value(cmatrix)]
"""
Explanation: To aid our discussion we give the positive predictive value (PPV) and negative predictive value (NPV) also.
End of explanation
"""
def split_data(data):
    """
    Randomly split `data` into two equal-sized halves.

    Returns (X_train, t_train, X_test, t_test), where the 'diabetes'
    column supplies the labels t and the remaining columns the inputs X.
    """
    np.random.seed(1)  # fixed seed so the split is reproducible
    n_samples = len(data)
    order = np.arange(n_samples)
    np.random.shuffle(order)
    half = int(n_samples / 2)
    first_half, second_half = order[:half], order[half:]
    train_rows = data.loc[first_half]
    test_rows = data.loc[second_half]
    return (train_rows.drop('diabetes', axis=1), train_rows['diabetes'],
            test_rows.drop('diabetes', axis=1), test_rows['diabetes'])
def reg_coefficient_comparison(reg_coefficients, X_train, t_train, X_test, t_test):
    """
    Train one model per regularization coefficient and tabulate its
    accuracy and balanced accuracy on the test set.

    reg_coefficients -- list of regularization coefficient values
    X_train -- input dataset used for training
    t_train -- training labels
    X_test -- input dataset used to make predictions from the trained model
    t_test -- labels used for performance assessment
    """
    rows = []
    for coefficient in reg_coefficients:
        weights = train(X_train, t_train, coefficient)
        cm = confusion_matrix(predict(weights, X_test), t_test)
        rows.append([coefficient, accuracy(cm), balanced_accuracy(cm)])
    return pd.DataFrame(
        rows,
        columns=["regularization coefficient", "accuracy", "balanced accuracy"])
# Hold out half the data, then sweep over candidate regularization coefficients.
X_train, t_train, X_test, t_test = split_data(data)
reg_coefficients = [0, 0.01, 0.1, 0.25, 0.5, 1, 1.5, 1.75, 2, 5, 9, 10, 11, 20, 100, 150]
reg_coefficient_comparison(reg_coefficients, X_train, t_train, X_test, t_test)
"""
Explanation: Discussion
Overall, the accuracy of our model is reasonable, given our naive choice of basis functions, as is its balanced accuracy. The discrepancy between these values can be accounted for by the PPV being higher than the NPV.
(optional) Effect of regularization parameter
By splitting the data into two halves, train on one half and report performance on the second half. By repeating this experiment for different values of the regularization parameter $\lambda$ we can get a feeling about the variability in the performance of the classifier due to regularization. Plot the values of accuracy and balanced accuracy for at least 3 different choices of $\lambda$. Note that you may have to update your implementation of logistic regression to include the regularisation parameter.
End of explanation
"""
|
tensorflow/quantum | docs/tutorials/qcnn.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
!pip install tensorflow==2.7.0
"""
Explanation: Quantum Convolutional Neural Network
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/qcnn"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/qcnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/qcnn.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/qcnn.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial implements a simplified <a href="https://www.nature.com/articles/s41567-019-0648-8" class="external">Quantum Convolutional Neural Network</a> (QCNN), a proposed quantum analogue to a classical convolutional neural network that is also translationally invariant.
This example demonstrates how to detect certain properties of a quantum data source, such as a quantum sensor or a complex simulation from a device. The quantum data source being a <a href="https://arxiv.org/pdf/quant-ph/0504097.pdf" class="external">cluster state</a> that may or may not have an excitation—what the QCNN will learn to detect (The dataset used in the paper was SPT phase classification).
Setup
End of explanation
"""
!pip install tensorflow-quantum
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
"""
Explanation: Install TensorFlow Quantum:
End of explanation
"""
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
"""
Explanation: Now import TensorFlow and the module dependencies:
End of explanation
"""
qubit = cirq.GridQubit(0, 0)
# Define some circuits.
circuit1 = cirq.Circuit(cirq.X(qubit))
circuit2 = cirq.Circuit(cirq.H(qubit))
# Convert to a tensor.
input_circuit_tensor = tfq.convert_to_tensor([circuit1, circuit2])
# Define a circuit that we want to append
y_circuit = cirq.Circuit(cirq.Y(qubit))
# Instantiate our layer
y_appender = tfq.layers.AddCircuit()
# Run our circuit tensor through the layer and save the output.
output_circuit_tensor = y_appender(input_circuit_tensor, append=y_circuit)
"""
Explanation: 1. Build a QCNN
1.1 Assemble circuits in a TensorFlow graph
TensorFlow Quantum (TFQ) provides layer classes designed for in-graph circuit construction. One example is the tfq.layers.AddCircuit layer that inherits from tf.keras.Layer. This layer can either prepend or append to the input batch of circuits, as shown in the following figure.
<img src="./images/qcnn_1.png" width="700">
The following snippet uses this layer:
End of explanation
"""
print(tfq.from_tensor(input_circuit_tensor))
"""
Explanation: Examine the input tensor:
End of explanation
"""
print(tfq.from_tensor(output_circuit_tensor))
"""
Explanation: And examine the output tensor:
End of explanation
"""
def generate_data(qubits):
    """Generate training and testing data."""
    n_rounds = 20  # Produces n_rounds * n_qubits datapoints.
    excitations = []
    labels = []
    for _ in range(n_rounds):
        for qubit in qubits:
            # Random single-qubit rotation; label +1 when the angle is
            # small enough to count as "no excitation".
            angle = np.random.uniform(-np.pi, np.pi)
            excitations.append(cirq.Circuit(cirq.rx(angle)(qubit)))
            labels.append(1 if (-np.pi / 2) <= angle <= (np.pi / 2) else -1)

    # 70/30 train/test split.
    split = int(len(excitations) * 0.7)
    return (tfq.convert_to_tensor(excitations[:split]),
            np.array(labels[:split]),
            tfq.convert_to_tensor(excitations[split:]),
            np.array(labels[split:]))
"""
Explanation: While it is possible to run the examples below without using tfq.layers.AddCircuit, it's a good opportunity to understand how complex functionality can be embedded into TensorFlow compute graphs.
1.2 Problem overview
You will prepare a cluster state and train a quantum classifier to detect if it is "excited" or not. The cluster state is highly entangled but not necessarily difficult for a classical computer. For clarity, this is a simpler dataset than the one used in the paper.
For this classification task you will implement a deep <a href="https://arxiv.org/pdf/quant-ph/0610099.pdf" class="external">MERA</a>-like QCNN architecture since:
Like the QCNN, the cluster state on a ring is translationally invariant.
The cluster state is highly entangled.
This architecture should be effective at reducing entanglement, obtaining the classification by reading out a single qubit.
<img src="./images/qcnn_2.png" width="1000">
An "excited" cluster state is defined as a cluster state that had a cirq.rx gate applied to any of its qubits. Qconv and QPool are discussed later in this tutorial.
1.3 Building blocks for TensorFlow
<img src="./images/qcnn_3.png" width="1000">
One way to solve this problem with TensorFlow Quantum is to implement the following:
The input to the model is a circuit tensor—either an empty circuit or an X gate on a particular qubit indicating an excitation.
The rest of the model's quantum components are constructed with tfq.layers.AddCircuit layers.
For inference a tfq.layers.PQC layer is used. This reads $\langle \hat{Z} \rangle$ and compares it to a label of 1 for an excited state, or -1 for a non-excited state.
1.4 Data
Before building your model, you can generate your data. In this case it's going to be excitations to the cluster state (The original paper uses a more complicated dataset). Excitations are represented with cirq.rx gates. A large enough rotation is deemed an excitation and is labeled 1 and a rotation that isn't large enough is labeled -1 and deemed not an excitation.
End of explanation
"""
sample_points, sample_labels, _, __ = generate_data(cirq.GridQubit.rect(1, 4))
print('Input:', tfq.from_tensor(sample_points)[0], 'Output:', sample_labels[0])
print('Input:', tfq.from_tensor(sample_points)[1], 'Output:', sample_labels[1])
"""
Explanation: You can see that just like with regular machine learning you create a training and testing set to use to benchmark the model. You can quickly look at some datapoints with:
End of explanation
"""
def cluster_state_circuit(bits):
    """Return a cluster state on the qubits in `bits`."""
    ring_pairs = zip(bits, bits[1:] + [bits[0]])  # neighbors on a ring
    circuit = cirq.Circuit(cirq.H.on_each(bits))
    circuit.append(cirq.CZ(a, b) for a, b in ring_pairs)
    return circuit
"""
Explanation: 1.5 Define layers
Now define the layers shown in the figure above in TensorFlow.
1.5.1 Cluster state
The first step is to define the <a href="https://arxiv.org/pdf/quant-ph/0504097.pdf" class="external">cluster state</a> using <a href="https://github.com/quantumlib/Cirq" class="external">Cirq</a>, a Google-provided framework for programming quantum circuits. Since this is a static part of the model, embed it using the tfq.layers.AddCircuit functionality.
End of explanation
"""
SVGCircuit(cluster_state_circuit(cirq.GridQubit.rect(1, 4)))
"""
Explanation: Display a cluster state circuit for a rectangle of <a href="https://cirq.readthedocs.io/en/stable/generated/cirq.GridQubit.html" class="external"><code>cirq.GridQubit</code></a>s:
End of explanation
"""
def one_qubit_unitary(bit, symbols):
    """Make a Cirq circuit enacting a rotation of the Bloch sphere about
    the X, Y and Z axes, parameterized by the values in `symbols`.
    """
    x_exp, y_exp, z_exp = symbols[0], symbols[1], symbols[2]
    return cirq.Circuit(
        cirq.X(bit)**x_exp,
        cirq.Y(bit)**y_exp,
        cirq.Z(bit)**z_exp)
def two_qubit_unitary(bits, symbols):
    """Make a Cirq circuit that creates an arbitrary two qubit unitary."""
    first, second = bits
    circuit = cirq.Circuit()
    # Local rotations on each qubit.
    circuit += one_qubit_unitary(first, symbols[0:3])
    circuit += one_qubit_unitary(second, symbols[3:6])
    # Parameterized entangling layer.
    for gate, exponent in zip((cirq.ZZ, cirq.YY, cirq.XX), symbols[6:9]):
        circuit += [gate(first, second)**exponent]
    # Final local rotations.
    circuit += one_qubit_unitary(first, symbols[9:12])
    circuit += one_qubit_unitary(second, symbols[12:])
    return circuit
def two_qubit_pool(source_qubit, sink_qubit, symbols):
    """Make a Cirq circuit to do a parameterized 'pooling' operation, which
    attempts to reduce entanglement down from two qubits to just one."""
    sink_selector = one_qubit_unitary(sink_qubit, symbols[0:3])
    source_selector = one_qubit_unitary(source_qubit, symbols[3:6])
    circuit = cirq.Circuit()
    circuit.append(sink_selector)
    circuit.append(source_selector)
    circuit.append(cirq.CNOT(control=source_qubit, target=sink_qubit))
    circuit.append(sink_selector**-1)  # undo the sink basis change
    return circuit
"""
Explanation: 1.5.2 QCNN layers
Define the layers that make up the model using the <a href="https://arxiv.org/abs/1810.03787" class="external">Cong and Lukin QCNN paper</a>. There are a few prerequisites:
The one- and two-qubit parameterized unitary matrices from the <a href="https://arxiv.org/abs/quant-ph/0507171" class="external">Tucci paper</a>.
A general parameterized two-qubit pooling operation.
End of explanation
"""
SVGCircuit(one_qubit_unitary(cirq.GridQubit(0, 0), sympy.symbols('x0:3')))
"""
Explanation: To see what you created, print out the one-qubit unitary circuit:
End of explanation
"""
SVGCircuit(two_qubit_unitary(cirq.GridQubit.rect(1, 2), sympy.symbols('x0:15')))
"""
Explanation: And the two-qubit unitary circuit:
End of explanation
"""
SVGCircuit(two_qubit_pool(*cirq.GridQubit.rect(1, 2), sympy.symbols('x0:6')))
"""
Explanation: And the two-qubit pooling circuit:
End of explanation
"""
def quantum_conv_circuit(bits, symbols):
    """Quantum Convolution Layer following the above diagram.

    Return a Cirq circuit with the cascade of `two_qubit_unitary` applied
    to all pairs of qubits in `bits` as in the diagram above.
    """
    circuit = cirq.Circuit()
    even_pairs = list(zip(bits[0::2], bits[1::2]))
    odd_pairs = list(zip(bits[1::2], bits[2::2] + [bits[0]]))  # wraps the ring
    for left, right in even_pairs:
        circuit += two_qubit_unitary([left, right], symbols)
    for left, right in odd_pairs:
        circuit += two_qubit_unitary([left, right], symbols)
    return circuit
"""
Explanation: 1.5.2.1 Quantum convolution
As in the <a href="https://arxiv.org/abs/1810.03787" class="external">Cong and Lukin</a> paper, define the 1D quantum convolution as the application of a two-qubit parameterized unitary to every pair of adjacent qubits with a stride of one.
End of explanation
"""
SVGCircuit(
quantum_conv_circuit(cirq.GridQubit.rect(1, 8), sympy.symbols('x0:15')))
"""
Explanation: Display the (very horizontal) circuit:
End of explanation
"""
def quantum_pool_circuit(source_bits, sink_bits, symbols):
    """A layer that specifies a quantum pooling operation.

    A quantum pool tries to learn to pool the relevant information from two
    qubits onto 1.
    """
    circuit = cirq.Circuit()
    for src, snk in zip(source_bits, sink_bits):
        circuit += two_qubit_pool(src, snk, symbols)
    return circuit
"""
Explanation: 1.5.2.2 Quantum pooling
A quantum pooling layer pools from $N$ qubits to $\frac{N}{2}$ qubits using the two-qubit pool defined above.
End of explanation
"""
test_bits = cirq.GridQubit.rect(1, 8)
SVGCircuit(
quantum_pool_circuit(test_bits[:4], test_bits[4:], sympy.symbols('x0:6')))
"""
Explanation: Examine a pooling component circuit:
End of explanation
"""
def create_model_circuit(qubits):
    """Create sequence of alternating convolution and pooling operators
    which gradually shrink over time."""
    model_circuit = cirq.Circuit()
    symbols = sympy.symbols('qconv0:63')
    # Cirq uses sympy.Symbols to map learnable variables. TensorFlow Quantum
    # scans incoming circuits and replaces these with TensorFlow variables.
    # Each conv layer consumes 15 symbols and each pool layer 6, taken in
    # order from the 63 symbols above.
    # Layer 1: convolve all 8 qubits, pool qubits[:4] onto qubits[4:].
    model_circuit += quantum_conv_circuit(qubits, symbols[0:15])
    model_circuit += quantum_pool_circuit(qubits[:4], qubits[4:],
                                          symbols[15:21])
    # Layer 2: convolve the remaining 4 qubits, pool down to 2.
    model_circuit += quantum_conv_circuit(qubits[4:], symbols[21:36])
    model_circuit += quantum_pool_circuit(qubits[4:6], qubits[6:],
                                          symbols[36:42])
    # Layer 3: convolve the last 2 qubits, pool onto the final qubit.
    model_circuit += quantum_conv_circuit(qubits[6:], symbols[42:57])
    model_circuit += quantum_pool_circuit([qubits[6]], [qubits[7]],
                                          symbols[57:63])
    return model_circuit
# Create our qubits and readout operators in Cirq.
cluster_state_bits = cirq.GridQubit.rect(1, 8)
readout_operators = cirq.Z(cluster_state_bits[-1])  # read <Z> on the last qubit
# Build a sequential model enacting the logic in 1.3 of this notebook.
# Here you are making the static cluster state prep as a part of the AddCircuit and the
# "quantum datapoints" are coming in the form of excitation
excitation_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state = tfq.layers.AddCircuit()(
    excitation_input, prepend=cluster_state_circuit(cluster_state_bits))
# PQC layer holds the trainable circuit and evaluates the readout operator.
quantum_model = tfq.layers.PQC(create_model_circuit(cluster_state_bits),
                               readout_operators)(cluster_state)
qcnn_model = tf.keras.Model(inputs=[excitation_input], outputs=[quantum_model])
# Show the keras plot of the model
tf.keras.utils.plot_model(qcnn_model,
                          show_shapes=True,
                          show_layer_names=False,
                          dpi=70)
"""
Explanation: 1.6 Model definition
Now use the defined layers to construct a purely quantum CNN. Start with eight qubits, pool down to one, then measure $\langle \hat{Z} \rangle$.
End of explanation
"""
# Generate some training data.
train_excitations, train_labels, test_excitations, test_labels = generate_data(
cluster_state_bits)
# Custom accuracy metric.
@tf.function
def custom_accuracy(y_true, y_pred):
y_true = tf.squeeze(y_true)
y_pred = tf.map_fn(lambda x: 1.0 if x >= 0 else -1.0, y_pred)
return tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred))
qcnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
loss=tf.losses.mse,
metrics=[custom_accuracy])
history = qcnn_model.fit(x=train_excitations,
y=train_labels,
batch_size=16,
epochs=25,
verbose=1,
validation_data=(test_excitations, test_labels))
plt.plot(history.history['loss'][1:], label='Training')
plt.plot(history.history['val_loss'][1:], label='Validation')
plt.title('Training a Quantum CNN to Detect Excited Cluster States')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
"""
Explanation: 1.7 Train the model
Train the model over the full batch to simplify this example.
End of explanation
"""
# 1-local operators to read out
readouts = [cirq.Z(bit) for bit in cluster_state_bits[4:]]
def multi_readout_model_circuit(qubits):
    """Make a model circuit with less quantum pool and conv operations."""
    params = sympy.symbols('qconv0:21')
    circuit = cirq.Circuit()
    # A single conv layer (15 symbols) followed by one pool layer (6 symbols).
    circuit += quantum_conv_circuit(qubits, params[0:15])
    circuit += quantum_pool_circuit(qubits[:4], qubits[4:], params[15:21])
    return circuit
# Build a model enacting the logic in 2.1 of this notebook.
excitation_input_dual = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_dual = tfq.layers.AddCircuit()(
excitation_input_dual, prepend=cluster_state_circuit(cluster_state_bits))
quantum_model_dual = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_dual)
d1_dual = tf.keras.layers.Dense(8)(quantum_model_dual)
d2_dual = tf.keras.layers.Dense(1)(d1_dual)
hybrid_model = tf.keras.Model(inputs=[excitation_input_dual], outputs=[d2_dual])
# Display the model architecture
tf.keras.utils.plot_model(hybrid_model,
show_shapes=True,
show_layer_names=False,
dpi=70)
"""
Explanation: 2. Hybrid models
You don't have to go from eight qubits to one qubit using quantum convolution—you could have done one or two rounds of quantum convolution and fed the results into a classical neural network. This section explores quantum-classical hybrid models.
2.1 Hybrid model with a single quantum filter
Apply one layer of quantum convolution, reading out $\langle \hat{Z}_n \rangle$ on all bits, followed by a densely-connected neural network.
<img src="./images/qcnn_5.png" width="1000">
2.1.1 Model definition
End of explanation
"""
hybrid_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
loss=tf.losses.mse,
metrics=[custom_accuracy])
hybrid_history = hybrid_model.fit(x=train_excitations,
y=train_labels,
batch_size=16,
epochs=25,
verbose=1,
validation_data=(test_excitations,
test_labels))
plt.plot(history.history['val_custom_accuracy'], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'], label='Hybrid CNN')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
plt.show()
"""
Explanation: 2.1.2 Train the model
End of explanation
"""
excitation_input_multi = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_multi = tfq.layers.AddCircuit()(
excitation_input_multi, prepend=cluster_state_circuit(cluster_state_bits))
# apply 3 different filters and measure expectation values
quantum_model_multi1 = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_multi)
quantum_model_multi2 = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_multi)
quantum_model_multi3 = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_multi)
# concatenate outputs and feed into a small classical NN
concat_out = tf.keras.layers.concatenate(
[quantum_model_multi1, quantum_model_multi2, quantum_model_multi3])
dense_1 = tf.keras.layers.Dense(8)(concat_out)
dense_2 = tf.keras.layers.Dense(1)(dense_1)
multi_qconv_model = tf.keras.Model(inputs=[excitation_input_multi],
outputs=[dense_2])
# Display the model architecture
tf.keras.utils.plot_model(multi_qconv_model,
show_shapes=True,
show_layer_names=True,
dpi=70)
"""
Explanation: As you can see, with very modest classical assistance, the hybrid model will usually converge faster than the purely quantum version.
2.2 Hybrid convolution with multiple quantum filters
Now let's try an architecture that uses multiple quantum convolutions and a classical neural network to combine them.
<img src="./images/qcnn_6.png" width="1000">
2.2.1 Model definition
End of explanation
"""
multi_qconv_model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
loss=tf.losses.mse,
metrics=[custom_accuracy])
multi_qconv_history = multi_qconv_model.fit(x=train_excitations,
y=train_labels,
batch_size=16,
epochs=25,
verbose=1,
validation_data=(test_excitations,
test_labels))
plt.plot(history.history['val_custom_accuracy'][:25], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'][:25], label='Hybrid CNN')
plt.plot(multi_qconv_history.history['val_custom_accuracy'][:25],
label='Hybrid CNN \n Multiple Quantum Filters')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
plt.show()
"""
Explanation: 2.2.2 Train the model
End of explanation
"""
|
google/jax-md | notebooks/nve_neighbor_list.ipynb | apache-2.0 | #@title Imports & Utils
!pip install jax-md
import numpy as onp
from jax.config import config ; config.update('jax_enable_x64', True)
import jax.numpy as np
from jax import random
from jax import jit
from jax import lax
import time
from jax_md import space
from jax_md import smap
from jax_md import energy
from jax_md import quantity
from jax_md import simulate
from jax_md import partition
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style(style='white')
def format_plot(x, y):
    """Label the x and y axes of the current plot."""
    for text, setter in ((x, plt.xlabel), (y, plt.ylabel)):
        setter(text, fontsize=20)
def finalize_plot(shape=(1, 1)):
    """Scale the current figure by `shape` (relative to its height) and
    tighten the layout."""
    base = plt.gcf().get_size_inches()[1]
    plt.gcf().set_size_inches(shape[0] * 1.5 * base,
                              shape[1] * 1.5 * base)
    plt.tight_layout()
"""
Explanation: <a href="https://colab.research.google.com/github/google/jax-md/blob/main/notebooks/nve_neighbor_list.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
End of explanation
"""
Nx = particles_per_side = 80
spacing = np.float32(1.25)
side_length = Nx * spacing
R = onp.stack([onp.array(r) for r in onp.ndindex(Nx, Nx)]) * spacing
R = np.array(R, np.float64)
#@title Draw the initial state
ms = 10
R_plt = onp.array(R)
plt.plot(R_plt[:, 0], R_plt[:, 1], 'o', markersize=ms * 0.5)
plt.xlim([0, np.max(R[:, 0])])
plt.ylim([0, np.max(R[:, 1])])
plt.axis('off')
finalize_plot((2, 2))
"""
Explanation: Constant Energy Simulation With Neighbor Lists
Setup some system parameters.
End of explanation
"""
# format = partition.Dense
# format = partition.Sparse
format = partition.OrderedSparse
"""
Explanation: JAX MD supports three different formats for neighbor lists: Dense, Sparse, and OrderedSparse.
Dense neighbor lists store neighbor IDs in a matrix of shape (particle_count, neighbors_per_particle). This can be advantageous if the system if homogeneous since it requires less memory bandwidth. However, Dense neighbor lists are more prone to overflows or waste if there are large fluctuations in the number of neighbors, since they must allocate enough capacity for the maximum number of neighbors.
Sparse neighbor lists store neighbor IDs in a matrix of shape (2, total_neighbors) where the first index specifies senders and receivers for each neighboring pair. Unlike Dense neighbor lists, Sparse neighbor lists must store two integers for each neighboring pair. However, they benefit because their capacity is bounded by the total number of neighbors, making them more efficient when different particles have different numbers of neighbors.
OrderedSparse neighbor lists are like Sparse neighbor lists, except they only store pairs of neighbors (i, j) where i < j. For potentials that can be phrased as $\sum_{i<j}E_{ij}$ this can give a factor of two improvement in speed.
End of explanation
"""
displacement, shift = space.periodic(side_length)
neighbor_fn, energy_fn = energy.lennard_jones_neighbor_list(displacement,
side_length,
format=format)
energy_fn = jit(energy_fn)
exact_energy_fn = jit(energy.lennard_jones_pair(displacement))
"""
Explanation: Construct two versions of the energy function with and without neighbor lists.
End of explanation
"""
nbrs = neighbor_fn.allocate(R)
"""
Explanation: To use a neighbor list, we must first allocate it. This step cannot be Just-in-Time (JIT) compiled because it uses the state of the system to infer the capacity of the neighbor list (which involves dynamic shapes).
End of explanation
"""
# Run once so that we avoid the jit compilation time.
print('E = {}'.format(energy_fn(R, neighbor=nbrs)))
print('E_ex = {}'.format(exact_energy_fn(R)))
%%timeit
energy_fn(R, neighbor=nbrs).block_until_ready()
%%timeit
exact_energy_fn(R).block_until_ready()
"""
Explanation: Now we can compute the energy with and without neighbor lists. We see that both results agree, but the neighbor list version of the code is significantly faster.
End of explanation
"""
displacement, shift = space.periodic(side_length)
# NVE (constant-energy) integrator with time step 1e-3.
init_fn, apply_fn = simulate.nve(energy_fn, shift, 1e-3)
state = init_fn(random.PRNGKey(0), R, kT=1e-3, neighbor=nbrs)
def body_fn(i, state):
    """One integration step: refresh the neighbor list, then advance."""
    state, nbrs = state
    nbrs = nbrs.update(state.position)  # JIT-safe update; capacity may overflow
    state = apply_fn(state, neighbor=nbrs)
    return state, nbrs
step = 0
while step < 40:
    # Run 100 steps inside a compiled fori_loop for speed.
    new_state, nbrs = lax.fori_loop(0, 100, body_fn, (state, nbrs))
    if nbrs.did_buffer_overflow:
        # Capacity was exceeded: reallocate from the last good state and
        # redo this batch of steps (note `step` is not advanced).
        print('Neighbor list overflowed, reallocating.')
        nbrs = neighbor_fn.allocate(state.position)
    else:
        state = new_state
        step += 1
#@title Draw the final state
ms = 10
R_plt = onp.array(state.position)
plt.plot(R_plt[:, 0], R_plt[:, 1], 'o', markersize=ms * 0.5)
plt.xlim([0, np.max(R[:, 0])])
plt.ylim([0, np.max(R[:, 1])])
plt.axis('off')
finalize_plot((2, 2))
"""
Explanation: Now we can run a simulation. Inside the body of the simulation, we update the neighbor list using nbrs.update(position). This can be JIT, but it also might lead to buffer overflows if the allocated neighborlist cannot accomodate all of the neighbors. Therefore, every so often we check whether the neighbor list overflowed and if it did, we reallocate it using the state right before it overflowed.
End of explanation
"""
|
sorter43/PR2017LSBOLP | BaseClass/Porazdelitve.ipynb | apache-2.0 | % matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt ('../ratingSAMPLE.csv', delimiter=",", skiprows=0)
"""
Explanation: Porazdelitev
End of explanation
"""
# Count how many ratings take each value 1..9 (ratings are in column 2).
ratingsNum=list()
for number in np.arange(1,10):
    ratingsNum.append(len(data[data[:,2]==number,2]))
# Bar chart of the rating distribution.
plt.figure()
plt.bar(np.arange(1,10),ratingsNum, 0.8, color="blue")
plt.show()
"""
Explanation: Graf da osnovno idejo o tem, kaj uporabiti
End of explanation
"""
from scipy.stats import beta
a=8
b=2
n=1000
sample=beta.rvs(a, b, size=n)  # n random draws from Beta(a, b)
xr = np.linspace(0, 1, 100)# the X interval
P = [beta.pdf(x, a, b) for x in xr] # probability density function
# Histogram - distribution of the random SAMPLES x drawn according to P(x)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.title("Vzorec")
plt.hist(sample, color="red")
plt.xlabel("X")
plt.ylabel("Število primerov")
# Plot of the density function
plt.subplot(1, 2, 2)
plt.title("Graf porazdelitve")
plt.plot(xr, P, color="red") # draw P(x)
plt.ylabel("P(x)")
plt.xlabel("X")
plt.show()
"""
Explanation: Ker imamo vnaprej dolečen interval, ki ne ustreza Gaussu najbolje, sem se odločil uporabiti beta porazdelitev
End of explanation
"""
|
chunweixu/Deep-Learning | language-translation/.ipynb_checkpoints/dlnd_language_translation-checkpoint.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
"""
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """
    Convert source and target text to proper word ids
    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
    eos_id = target_vocab_to_int['<EOS>']
    # One id-list per line; target lines additionally end with <EOS> so the
    # network can learn where a sentence stops.
    source_id_text = []
    for line in source_text.split('\n'):
        source_id_text.append([source_vocab_to_int[word] for word in line.split()])
    target_id_text = []
    for line in target_text.split('\n'):
        ids = [target_vocab_to_int[word] for word in line.split()]
        ids.append(eos_id)
        target_id_text.append(ids)
    return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
"""
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
"""
def model_inputs():
    """
    Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
    :return: Tuple (input, targets, learning rate, keep probability, target sequence length,
    max target sequence length, source sequence length)
    """
    # Token-id matrices of shape (batch, time); sizes fixed at feed time.
    input_data = tf.placeholder(tf.int32, shape=[None, None], name='input')
    target_data = tf.placeholder(tf.int32, shape=[None, None], name='targets')
    # Scalar training knobs.
    lr = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    # Per-example sequence lengths (one entry per batch row).
    target_seq_len = tf.placeholder(tf.int32, shape=[None], name='target_sequence_length')
    source_seq_len = tf.placeholder(tf.int32, shape=[None], name='source_sequence_length')
    # Longest target sequence in the current batch (scalar tensor).
    max_target_len = tf.reduce_max(target_seq_len, name='max_target_len')
    return (input_data, target_data, lr, keep_prob,
            target_seq_len, max_target_len, source_seq_len)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoder_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Target sequence length placeholder named "target_sequence_length" with rank 1
Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.
Source sequence length placeholder named "source_sequence_length" with rank 1
Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)
End of explanation
"""
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding.
    :param target_data: Target placeholder, shape (batch_size, time)
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    go_id = target_vocab_to_int['<GO>']
    # Drop the last token of every sequence: the decoder only has to
    # *predict* the final word, it never consumes it as input.
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # Prepend the <GO> id so the decoder knows where each sequence starts.
    return tf.concat([tf.fill([batch_size, 1], go_id), trimmed], 1)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_encoding_input(process_decoder_input)
"""
Explanation: Process Decoder Input
Implement process_decoder_input by removing the last word id from each batch in target_data and concat the GO ID to the begining of each batch.
End of explanation
"""
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, 
                   source_sequence_length, source_vocab_size, 
                   encoding_embedding_size):
    """
    Create encoding layer
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :param source_sequence_length: a list of the lengths of each sequence in the batch
    :param source_vocab_size: vocabulary size of source data
    :param encoding_embedding_size: embedding size of source data
    :return: tuple (RNN output, RNN state)
    """
    # Map word ids to dense embedding vectors.
    embed_input = tf.contrib.layers.embed_sequence(
        rnn_inputs, source_vocab_size, encoding_embedding_size)

    def build_cell():
        # LSTM cell with dropout applied to the outputs.
        cell = tf.contrib.rnn.LSTMCell(
            rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1))
        return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)

    enc_cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell() for _ in range(num_layers)])
    # sequence_length lets dynamic_rnn skip the padded tail of each example.
    enc_output, enc_state = tf.nn.dynamic_rnn(
        enc_cell, embed_input,
        sequence_length=source_sequence_length,
        dtype=tf.float32)
    return enc_output, enc_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
"""
Explanation: Encoding
Implement encoding_layer() to create a Encoder RNN layer:
* Embed the encoder input using tf.contrib.layers.embed_sequence
* Construct a stacked tf.contrib.rnn.LSTMCell wrapped in a tf.contrib.rnn.DropoutWrapper
* Pass cell and embedded input to tf.nn.dynamic_rnn()
End of explanation
"""
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, 
                         target_sequence_length, max_summary_length, 
                         output_layer, keep_prob):
    """
    Create a decoding layer for training
    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_summary_length: The length of the longest sequence in the batch
    :param output_layer: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: BasicDecoderOutput containing training logits and sample_id
    """
    # Dropout on the decoder cell outputs (DropoutWrapper adds no variables,
    # so the 'decode' scope can still be reused at inference time).
    drop_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
    # Teacher forcing: feed the ground-truth embedded targets at each step.
    helper = tf.contrib.seq2seq.TrainingHelper(
        inputs=dec_embed_input, sequence_length=target_sequence_length)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        drop_cell, helper, encoder_state, output_layer)
    # dynamic_decode returns (outputs, state[, lengths]) depending on the
    # TF 1.x minor version; the decoder output is always element 0.
    outputs = tf.contrib.seq2seq.dynamic_decode(
        decoder, impute_finished=True,
        maximum_iterations=max_summary_length)[0]
    return outputs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
"""
Explanation: Decoding - Training
Create a training decoding layer:
* Create a tf.contrib.seq2seq.TrainingHelper
* Create a tf.contrib.seq2seq.BasicDecoder
* Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode
End of explanation
"""
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
                         end_of_sequence_id, max_target_sequence_length,
                         vocab_size, output_layer, batch_size, keep_prob):
    """
    Create a decoding layer for inference
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS Id
    :param max_target_sequence_length: Maximum length of target sequences
    :param vocab_size: Size of decoder/target vocabulary
    :param output_layer: Function to apply the output layer
    :param batch_size: Batch size
    :param keep_prob: Dropout keep probability
    :return: BasicDecoderOutput containing inference logits and sample_id
    """
    # Every sequence in the batch starts from the <GO> token.
    start_tokens = tf.tile(
        tf.constant([start_of_sequence_id], dtype=tf.int32),
        [batch_size], name='start_tokens')
    # Greedy decoding: at each step feed back the embedding of the argmax
    # prediction; stop when <EOS> is produced.
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        dec_embeddings, start_tokens, end_of_sequence_id)
    drop_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        drop_cell, helper, encoder_state, output_layer)
    outputs = tf.contrib.seq2seq.dynamic_decode(
        decoder, impute_finished=True,
        maximum_iterations=max_target_sequence_length)[0]
    return outputs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
"""
Explanation: Decoding - Inference
Create inference decoder:
* Create a tf.contrib.seq2seq.GreedyEmbeddingHelper
* Create a tf.contrib.seq2seq.BasicDecoder
* Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode
End of explanation
"""
def decoding_layer(dec_input, encoder_state,
                   target_sequence_length, max_target_sequence_length,
                   rnn_size,
                   num_layers, target_vocab_to_int, target_vocab_size,
                   batch_size, keep_prob, decoding_embedding_size):
    """
    Create decoding layer
    :param dec_input: Decoder input
    :param encoder_state: Encoder state
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_target_sequence_length: Maximum length of target sequences
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param target_vocab_size: Size of target vocabulary
    :param batch_size: The size of the batch
    :param keep_prob: Dropout keep probability
    :param decoding_embedding_size: Decoding embedding size
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Embed the target word ids (the embedding matrix is also needed by the
    # inference helper to embed its own predictions).
    dec_embeddings = tf.Variable(
        tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)

    def build_cell():
        # Plain LSTM here; dropout is wrapped inside the train/infer helpers.
        return tf.contrib.rnn.LSTMCell(
            rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1))

    dec_cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell() for _ in range(num_layers)])
    # Dense projection from RNN outputs to vocabulary logits.
    output_layer = Dense(
        target_vocab_size,
        kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))

    # Train and infer share the same variables via the 'decode' scope.
    with tf.variable_scope('decode'):
        train_output = decoding_layer_train(
            encoder_state, dec_cell, dec_embed_input,
            target_sequence_length, max_target_sequence_length,
            output_layer, keep_prob)
    with tf.variable_scope('decode', reuse=True):
        infer_output = decoding_layer_infer(
            encoder_state, dec_cell, dec_embeddings,
            target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'],
            max_target_sequence_length, target_vocab_size,
            output_layer, batch_size, keep_prob)
    return train_output, infer_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
"""
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Embed the target sequences
Construct the decoder LSTM cell (just like you constructed the encoder cell above)
Create an output layer to map the outputs of the decoder to the elements of our vocabulary
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
"""
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
                  source_sequence_length, target_sequence_length,
                  max_target_sentence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network
    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param source_sequence_length: Sequence Lengths of source sequences in the batch
    :param target_sequence_length: Sequence Lengths of target sequences in the batch
    :param max_target_sentence_length: Maximum length of target sequences
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Encode the source sentence; only the final state feeds the decoder.
    _, enc_state = encoding_layer(
        input_data, rnn_size, num_layers, keep_prob,
        source_sequence_length, source_vocab_size, enc_embedding_size)
    # Shift targets right and prepend <GO> for teacher forcing.
    dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
    # Decode in both training and inference modes (shared weights).
    train_output, infer_output = decoding_layer(
        dec_input, enc_state,
        target_sequence_length, max_target_sentence_length,
        rnn_size, num_layers, target_vocab_to_int, target_vocab_size,
        batch_size, keep_prob, dec_embedding_size)
    return train_output, infer_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size).
Process target data using your process_decoder_input(target_data, target_vocab_to_int, batch_size) function.
Decode the encoded input using your decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) function.
End of explanation
"""
# Hyperparameters left as None would crash the training cell below;
# these are modest working values for the small English-French corpus.
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 200
decoding_embedding_size = 200
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.75
# Batches between debug printouts during training
display_step = 20
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
Set display_step to state how many steps between each debug output statement
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length"""
    longest = max(len(sentence) for sentence in sentence_batch)
    padded = []
    for sentence in sentence_batch:
        padded.append(sentence + [pad_int] * (longest - len(sentence)))
    return padded
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
    """Batch targets, sources, and the lengths of their sentences together.

    Yields tuples (padded_sources, padded_targets, source_lengths,
    target_lengths). Any trailing partial batch is dropped by the
    integer-division loop bound.
    """
    for batch_i in range(0, len(sources)//batch_size):
        start_i = batch_i * batch_size
        # Slice the right amount for the batch
        sources_batch = sources[start_i:start_i + batch_size]
        targets_batch = targets[start_i:start_i + batch_size]
        # Pad
        pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
        pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
        # Need the lengths for the _lengths parameters
        # (after padding every row has the same length, so these lists simply
        # repeat the padded length batch_size times)
        pad_targets_lengths = []
        for target in pad_targets_batch:
            pad_targets_lengths.append(len(target))
        pad_source_lengths = []
        for source in pad_sources_batch:
            pad_source_lengths.append(len(source))
        yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
"""
Explanation: Batch and pad the source and target sequences
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def get_accuracy(target, logits):
    """
    Calculate accuracy.

    Zero-pads the shorter of the two id matrices along axis 1 so the shapes
    match, then returns the element-wise match rate. Note that padded
    positions count toward the score.
    """
    max_seq = max(target.shape[1], logits.shape[1])
    if max_seq - target.shape[1]:
        target = np.pad(
            target,
            [(0,0),(0,max_seq - target.shape[1])],
            'constant')
    if max_seq - logits.shape[1]:
        logits = np.pad(
            logits,
            [(0,0),(0,max_seq - logits.shape[1])],
            'constant')
    return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
    unk_id = vocab_to_int['<UNK>']
    # Lowercase to match preprocessing; out-of-vocabulary words map to <UNK>.
    return [vocab_to_int.get(word, unk_id) for word in sentence.lower().split()]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
"""
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
End of explanation
"""
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
"""
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation
"""
|
amitkaps/hackermath | Module_3a_linear_algebra_eigenvectors.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (10, 6)
def vector_plot(vector):
    """Draw 2-D vectors anchored at given points with a quiver plot.

    :param vector: iterable of [x0, y0, dx, dy] arrays (four vectors
                   expected, matching the hard-coded color list C).
    """
    X, Y, U, V = zip(*vector)
    C = [1, 1, 2, 2]  # color codes: first pair vs second pair of vectors
    plt.figure()
    ax = plt.gca()
    ax.quiver(X, Y, U, V, C, angles='xy', scale_units='xy', scale=1)
    ax.set_xlim([-6, 6])
    ax.set_ylim([-6, 6])
    plt.axhline(0, color='grey', linewidth=1)
    plt.axvline(0, color='grey', linewidth=1)
    # Bug fix: plt.axes() adds a *new* full-figure axes on modern matplotlib,
    # covering the quiver plot; set the aspect on the axes we drew into.
    ax.set_aspect('equal')
    plt.draw()
A = np.array([[ 6 , 2],
              [ 2 , 6]])
x = np.array([[-1],
              [1]])
# x is an eigenvector of A here, so v = A @ x = 4 * x (pure scaling).
v = A.dot(x)
# All the vectors start at 0, 0
# (each entry below is [x0, y0, dx, dy] as expected by vector_plot's quiver)
vAX = np.r_[[0,0],A[:,0]]
vAY = np.r_[[0,0],A[:,1]]
vx = np.r_[[0,0],x[:,0]]
vv = np.r_[[0,0],v[:,0]]
vector_plot([vAX, vAY, vx, vv])
"""
Explanation: Intermediate Linear Algebra - Eigenvalues & Eigenvectors
Key Equation: $Ax = \lambda b ~~ \text{for} ~~ n \times n $
Transformations
So what really happens when we multiply the $A$ matrix with a vector $x$
Lets say we have a vector - $x$
$$ x = \begin{bmatrix} -1 \ 1 \end{bmatrix} $$
What happens when we multiply by a matrix - $A$
$$ A = \begin{bmatrix} 6 & 2 \ 2 & 6 \end{bmatrix} $$
$$ Ax = \begin{bmatrix} 6 & 2 \ 2 & 6 \end{bmatrix} \begin{bmatrix} -1 \ 1 \end{bmatrix} = \begin{bmatrix} -4 \ 4 \end{bmatrix} $$
$$ Ax = 4Ix $$
$$ Ax = 4x $$
So this particular matrix has just scaled our original vector. It is a scalar transformation. Other matrices can do reflection, rotation and any arbitrary transformation in the same 2d space for n = 2.
Lets see what has happened through code.
End of explanation
"""
A = np.array([[ 3 , 1],
              [ 1 , 3]])
# np.linalg.eig returns (eigenvalues, matrix whose columns are eigenvectors).
eigen_val, eigen_vec = np.linalg.eig(A)
eigen_val  # expected 2.0 and 4.0 from the derivation above
eigen_vec  # normalized eigenvectors as columns
eigen_vec[:,0]
# All the vectors start at 0, 0
vX1 = np.r_[[0,0],A[:,0]]
vY1 = np.r_[[0,0],A[:,1]]
vE1 = np.r_[[0,0],eigen_vec[:,0]] * 2  # scaled by 2 for visibility
vE2 = np.r_[[0,0],eigen_vec[:,1]] * 2
vector_plot([vX1, vY1, vE1, vE2])
"""
Explanation: Solving Equation $Ax=\lambda x$
Special Case: $Ax = 0$
So far we have been solving the equation $Ax = b$. Let us just look at special case when $b=0$.
$$ Ax =0 $$
If $A^{-1}$ exists (the matrix is non-singular and invertable), then the solution is trival
$$ A^{-1}Ax =0 $$
$$ x = 0$$
If $A^{-1}$ does not exist, then there may be infinitely many other solutions $x$. And since $A^{-1}$ is a singular matrix then
$$||A|| = 0 $$
General Case
The second part of linear algebra is solving the equation, for a given $A$ -
$$ Ax = \lambda x$$
Note that both $x$ and $\lambda$ are unknown in this equation. For all pairs $(\lambda, x)$ that satisfy it:
$$ \text{eigenvalues} = \lambda $$
$$ \text{eigenvectors} = x $$
Calculating Eigenvalues
So let us first solve this for $\lambda$ :
$$ Ax = \lambda Ix $$
$$ (A-\lambda I)x = 0 $$
So for non-trivial solution of $x$, $A$ should be singular:
$$ ||A - \lambda I|| = 0 $$
For 2 x 2 Matrix
Let us use the sample $A$ vector:
$$ A = \begin{bmatrix}3 & 1\ 1 & 3\end{bmatrix} $$
So our equation becomes:
$$ \begin{bmatrix}3 & 1\ 1 & 3\end{bmatrix} \begin{bmatrix}x \ y\end{bmatrix} = \begin{bmatrix}\lambda & 0\ 0 & \lambda \end{bmatrix} \begin{bmatrix}x \ y\end{bmatrix} $$
$$ \begin{bmatrix}3 - \lambda & 1\ 1 & 3 - \lambda \end{bmatrix} \begin{bmatrix}x \ y\end{bmatrix} = 0 $$
So for a singular matrix:
$$ \begin{Vmatrix}3 - \lambda & 1\ 1 & 3 - \lambda \end{Vmatrix} = 0 $$
$$ (3 - \lambda)^2 - 1 = 0 $$
$$ \lambda^2 - 6\lambda + 8 = 0 $$
$$ (\lambda - 4)(\lambda - 2) = 0 $$
$$ \lambda_1 = 2, \lambda_2 = 4 $$
$$||A|| = \lambda_{1} \lambda_{2} $$
Calculating Eigenvectors
For $\lambda = 2$,
$$ \begin{bmatrix}3 - \lambda & 1\ 1 & 3 - \lambda \end{bmatrix} \begin{bmatrix}x \ y\end{bmatrix} = \begin{bmatrix}1 & 1\ 1 & 1 \end{bmatrix} \begin{bmatrix}x \ y\end{bmatrix} = 0 $$
So one simple solution is:
$$ \begin{bmatrix}x \ y\end{bmatrix} = \begin{bmatrix}-1 \ 1\end{bmatrix} $$
For $\lambda = 4$,
$$ \begin{bmatrix}3 - \lambda & 1\ 1 & 3 - \lambda \end{bmatrix} \begin{bmatrix}x \ y\end{bmatrix} = \begin{bmatrix}-1 & 1\ 1 & -1 \end{bmatrix} \begin{bmatrix}x \ y\end{bmatrix} = 0 $$
So one simple solution is:
$$ \begin{bmatrix}x \ y\end{bmatrix} = \begin{bmatrix}1 \ 1\end{bmatrix} $$
The eigenvectors are orthonormal to each other in this case.
Vector Representation (2x2)
A vector representation for this is now:
$$ \begin{bmatrix}3 \ 1\end{bmatrix} x + \begin{bmatrix}1 \ 3\end{bmatrix} y = \begin{bmatrix} \lambda \ 0 \end{bmatrix} x + \begin{bmatrix} 0 \ \lambda \end{bmatrix} y $$
Now we need to draw these vectors and see the result
End of explanation
"""
# Eigen-decomposition of a 3x3 matrix.
# NOTE(review): np.matrix is deprecated in favor of np.array — consider
# switching if this notebook is modernized.
f = np.matrix([[1,1,1],
               [3,8,1],
               [5,-4,3]])
np.linalg.eig(f)
"""
Explanation: 3 x 3 Matrix
Let us write it in the form
$$ Ax = \lambda x $$
$$ \begin{bmatrix}1 & 1 & 1 \ 3 & 8 & 1 \ 5 & -4 & 3\end{bmatrix}\begin{bmatrix} x \y \ z\end{bmatrix}= \lambda \begin{bmatrix} x\ y \ x \end{bmatrix} $$
End of explanation
"""
|
google/compass | packages/propensity/12.cleanup.ipynb | apache-2.0 | # Add custom utils module to Python environment.
import os
import sys
sys.path.append(os.path.abspath(os.pardir))
from google.cloud import bigquery
from utils import helpers
"""
Explanation: 12. Cleanup BigQuery artifacts
This notebook helps to clean up interim tables generated while executing notebooks from 01 to 09.
Import required modules
End of explanation
"""
# Get GCP configurations.
configs = helpers.get_configs('config.yaml')
dest_configs = configs.destination
# GCP project ID where queries and other computation will be run.
PROJECT_ID = dest_configs.project_id
# BigQuery dataset name to store query results (if needed).
DATASET_NAME = dest_configs.dataset_name
"""
Explanation: Set parameters
End of explanation
"""
# Initialize BigQuery Client.
bq_client = bigquery.Client()
# Collect the ids of every table currently in the dataset.
all_tables = [table.table_id for table in bq_client.list_tables(DATASET_NAME)]
print(all_tables)
"""
Explanation: List all tables in the BigQuery Dataset
End of explanation
"""
# Define specific tables to remove from the dataset.
tables_to_delete = ['table1', 'table2']
# Or uncomment below to remove all tables in the dataset.
# tables_to_delete = all_tables
# Remove tables from BigQuery dataset.
# WARNING: deletion is irreversible — double-check tables_to_delete first.
for table_id in tables_to_delete:
    bq_client.delete_table(f'{PROJECT_ID}.{DATASET_NAME}.{table_id}')
"""
Explanation: Remove list of tables
Select table names from the printed out list in above cell.
End of explanation
"""
|
QuantEcon/QuantEcon.notebooks | ddp_ex_MF_7_6_5_py.ipynb | bsd-3-clause | %matplotlib inline
import itertools
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
from quantecon.markov import DiscreteDP
maxcap = 30
n = maxcap + 1  # Number of states (water levels 0, 1, ..., maxcap)
m = n # Number of actions (irrigation amounts 0, 1, ..., maxcap)
a1, b1 = 14, 0.8
a2, b2 = 10, 0.4
F = lambda x: a1 * x**b1 # Benefit from irrigation
U = lambda c: a2 * c**b2 # Benefit from recreational consumption c = s - x
# Random inflow distribution: j units arrive with probability probs[j]
probs = [0.1, 0.2, 0.4, 0.2, 0.1]
supp_size = len(probs)
beta = 0.9  # discount factor
"""
Explanation: DiscreteDP Example: Water Management
Daisuke Oyama
Faculty of Economics, University of Tokyo
From Miranda and Fackler, <i>Applied Computational Economics and Finance</i>, 2002,
Section 7.6.5
End of explanation
"""
# Reward array
# R[s, x] = irrigation benefit + recreation benefit; infeasible actions
# (irrigating more than is stored, x > s) get -inf so the solver never
# selects them.
R = np.empty((n, m))
for s, x in itertools.product(range(n), range(m)):
    R[s, x] = F(x) + U(s-x) if x <= s else -np.inf
# Transition probability array
# Next level is s - x + inflow, capped at capacity n-1; mass above the cap
# accumulates on the top state.
Q = np.zeros((n, m, n))
for s, x in itertools.product(range(n), range(m)):
    if x <= s:
        for j in range(supp_size):
            Q[s, x, np.minimum(s-x+j, n-1)] += probs[j]
# Create a DiscreteDP
ddp = DiscreteDP(R, Q, beta)
# Solve the dynamic optimization problem (by policy iteration)
res = ddp.solve()
# Number of iterations
res.num_iter
# Optimal policy
res.sigma
# Optimal value function
res.v
# Simulate the controlled Markov chain for num_rep times
# and compute the average
init = 0
nyrs = 50
ts_length = nyrs + 1
num_rep = 10**4
ave_path = np.zeros(ts_length)
for i in range(num_rep):
    path = res.mc.simulate(ts_length, init=init)
    # Incremental (running) mean — avoids storing all num_rep paths.
    ave_path = (i/(i+1)) * ave_path + (1/(i+1)) * path
ave_path
# Stationary distribution of the Markov chain
stationary_dist = res.mc.stationary_distributions[0]
stationary_dist
# Plot sigma, v, ave_path, stationary_dist
hspace = 0.3
fig, axes = plt.subplots(2, 2, figsize=(12, 8+hspace))
fig.subplots_adjust(hspace=hspace)
axes[0, 0].plot(res.sigma, '*')
axes[0, 0].set_xlim(-1, 31)
axes[0, 0].set_ylim(-0.5, 5.5)
axes[0, 0].set_xlabel('Water Level')
axes[0, 0].set_ylabel('Irrigation')
axes[0, 0].set_title('Optimal Irrigation Policy')
axes[0, 1].plot(res.v)
axes[0, 1].set_xlim(0, 30)
y_lb, y_ub = 300, 700
axes[0, 1].set_ylim(y_lb, y_ub)
axes[0, 1].set_yticks(np.linspace(y_lb, y_ub, 5, endpoint=True))
axes[0, 1].set_xlabel('Water Level')
axes[0, 1].set_ylabel('Value')
axes[0, 1].set_title('Optimal Value Function')
axes[1, 0].plot(ave_path)
axes[1, 0].set_xlim(0, nyrs)
y_lb, y_ub = 0, 15
axes[1, 0].set_ylim(y_lb, y_ub)
axes[1, 0].set_yticks(np.linspace(y_lb, y_ub, 4, endpoint=True))
axes[1, 0].set_xlabel('Year')
axes[1, 0].set_ylabel('Water Level')
axes[1, 0].set_title('Average Optimal State Path')
axes[1, 1].bar(range(n), stationary_dist, align='center')
axes[1, 1].set_xlim(-1, n)
y_lb, y_ub = 0, 0.15
axes[1, 1].set_ylim(y_lb, y_ub+0.01)
axes[1, 1].set_yticks(np.linspace(y_lb, y_ub, 4, endpoint=True))
axes[1, 1].set_xlabel('Water Level')
axes[1, 1].set_ylabel('Probability')
axes[1, 1].set_title('Stationary Distribution')
plt.show()
"""
Explanation: Product formulation
End of explanation
"""
# Arrays of state and action indices
S = np.arange(n)
X = np.arange(m)
# S_left[s, x] = water remaining after irrigating x from level s
S_left = S.reshape(n, 1) - X.reshape(1, n)
# Keep only feasible (state, action) pairs, i.e. x <= s
s_indices, a_indices = np.where(S_left >= 0)
# Reward vector
S_left = S_left[s_indices, a_indices]
R = F(X[a_indices]) + U(S_left)
# Transition probability array
# (sparse: one row per feasible state-action pair)
L = len(S_left)
Q = sparse.lil_matrix((L, n))
for i, s_left in enumerate(S_left):
    for j in range(supp_size):
        # Inflow of j units, capped at the top water level n-1
        Q[i, np.minimum(s_left+j, n-1)] += probs[j]
# Create a DiscreteDP
ddp = DiscreteDP(R, Q, beta, s_indices, a_indices)
# Solve the dynamic optimization problem (by policy iteration)
res = ddp.solve()
# Number of iterations
res.num_iter
# Simulate the controlled Markov chain for num_rep times
# and compute the average
init = 0
nyrs = 50
ts_length = nyrs + 1
num_rep = 10**4
ave_path = np.zeros(ts_length)
for i in range(num_rep):
path = res.mc.simulate(ts_length, init=init)
ave_path = (i/(i+1)) * ave_path + (1/(i+1)) * path
# Stationary distribution of the Markov chain
stationary_dist = res.mc.stationary_distributions[0]
# Plot sigma, v, ave_path, stationary_dist
hspace = 0.3
fig, axes = plt.subplots(2, 2, figsize=(12, 8+hspace))
fig.subplots_adjust(hspace=hspace)
axes[0, 0].plot(res.sigma, '*')
axes[0, 0].set_xlim(-1, 31)
axes[0, 0].set_ylim(-0.5, 5.5)
axes[0, 0].set_xlabel('Water Level')
axes[0, 0].set_ylabel('Irrigation')
axes[0, 0].set_title('Optimal Irrigation Policy')
axes[0, 1].plot(res.v)
axes[0, 1].set_xlim(0, 30)
y_lb, y_ub = 300, 700
axes[0, 1].set_ylim(y_lb, y_ub)
axes[0, 1].set_yticks(np.linspace(y_lb, y_ub, 5, endpoint=True))
axes[0, 1].set_xlabel('Water Level')
axes[0, 1].set_ylabel('Value')
axes[0, 1].set_title('Optimal Value Function')
axes[1, 0].plot(ave_path)
axes[1, 0].set_xlim(0, nyrs)
y_lb, y_ub = 0, 15
axes[1, 0].set_ylim(y_lb, y_ub)
axes[1, 0].set_yticks(np.linspace(y_lb, y_ub, 4, endpoint=True))
axes[1, 0].set_xlabel('Year')
axes[1, 0].set_ylabel('Water Level')
axes[1, 0].set_title('Average Optimal State Path')
axes[1, 1].bar(range(n), stationary_dist, align='center')
axes[1, 1].set_xlim(-1, n)
y_lb, y_ub = 0, 0.15
axes[1, 1].set_ylim(y_lb, y_ub+0.01)
axes[1, 1].set_yticks(np.linspace(y_lb, y_ub, 4, endpoint=True))
axes[1, 1].set_xlabel('Water Level')
axes[1, 1].set_ylabel('Probability')
axes[1, 1].set_title('Stationary Distribution')
plt.show()
"""
Explanation: State-action pairs formulation
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/inpe/cmip6/models/sandbox-1/landice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-1', 'landice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: INPE
Source ID: SANDBOX-1
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:06
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adaptive grid being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of calving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of ice sheet and ice shelf dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation
"""
|
ltiao/project-euler | problem-7-10001st-prime.ipynb | unlicense | from itertools import count, islice
from collections import defaultdict
def _sieve_of_eratosthenes():
factors = defaultdict(set)
for n in count(2):
if factors[n]:
for m in factors.pop(n):
factors[n+m].add(m)
else:
factors[n*n].add(n)
yield n
list(islice(_sieve_of_eratosthenes(), 20))
"""
Explanation: By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10001st prime number?
Sieve of Eratosthenes
Previously, we implemented the Sieve of Eratosthenes. However, our implementation demands an integer $m$ and can only generate primes less than $m$. While some approximation algorithms for determining the $n$th prime are available, we would like to produce an exact solution. Hence, we must implement a prime sieve that does not require an upper bound.
End of explanation
"""
def get_prime(n):
    """Return the prime at 0-based index ``n`` (e.g. ``get_prime(0) == 2``)."""
    # islice skips the first n primes and stops after the (n+1)-th.
    return next(islice(_sieve_of_eratosthenes(), n, n + 1))

# The Project Euler problem
# uses the 1-based index.
get_prime(10001-1)
"""
Explanation: <!-- TEASER_END -->
End of explanation
"""
get_prime(10**6)
"""
Explanation: This implementation scales quite well, and has good space and time complexity.
End of explanation
"""
|
tensorflow/docs-l10n | site/ja/guide/saved_model.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import os
import tempfile
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
# Scratch directory that will hold the exported SavedModels.
tmpdir = tempfile.mkdtemp()

# Allocate GPU memory on demand instead of reserving it all up front.
physical_devices = tf.config.list_physical_devices('GPU')
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)

# Fetch a sample image (Grace Hopper) and resize it to MobileNet's
# expected 224x224 input resolution.
file = tf.keras.utils.get_file(
    "grace_hopper.jpg",
    "https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg")
img = tf.keras.utils.load_img(file, target_size=[224, 224])
plt.imshow(img)
plt.axis('off')
x = tf.keras.utils.img_to_array(img)
# Add a leading batch dimension and apply MobileNet's pixel preprocessing.
x = tf.keras.applications.mobilenet.preprocess_input(
    x[tf.newaxis,...])
"""
Explanation: SavedModel 形式の使用
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://www.tensorflow.org/guide/saved_model"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で表示</a> </td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/saved_model.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
</table>
SavedModel には、トレーニング済みのパラメータ(tf.Variable)や計算を含む完全な TensorFlow プログラムが含まれます。実行するために元のモデルのビルディングコードを必要としないため、TFLite、TensorFlow.js、TensorFlow Serving、または TensorFlow Hub との共有やデプロイに便利です。
以下の API を使用して、SavedModel 形式でのモデルの保存と読み込みを行えます。
低レベルの tf.saved_model API。このドキュメントでは、この API の使用方法を詳しく説明しています。
保存: tf.saved_model.save(model, path_to_dir)
読み込み: model = tf.saved_model.load(path_to_dir)
高レベルの tf.keras.Model API。Keras の保存とシリアル化ガイドをご覧ください。
トレーニング中の重みの保存/読み込みのみを実行する場合は、チェックポイントガイドをご覧ください。
Keras を使った SavedModel の作成
簡単な導入として、このセクションでは、事前にトレーニング済みの Keras モデルをエクスポートし、それを使って画像分類リクエストを送信します。SavedModels のほかの作成方法については、このガイドの残りの部分で説明します。
End of explanation
"""
# Download the ImageNet label names used to decode MobileNet predictions.
labels_path = tf.keras.utils.get_file(
    'ImageNetLabels.txt',
    'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
# Use a context manager so the label file handle is closed deterministically
# (the original bare open(...).read() relied on garbage collection).
with open(labels_path) as labels_file:
    imagenet_labels = np.array(labels_file.read().splitlines())

pretrained_model = tf.keras.applications.MobileNet()
result_before_save = pretrained_model(x)

# argsort ascending, reversed -> top-5 class indices; the +1 presumably
# offsets past a leading "background" entry in the label file — confirm
# against ImageNetLabels.txt.
decoded = imagenet_labels[np.argsort(result_before_save)[0,::-1][:5]+1]
print("Result before saving:\n", decoded)
"""
Explanation: 実行例として、グレース・ホッパーの画像と Keras の事前トレーニング済み画像分類モデルを使用します(使いやすいため)。カスタムモデルも使用できますが、これについては後半で説明します。
End of explanation
"""
mobilenet_save_path = os.path.join(tmpdir, "mobilenet/1/")
tf.saved_model.save(pretrained_model, mobilenet_save_path)
"""
Explanation: この画像の予測トップは「軍服」です。
End of explanation
"""
loaded = tf.saved_model.load(mobilenet_save_path)
print(list(loaded.signatures.keys())) # ["serving_default"]
"""
Explanation: save-path は、TensorFlow Serving が使用する規則に従っており、最後のパスコンポーネント(この場合 1/)はモデルのバージョンを指します。Tensorflow Serving のようなツールで、相対的な鮮度を区別させることができます。
tf.saved_model.load で SavedModel を Python に読み込み直し、ホッパー将官の画像がどのように分類されるかを確認できます。
End of explanation
"""
infer = loaded.signatures["serving_default"]
print(infer.structured_outputs)
"""
Explanation: インポートされるシグネチャは、必ずディクショナリを返します。シグネチャ名と出力ディクショナリキーをカスタマイズするには、「エクスポート中のシグネチャの指定」を参照してください。
End of explanation
"""
labeling = infer(tf.constant(x))[pretrained_model.output_names[0]]
decoded = imagenet_labels[np.argsort(labeling)[0,::-1][:5]+1]
print("Result after saving and loading:\n", decoded)
"""
Explanation: SavedModel から推論を実行すると、元のモデルと同じ結果が得られます。
End of explanation
"""
!ls {mobilenet_save_path}
"""
Explanation: TensorFlow Serving での SavedModel の実行
SavedModels は Python から使用可能(詳細は以下参照)ですが、本番環境では通常、Python コードを使用せずに、推論専用のサービスが使用されます。これは、TensorFlow Serving を使用して SavedModel から簡単にセットアップできます。
エンドツーエンドのtensorflow-servingの例については、 TensorFlow Serving RESTチュートリアルをご覧ください。
ディスク上の SavedModel 形式
SavedModel は、変数の値や語彙など、シリアル化されたシグネチャとそれらを実行するために必要な状態を含むディレクトリです。
End of explanation
"""
!saved_model_cli show --dir {mobilenet_save_path} --tag_set serve
"""
Explanation: saved_model.pb ファイルは、実際の TensorFlow プログラムまたはモデル、およびテンソル入力を受け入れてテンソル出力を生成する関数を識別する一連の名前付きシグネチャを保存します。
SavedModel には、複数のモデルバリアント(saved_model_cli への --tag_set フラグで識別される複数の v1.MetaGraphDefs)が含まれることがありますが、それは稀なことです。複数のモデルバリアントを作成する API には、tf.Estimator.experimental_export_all_saved_models と TensorFlow 1.x の tf.saved_model.Builder があります。
End of explanation
"""
!ls {mobilenet_save_path}/variables
"""
Explanation: variables ディレクトリには、標準のトレーニングチェックポイントが含まれます(「トレーニングチェックポイントガイド」を参照してください)。
End of explanation
"""
class CustomModule(tf.Module):
    """Toy trackable module that scales its input by a trainable variable."""

    def __init__(self):
        super(CustomModule, self).__init__()
        # Trainable scale factor; tracked (and saved) by tf.Module.
        self.v = tf.Variable(1.)

    @tf.function
    def __call__(self, x):
        # print() runs only while tracing a new ConcreteFunction,
        # not on every subsequent call with a matching signature.
        print('Tracing with', x)
        return x * self.v

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def mutate(self, new_v):
        # Fixed input signature: accepts exactly one scalar float32.
        self.v.assign(new_v)
module = CustomModule()
"""
Explanation: assets ディレクトリには、語彙テーブルを初期化するためのテキストファイルなど、TensorFlow グラフが使用するファイルが含まれます。この例では使用されません。
SavedModel には、SavedModel で何をするかといった消費者向けの情報など、TensorFlow グラフで使用されないファイルに使用する assets.extra ディレクトリがある場合があります。TensorFlow そのものでは、このディレクトリは使用されません。
カスタムモデルの保存
tf.saved_model.save は、tf.Module オブジェクトと、tf.keras.Layer や tf.keras.Model などのサブクラスの保存をサポートしています。
tf.Module の保存と復元の例を見てみましょう。
End of explanation
"""
module_no_signatures_path = os.path.join(tmpdir, 'module_no_signatures')
module(tf.constant(0.))
print('Saving model...')
tf.saved_model.save(module, module_no_signatures_path)
"""
Explanation: tf.Module を保存すると、すべての tf.Variable 属性、tf.function でデコレートされたメソッド、および再帰トラバースで見つかった tf.Module が保存されます(この再帰トラバースについては、「チェックポイントのチュートリアル」を参照してください)。ただし、Python の属性、関数、およびデータは失われます。つまり、tf.function が保存されても、Python コードは保存されません。
Python コードが保存されないのであれば、SavedModel は関数をどのようにして復元するのでしょうか。
簡単に言えば、tf.function は、Python コードをトレースして ConcreteFunction(tf.Graph のコーラブルラッパー)を生成することで機能します。tf.function を保存すると、実際には tf.function の ConcreteFunctions のキャッシュを保存しているのです。
tf.function と ConcreteFunctions の関係に関する詳細は、「tf.function ガイド」をご覧ください。
End of explanation
"""
imported = tf.saved_model.load(module_no_signatures_path)
assert imported(tf.constant(3.)).numpy() == 3
imported.mutate(tf.constant(2.))
assert imported(tf.constant(3.)).numpy() == 6
"""
Explanation: カスタムモデルの読み込みと使用
Python に SavedModel を読み込むと、すべての tf.Variable 属性、tf.function でデコレートされたメソッド、および tf.Module は、保存された元の tf.Module と同じオブジェクト構造で復元されます。
End of explanation
"""
optimizer = tf.optimizers.SGD(0.05)

def train_step():
    """Run one SGD step nudging imported(2.) toward 10; return the loss."""
    with tf.GradientTape() as tape:
        # Squared error between the target (10) and the model output.
        loss = (10. - imported(tf.constant(2.))) ** 2
    # Variables the tape saw during the forward pass (includes imported.v).
    variables = tape.watched_variables()
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(grads, variables))
    return loss

for _ in range(10):
    # "v" approaches 5, "loss" approaches 0
    print("loss={:.2f} v={:.2f}".format(train_step(), imported.v.numpy()))
"""
Explanation: Python コードは保存されないため、新しい入力シグネチャで tf.function で呼び出しても失敗します。
python
imported(tf.constant([3.]))
<pre>ValueError: Could not find matching function to call for canonicalized inputs ((<tf.Tensor 'args_0:0' shape=(1,) dtype=float32>,), {}). Only existing signatures are [((TensorSpec(shape=(), dtype=tf.float32, name=u'x'),), {})].
</pre>
基本の微調整
変数オブジェクトを使用でき、インポートされた関数を通じてバックプロパゲーションできます。単純なケースの場合、SavedModel をファインチューニング(再トレーニング)するには、これで十分です。
End of explanation
"""
loaded = tf.saved_model.load(mobilenet_save_path)
print("MobileNet has {} trainable variables: {}, ...".format(
len(loaded.trainable_variables),
", ".join([v.name for v in loaded.trainable_variables[:5]])))
trainable_variable_ids = {id(v) for v in loaded.trainable_variables}
non_trainable_variables = [v for v in loaded.variables
if id(v) not in trainable_variable_ids]
print("MobileNet also has {} non-trainable variables: {}, ...".format(
len(non_trainable_variables),
", ".join([v.name for v in non_trainable_variables[:3]])))
"""
Explanation: 一般的な微調整
Keras の SavedModel は、より高度な微調整の事例に対処できる、プレーンな __call__ よりも詳細な内容を提供します。TensorFlow Hub は、微調整の目的で共有される SavedModel に、該当する場合は次の項目を提供することをお勧めします。
モデルに、フォワードパスがトレーニングと推論で異なるドロップアウトまたはほかのテクニックが使用されている場合(バッチの正規化など)、__call__ メソッドは、オプションのPython 重視の training= 引数を取ります。この引数は、デフォルトで False になりますが、True に設定することができます。
__call__ 属性の隣には、対応する変数リストを伴う .variable と .trainable_variable 属性があります。もともとトレーニング可能であっても、微調整中には凍結されるべき変数は、.trainable_variables から省略されます。
レイヤとサブモデルの属性として重みの正規化を表現する Keras のようなフレームワークのために、.regularization_losses 属性も使用できます。この属性は、値が合計損失に追加することを目的とした引数無しの関数のリストを保有します。
最初の MobileNet の例に戻ると、これらの一部が動作していることを確認できます。
End of explanation
"""
assert len(imported.signatures) == 0
"""
Explanation: エクスポート中のシグネチャの指定
TensorFlow Serving や saved_model_cli のようなツールは、SavedModel と対話できます。これらのツールがどの ConcreteFunctions を使用するか判定できるように、サービングシグネチャを指定する必要があります。tf.keras.Model は、サービングシグネチャを自動的に指定しますが、カスタムモジュールに対して明示的に宣言する必要があります。
重要: モデルを TensorFlow 2.x と Python 以外の環境にエクスポートする必要がない限り、おそらく明示的にシグネチャをエクスポートする必要はありません。特定の関数に入力シグネチャを強要する方法を探している場合は、tf.function への input_signature 引数をご覧ください。
デフォルトでは、シグネチャはカスタム tf.Module で宣言されません。
End of explanation
"""
module_with_signature_path = os.path.join(tmpdir, 'module_with_signature')
call = module.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32))
tf.saved_model.save(module, module_with_signature_path, signatures=call)
imported_with_signatures = tf.saved_model.load(module_with_signature_path)
list(imported_with_signatures.signatures.keys())
"""
Explanation: サービングシグネチャを宣言するには、signatures kwarg を使用して ConcreteFunction 指定します。単一のシグネチャを指定する場合、シグネチャキーは 'serving_default' となり、定数 tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY として保存されます。
End of explanation
"""
module_multiple_signatures_path = os.path.join(tmpdir, 'module_with_multiple_signatures')
signatures = {"serving_default": call,
"array_input": module.__call__.get_concrete_function(tf.TensorSpec([None], tf.float32))}
tf.saved_model.save(module, module_multiple_signatures_path, signatures=signatures)
imported_with_multiple_signatures = tf.saved_model.load(module_multiple_signatures_path)
list(imported_with_multiple_signatures.signatures.keys())
"""
Explanation: 複数のシグネチャをエクスポートするには、シグネチャキーのディクショナリを ConcreteFunction に渡します。各シグネチャキーは 1 つの ConcreteFunction に対応します。
End of explanation
"""
class CustomModuleWithOutputName(tf.Module):
    """Module whose __call__ returns a dict, naming its output tensor."""

    def __init__(self):
        super(CustomModuleWithOutputName, self).__init__()
        # Trainable scale factor applied to the input.
        self.v = tf.Variable(1.)

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def __call__(self, x):
        # Returning a dict makes the exported signature use the key
        # 'custom_output_name' instead of the generic 'output_0'.
        return {'custom_output_name': x * self.v}
module_output = CustomModuleWithOutputName()
# Trace a concrete function accepting a float32 tensor of unspecified shape.
call_output = module_output.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32))
module_output_path = os.path.join(tmpdir, 'module_with_output_name')
tf.saved_model.save(module_output, module_output_path,
                    signatures={'serving_default': call_output})
imported_with_output_name = tf.saved_model.load(module_output_path)
# The signature's output key is now 'custom_output_name' (not 'output_0').
imported_with_output_name.signatures['serving_default'].structured_outputs
"""
Explanation: デフォルトでは、出力されたテンソル名は、output_0 というようにかなり一般的な名前です。出力の名前を制御するには、出力名を出力にマッピングするディクショナリを返すように tf.function を変更します。入力の名前は Python 関数の引数名から取られます。
End of explanation
"""
|
ajhenrikson/phys202-2015-work | assignments/project/NeuralNetworks.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
from IPython.html.widgets import interact
from sklearn.datasets import load_digits
digits = load_digits()
print(digits.data.shape)
def show_digit(i):
    """Render digit image ``i`` from the loaded scikit-learn digits dataset."""
    # Trailing semicolon removed: it is a no-op statement terminator in Python.
    plt.matshow(digits.images[i])
interact(show_digit, i=(0,100));
"""
Explanation: Neural Networks
This project was created by Brian Granger. All content is licensed under the MIT License.
Introduction
Neural networks are a class of algorithms that can learn how to compute the value of a function given previous examples of the functions output. Because neural networks are capable of learning how to compute the output of a function based on existing data, they generally fall under the field of Machine Learning.
Let's say that we don't know how to compute some function $f$:
$$ f(x) \rightarrow y $$
But we do have some data about the output that $f$ produces for particular input $x$:
$$ f(x_1) \rightarrow y_1 $$
$$ f(x_2) \rightarrow y_2 $$
$$ \ldots $$
$$ f(x_n) \rightarrow y_n $$
A neural network learns how to use that existing data to compute the value of the function $f$ on yet unseen data. Neural networks get their name from the similarity of their design to how neurons in the brain work.
Work on neural networks began in the 1940s, but significant advancements were made in the 1970s (backpropagation) and more recently, since the late 2000s, with the advent of deep neural networks. These days neural networks are starting to be used extensively in products that you use. A great example of the application of neural networks is the recently released Flickr automated image tagging. With these algorithms, Flickr is able to determine what tags ("kitten", "puppy") should be applied to each photo, without human involvement.
In this case the function takes an image as input and outputs a set of tags for that image:
$$ f(image) \rightarrow {tag_1, \ldots} $$
For the purpose of this project, good introductions to neural networks can be found at:
The Nature of Code, Daniel Shiffman.
Neural Networks and Deep Learning, Michael Nielsen.
Data Science from Scratch, Joel Grus
The Project
Your general goal is to write Python code to predict the number associated with handwritten digits. The dataset for these digits can be found in sklearn:
End of explanation
"""
digits.target
"""
Explanation: The actual, known values (0,1,2,3,4,5,6,7,8,9) associated with each image can be found in the target array:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/dwd/cmip6/models/sandbox-2/aerosol.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# Create the ES-DOC output document for the CMIP6 'aerosol' topic of the
# DWD 'sandbox-2' source; every subsequent cell records one property on DOC.
DOC = NotebookOutput('cmip6', 'dwd', 'sandbox-2', 'aerosol')
"""
Explanation: ES-DOC CMIP6 Model Properties - Aerosol
MIP Era: CMIP6
Institute: DWD
Source ID: SANDBOX-2
Topic: Aerosol
Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model.
Properties: 69 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:57
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Meteorological Forcings
5. Key Properties --> Resolution
6. Key Properties --> Tuning Applied
7. Transport
8. Emissions
9. Concentrations
10. Optical Radiative Properties
11. Optical Radiative Properties --> Absorption
12. Optical Radiative Properties --> Mixtures
13. Optical Radiative Properties --> Impact Of H2o
14. Optical Radiative Properties --> Radiative Scheme
15. Optical Radiative Properties --> Cloud Interactions
16. Model
1. Key Properties
Key properties of the aerosol model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of aerosol model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Prognostic variables in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of tracers in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are aerosol calculations generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Physical properties of seawater in ocean
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the time evolution of the prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the aerosol model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.5. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Meteorological Forcings
**
4.1. Variables 3D
Is Required: FALSE Type: STRING Cardinality: 0.1
Three dimensional forcing variables, e.g. U, V, W, T, Q, P, convective mass flux
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Variables 2D
Is Required: FALSE Type: STRING Cardinality: 0.1
Two dimensional forcing variables, e.g. land-sea mask definition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Frequency
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Frequency with which meteorological forcings are applied (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Resolution
Resolution in the aerosol model grid
5.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for aerosol model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Transport
Aerosol transport
7.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of transport in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
"""
Explanation: 7.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for aerosol transport modeling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.3. Mass Conservation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to ensure mass conservation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.4. Convention
Is Required: TRUE Type: ENUM Cardinality: 1.N
Transport by convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Emissions
Atmospheric aerosol emissions
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of emissions in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to define aerosol species (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the aerosol species are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prescribed Climatology
Is Required: FALSE Type: ENUM Cardinality: 0.1
Specify the climatology type for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed via a climatology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Other Method Characteristics
Is Required: FALSE Type: STRING Cardinality: 0.1
Characteristics of the "other method" used for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Concentrations
Atmospheric aerosol concentrations
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of concentrations in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Prescribed Fields Mmr
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as mass mixing ratios.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# NOTE: the generated notebook repeated the 9.4 property id here; section 9.5
# ("List of species prescribed as AOD plus CCNs") maps to the
# concentrations.prescribed_fields_aod property in the CMIP6 aerosol
# specialization, so the id is corrected accordingly.
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Prescribed Fields AOD
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as AOD plus CCNs.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Optical Radiative Properties
Aerosol optical and radiative properties
10.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of optical and radiative properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11. Optical Radiative Properties --> Absorption
Absorption properties in the aerosol scheme
11.1. Black Carbon
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Dust
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.3. Organics
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Optical Radiative Properties --> Mixtures
**
12.1. External
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there external mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12.2. Internal
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there internal mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Mixing Rule
Is Required: FALSE Type: STRING Cardinality: 0.1
If there is internal mixing with respect to chemical composition then indicate the mixing rule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13. Optical Radiative Properties --> Impact Of H2o
**
13.1. Size
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact size?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.2. Internal Mixture
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact internal mixture?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Optical Radiative Properties --> Radiative Scheme
Radiative scheme for aerosol
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Shortwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of shortwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Optical Radiative Properties --> Cloud Interactions
Aerosol-cloud interactions
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol-cloud interactions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.2. Twomey
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the Twomey effect included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Twomey Minimum Ccn
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If the Twomey effect is included, then what is the minimum CCN number?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.4. Drizzle
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect drizzle?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Cloud Lifetime
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect cloud lifetime?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.6. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Model
Aerosol model
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
"""
Explanation: 16.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the Aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other model components coupled to the Aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.4. Gas Phase Precursors
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of gas phase aerosol precursors.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.5. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.6. Bulk Scheme Species
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of species covered by the bulk scheme.
End of explanation
"""
|
guruucsd/EigenfaceDemo | python/PCA Demo.ipynb | mit | from numpy.random import standard_normal # Gaussian variables
# Draw N samples of P independent standard-normal variables, so the true
# covariance between any two distinct columns is zero.
N = 1000; P = 5
X = standard_normal((N, P))
# Center each column by subtracting its mean.
W = X - X.mean(axis=0,keepdims=True)
# Empirical (unnormalized) covariance between the first two centered columns:
# small, but not exactly zero, because of sampling noise.
# NOTE(review): `dot` is presumably in scope via a pylab-style star import
# in an earlier cell -- confirm.
print(dot(W[:,0], W[:,1]))
"""
Explanation: PCA and EigenFaces Demo
In this demo, we will go through the basic concepts behind the principal component analysis (PCA). We will then apply PCA to a face dataset to find the characteristic faces ("eigenfaces").
What is PCA?
PCA is a linear transformation. Suppose we have a $N \times P$ data matrix ${\bf X}$, where $N$ is the number of samples and $P$ is the dimension of each sample. Then PCA will find you a $K \times P$ matrix ${\bf V}$ such that
$$ \underbrace{{\bf X}}_{N \times P} = \underbrace{{\bf S}}_{N \times K} \underbrace{{\bf V}}_{K \times P}. $$
Here, $K$ is the number of principal components with $K \le P$.
But what does the V matrix do?
We can think of ${\bf V}$ in many different ways.
The first way is to think of it as a de-correlating transformation: originally, each variable (or dimension) in ${\bf X}$ - there are $P$ of them - may be correlated. That is, if we take any two column vectors of ${\bf X}$, say ${\bf x}_0$ and ${\bf x}_1$, their covariance is not going to be zero.
Let's try this in a randomly generated data:
End of explanation
"""
from sklearn.decomposition import PCA
S=PCA(whiten=True).fit_transform(X)
print(dot(S[:,0], S[:,1]))
"""
Explanation: I'll skip ahead and use a pre-canned PCA routine from scikit-learn (but we'll dig into it a bit later!) Let's see what happens to the transformed variables, ${\bf S}$:
End of explanation
"""
from numpy.random import standard_normal
from matplotlib.patches import Ellipse
from numpy.linalg import svd
@interact
def plot_2d_pca(mu_x=FloatSlider(min=-3.0, max=3.0, value=0),
                mu_y=FloatSlider(min=-3.0, max=3.0, value=0),
                sigma_x=FloatSlider(min=0.2, max=1.8, value=1.8),
                sigma_y=FloatSlider(min=0.2, max=1.8, value=0.3),
                theta=FloatSlider(min=0.0, max=pi, value=pi/6), center=False):
    """Interactive 2-D PCA demo.

    Draws 1000 Gaussian samples scaled per-axis by (sigma_x, sigma_y),
    rotated by ``theta`` and shifted to (mu_x, mu_y), then overlays the
    principal-component directions found via SVD.  When ``center`` is False
    the data mean is NOT subtracted, illustrating why centering matters.
    """
    mu=array([mu_x, mu_y])
    sigma=array([sigma_x, sigma_y])
    # 2-D rotation matrix for angle theta.
    R=array([[cos(theta),-sin(theta)],[sin(theta),cos(theta)]])
    # Scale the unit Gaussian per axis, rotate, then translate.
    X=dot(standard_normal((1000, 2)) * sigma[newaxis,:],R.T) + mu[newaxis,:]
    # Plot the points and the ellipse
    fig, ax = plt.subplots(figsize=(8,8))
    # Only the first 200 points are drawn to keep the plot readable.
    ax.scatter(X[:200,0], X[:200,1], marker='.')
    ax.grid()
    M=8.0
    ax.set_xlim([-M,M])
    ax.set_ylim([-M,M])
    # Reference ellipse of the generating distribution (axes 3*sigma total
    # width), rotated by theta; angle is given to matplotlib in degrees.
    e=Ellipse(xy=array([mu_x, mu_y]), width=sigma_x*3, height=sigma_y*3, angle=theta/pi*180,
              facecolor=[1.0,0,0], alpha=0.3)
    ax.add_artist(e)
    # Perform PCA and plot the vectors
    if center:
        X_mean=X.mean(axis=0,keepdims=True)
    else:
        X_mean=zeros((1,2))
    # Doing PCA here... I'm using svd instead of scikit-learn PCA, we'll come back to this.
    U,s,V =svd(X-X_mean, full_matrices=False)
    # Each row of V is a principal direction; scaling by s/sqrt(N) makes the
    # arrow length reflect the per-component spread of the data.
    for v in dot(diag(s/sqrt(X.shape[0])),V): # Each eigenvector
        ax.arrow(X_mean[0,0],X_mean[0,1],-v[0],-v[1],
                 head_width=0.5, head_length=0.5, fc='b', ec='b')
    Ustd=U.std(axis=0)
    # Title shows the standard deviation of the scores U*s per component.
    ax.set_title('std(U*s) [%f,%f]' % (Ustd[0]*s[0],Ustd[1]*s[1]))
"""
Explanation: Another way to look at ${\bf V}$ is to think of its rows as projections. Since the row vectors of ${\bf V}$ are orthogonal to each other, the projected data ${\bf S}$ lies in a new "coordinate system" specified by ${\bf V}$. Furthermore, the new coordinate system is sorted in decreasing order of variance in the original data. So, PCA can be thought of as calculating a new coordinate system where the basis vectors point toward the directions of largest variance first.
<img src="files/images/PCA/pca.png" style="margin:auto; width: 483px;"/>
Exercise 1. Let's get a feel for this in the following interactive example. Try moving the sliders around to generate the data, and see how the principal component vectors change.
In this demo, mu_x and mu_y specifies the center of the data, sigma_x and sigma_y the standard deviations, and everything is rotated by the angle theta. The two blue arrows are the rows of ${\bf V}$ that gets calculated.
When you click on center, the data is first centered (mean is subtracted from the data) first. (Question: why is it necessary to "center" data when mu_x and mu_y are not zero?)
End of explanation
"""
import pickle
dataset=pickle.load(open('data/cafe.pkl','r')) # or 'pofa.pkl' for POFA
disp('dataset.images shape is %s' % str(dataset.images.shape))
disp('dataset.data shape is %s' % str(dataset.data.shape))
@interact
def plot_face(image_id=(0, dataset.images.shape[0]-1)):
    """Interactively display one raw face image from the dataset.

    The slider selects ``image_id``; the title shows the image's target id
    and gender label from the dataset metadata.
    """
    # Grayscale rendering of the selected (H x W) image array.
    plt.imshow(dataset.images[image_id],cmap='gray')
    plt.title('Image Id = %d, Gender = %d' % (dataset.target[image_id], dataset.gender[image_id]))
    # Pixel coordinates are not meaningful here, so hide the axes.
    plt.axis('off')
"""
Explanation: Yet another use for ${\bf V}$ is to perform a dimensionality reduction. In many scenarios you encounter in image manipulation (as we'll see soon), we might want to have a more concise representation of the data ${\bf X}$. PCA with $K < P$ is one way to reduce the dimensionality: because PCA picks the directions with the highest data variances, a small number of top $K$ rows is often sufficient to approximate (reconstruct) ${\bf X}$.
How do we actually perform PCA?
Well, we can use from sklearn.decomposition import PCA. But for learning, let's dig just one step into what it acutally does.
One of the easiest way to perform PCA is to use the singular value decomposition (SVD). SVD decomposes a matrix ${\bf X}$ into a unitary matrix ${\bf U}$, rectangular diagonal matrix ${\bf \Sigma}$ (called "singular values"), and another unitary matrix ${\bf W}$ such that
$$ {\bf X} = {\bf U} {\bf \Sigma} {\bf W}$$
So how can we use that to do PCA? Well, it turns out that ${\bf \Sigma} {\bf W}$ from the SVD is exactly what we need to calculate the ${\bf V}$ matrix for the PCA, so we just have to run SVD and set ${\bf V} = {\bf \Sigma} {\bf W}$.
(Note: svd of numpy returns only the diagonal elements of ${\bf \Sigma}$.)
Exercise 2. Generate 1000 10-dimensional data and perform PCA this way. Plot the squares of the singular values.
To reduce the $P$-dimensional data ${\bf X}$ to $K$-dimensional data, we just need to pick the top $K$ row vectors of ${\bf V}$ - let's call that ${\bf W}$ - then calculate ${\bf T} = {\bf X} {\bf W}^\intercal$. ${\bf T}$ then has the dimension $N \times K$.
If we want to reconstruct the data ${\bf T}$, we simply do ${\hat {\bf X}} = {\bf T} {\bf W}$ (and re-add the means for ${\bf X}$, if necessary).
Exercise 3. Reduce the same data to 5 dimensions, then based on the projected data ${\bf T}$, reconstruct ${\bf X}$. What's the mean squared error of the reconstruction?
Performing PCA on a face dataset
Now that we have a handle on the PCA method, let's try applying it to a dataset consisting of face data. We have two datasets in this demo, CAFE and POFA. The following code loads the dataset into the dataset variable:
End of explanation
"""
X=dataset.data.copy() # So that we won't mess up the data in the dataset\
X_mean=X.mean(axis=0,keepdims=True) # Mean for each dimension across sample (centering)
X_std=X.std(axis=0,keepdims=True)
X-=X_mean
disp(all(abs(X.mean(axis=0))<1e-12)) # Are means for all dimensions very close to zero?
"""
Explanation: Preprocessing
We'll center the data by subtracting the mean. The first axis (axis=0) is the n_samples dimension.
End of explanation
"""
from numpy.linalg import svd
U,s,V=svd(X,compute_uv=True, full_matrices=False)
disp(str(U.shape))
disp(str(s.shape))
disp(str(V.shape))
"""
Explanation: Then we perform SVD to calculate the projection matrix $V$. By default, U,s,V=svd(...) returns full matrices, which will return $n \times n$ matrix U, $n$-dimensional vector of singular values s, and $d \times d$ matrix V. But here, we don't really need $d \times d$ matrix V; with full_matrices=False, svd only returns $n \times d$ matrix for V.
End of explanation
"""
variance_ratio=s**2/(s**2).sum() # Normalized so that they add to one.
@interact
def plot_variance_ratio(n_components=(1, len(variance_ratio))):
    """Interactively show variance explained vs. number of components.

    Left: per-component explained-variance ratio.  Right: its cumulative
    sum, annotated with the fraction captured by the first ``n_components``.
    """
    # Slider is 1-based; n is the 0-based index of the last kept component.
    n=n_components-1
    fig, axs = plt.subplots(1, 2, figsize=(12, 5))
    axs[0].plot(variance_ratio)
    axs[0].set_title('Explained Variance Ratio')
    axs[0].set_xlabel('n_components')
    # Cross-hairs mark the selected component and its variance ratio.
    axs[0].axvline(n, color='r', linestyle='--')
    axs[0].axhline(variance_ratio[n], color='r', linestyle='--')
    axs[1].plot(cumsum(variance_ratio))
    axs[1].set_xlabel('n_components')
    axs[1].set_title('Cumulative Sum')
    # Fraction of total variance captured by the first n_components.
    captured=cumsum(variance_ratio)[n]
    axs[1].axvline(n, color='r', linestyle='--')
    axs[1].axhline(captured, color='r', linestyle='--')
    axs[1].annotate(s='%f%% with %d components' % (captured * 100, n_components), xy=(n, captured),
                    xytext=(10, 0.5), arrowprops=dict(arrowstyle="->"))
"""
Explanation: We can also plot how much each eigenvector in V contributes to the overall variance by plotting variance_ratio = $\frac{s^2}{\sum s^2}$. (Notice that s is already in the decreasing order.) The cumsum (cumulative sum) of variance_ratio then shows how much of the variance is explained by components up to n_components.
End of explanation
"""
image_shape=dataset.images.shape[1:] # (H x W)
@interact
def plot_eigenface(eigenface=(0, V.shape[0]-1)):
    """Interactively display one eigenface (a row of V).

    The row is multiplied by the per-pixel standard deviations X_std so it
    is displayed in the original (un-standardized) pixel units.
    """
    v=V[eigenface]*X_std
    plt.imshow(v.reshape(image_shape), cmap='gray')
    # Title reports the component index and the value range of the image.
    plt.title('Eigenface %d (%f to %f)' % (eigenface, v.min(), v.max()))
    plt.axis('off')
"""
Explanation: Since we're dealing with face data, each row vector of ${\bf V}$ is called an "eigenface". The first "eigenface" is the one that explains a lot of variances in the data, whereas the last one explains the least.
End of explanation
"""
@interact
def plot_reconstruction(image_id=(0,dataset.images.shape[0]-1), n_components=(0, V.shape[0]-1),
                        pc1_multiplier=FloatSlider(min=-2,max=2, value=1)):
    """Reconstruct a face from its top ``n_components`` principal components.

    ``pc1_multiplier`` rescales the first component's coefficient, showing
    how much of the image that single component carries.  Plots the
    original, the reconstruction, and the per-pixel squared relative error.
    """
    # This is where we perform the projection and un-projection
    Vn=V[:n_components]
    # M scales each kept coefficient; only the first PC's weight is varied.
    M=ones(n_components)
    if n_components > 0:
        M[0]=pc1_multiplier
    # Project onto the kept components, reweight, then un-project.
    X_hat=dot(multiply(dot(X[image_id], Vn.T), M), Vn)
    # Un-center
    I=X[image_id] + X_mean
    I_hat = X_hat + X_mean
    # Squared error normalized by the per-pixel variance of the dataset.
    D=multiply(I-I_hat,I-I_hat) / multiply(X_std, X_std)
    # And plot
    fig, axs = plt.subplots(1, 3, figsize=(10, 10))
    axs[0].imshow(I.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
    axs[0].axis('off')
    axs[0].set_title('Original')
    axs[1].imshow(I_hat.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
    axs[1].axis('off')
    axs[1].set_title('Reconstruction')
    # 1-D so zero error renders white and large relative error renders dark.
    axs[2].imshow(1-D.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
    axs[2].axis('off')
    # NOTE(review): the displayed value is sqrt(mean(D)) -- an RMS, not the
    # mean the label suggests; confirm which was intended.
    axs[2].set_title('Difference^2 (mean = %f)' % sqrt(D.mean()))
    plt.tight_layout()
"""
Explanation: Now let's try reconstructing faces with different number of principal components (PCs)! Now, the transformed X is reconstructed by multiplying by the sample standard deviations for each dimension and adding the sample mean. For this reason, even for zero components, you get a face-like image!
The rightmost plot is the "relative" reconstruction error (image minus the reconstruction squared, divided by the data standard deviations). White is where the error is close to zero, and black is where the relative error is large (1 or more). As you increase the number of PCs, you should see the error mostly going to zero (white).
End of explanation
"""
def plot_morph(left=0, right=1, mix=0.5):
    """Morph two faces by linear interpolation in PCA coefficient space.

    ``mix`` = 0 shows the left image's reconstruction, 1 the right's;
    values in between blend the projected coefficients before un-projecting.
    """
    # Projected images
    x_lft=dot(X[left], V.T)
    x_rgt=dot(X[right], V.T)
    # Mix
    x_avg = x_lft * (1.0-mix) + x_rgt * (mix)
    # Un-project
    X_hat = dot(x_avg[newaxis,:], V)
    # Re-add the mean that was removed during centering.
    I_hat = X_hat + X_mean
    # And plot
    fig, axs = plt.subplots(1, 3, figsize=(10, 10))
    axs[0].imshow(dataset.images[left], cmap='gray', vmin=0, vmax=1)
    axs[0].axis('off')
    axs[0].set_title('Left')
    axs[1].imshow(I_hat.reshape(image_shape), cmap='gray', vmin=0, vmax=1)
    axs[1].axis('off')
    axs[1].set_title('Morphed (%.2f %% right)' % (mix * 100))
    axs[2].imshow(dataset.images[right], cmap='gray', vmin=0, vmax=1)
    axs[2].axis('off')
    axs[2].set_title('Right')
    plt.tight_layout()
# Wire the sliders: any image index on either side, mix ranging 0..1.
interact(plot_morph,
         left=IntSlider(max=dataset.images.shape[0]-1),
         right=IntSlider(max=dataset.images.shape[0]-1,value=1),
         mix=FloatSlider(value=0.5, min=0, max=1.0))
"""
Explanation: Image morphing
As a fun exercise, we'll morph two images by taking averages of the two images within the transformed data space. How is it different than simply morphing them in the pixel space?
End of explanation
"""
|
phockett/ePSproc | notebooks/utilDev/zenodo_data_download_tests_200720.ipynb | gpl-3.0 | import requests
# From doi
urlDOI = 'http://dx.doi.org/10.5281/zenodo.3629721'
r = requests.get(urlDOI)
r.ok
dir(r)
# r.json() Throws an error, not sure why!
# import json
# json.loads(r.text) # Ah, same error - seems to be formatting issue?
# JSONDecodeError: Expecting value: line 2 column 1 (char 1)
print(r.text) # This is OK, just HTML for Zenodo record page.
r.text
# OPTIONS: parse this text for file links & download, or use API
"""
Explanation: Zenodo data downloads
20/07/20
Quick tests for data IO with Zenodo.
(See also epsman for Zenodo API stuff (in development) for packaging & uploading to Zenodo + ePSdata.)
Options:
Basic requests.get() with Zenodo API should be fine.
Python wrappers, e.g. zenodo_get
Testing with record: http://dx.doi.org/10.5281/zenodo.3629721
Basic requests usage from URL
End of explanation
"""
import os
from pathlib import Path
# Set record IDs, starting from DOI
recordID = {}
recordID['doi'] = '10.5281/zenodo.3629721'
recordID['url'] = {'doi':'http://dx.doi.org/' + recordID['doi']}
# The Zenodo record number is the trailing integer of the DOI suffix.
recordID['zenID'] = int(recordID['doi'].rsplit('.',1)[-1])
recordID['url']['get'] = 'https://zenodo.org/record/' + str(recordID['zenID'])
# Set also local paths, working dir or other
# recordID['downloadBase'] = Path(os.getcwd())
recordID['downloadBase'] = Path('/home/femtolab/Downloads')
# Files for this record go in a subdirectory named after the record number.
recordID['downloadDir'] = recordID['downloadBase']/str(recordID['zenID'])
try:
    os.mkdir(recordID['downloadDir'])
except FileExistsError:
    # Not fatal: reuse the existing directory (downloads may overwrite files).
    print(f"*** Directory {recordID['downloadDir']} already exists, contents will be overwritten.")
testStr = 'http://dx.doi.org/10.5281/zenodo.3629721'
# testStr.find("dx.doi") #.startswith('http://dx.doi')
"dx.doi" in testStr
# With url parser, see https://docs.python.org/3/library/urllib.parse.html
from urllib.parse import urlparse
urlparse(testStr).path.strip('/')
testURL2 = "https://zenodo.org/record/3629721"
ID = urlparse(testURL2).path.rsplit('/')[-1]
from urllib.parse import urljoin
urljoin('http://dx.doi.org/10.5281/zenodo.', ID)
type(ID)
# '10.5281/zenodo.'.join(ID)
'10.5281/zenodo.' + ID
# 'tets' + 'TTTT'
recordID
# r = requests.get('https://zenodo.org/api/deposit/depositions/3629721/files') # Needs token
# r = requests.get('https://zenodo.org/api/records/3629721') # OK
r = requests.get(recordID['url']['get']) # OK
if r.ok:
print(f"Found Zenodo record {recordID['zenID']}: {r.json()['metadata']['title']}")
r
type(r.json()['files'])
# Try getting a file with wget
import wget
wget.download(r.json()['files'][0]['links']['self'], out=recordID['downloadDir'].as_posix())
# Human-readable file-size formatting, adapted from
# https://stackoverflow.com/questions/2104080/how-to-check-file-size-in-python
def convert_bytes(num):
    """Convert a size in bytes to a human-readable string.

    Parameters
    ----------
    num : int or float
        Size in bytes.

    Returns
    -------
    str
        Size formatted with one decimal place and a unit suffix,
        e.g. ``'1.5 MB'``.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, unit)
        num /= 1024.0
    # Fix: the original fell off the loop and returned None for sizes
    # >= 1024 TB; report such sizes in petabytes instead.
    return "%3.1f %s" % (num, 'PB')
# Pull all files
# downloadSize = sum(item['size'] for item in r.json()['files'])
# fList = []
# print(f"Record {recordID['zenID']}: {len(r.json()['files'])} files, {convert_bytes(downloadSize)}")
# for n, item in enumerate(r.json()['files']):
# print(f"Getting item {item['links']['self']}")
# fout = wget.download(item['links']['self'], out=recordID['downloadDir'].as_posix())
# print(f"Pulled to file: {fout}")
# fList.append(Path(fout)) # Log local file list
dir(fList[0])
# Unzip if required
import zipfile
for n, item in enumerate(fList):
if item.suffix == '.zip':
with zipfile.ZipFile(item,"r") as zipObj:
zipFiles = zipObj.namelist()
zipObj.extractall(recordID['downloadDir'])
# print(zip_ref)
(zipFiles)
wget.download
"""
Explanation: With Zenodo API
This should be neater than above method... but some methods require (personal) access token to work.
https://developers.zenodo.org/#quickstart-upload
End of explanation
"""
import sys
# ePSproc test codebase (local)
if sys.platform == "win32":
modPath = r'D:\code\github\ePSproc' # Win test machine
else:
modPath = r'/home/femtolab/github/ePSproc/' # Linux test machine
sys.path.append(modPath)
# import epsproc as ep
from epsproc.util.epsdata import ePSdata
dataObj = ePSdata(doi='10.5281/zenodo.3629721', downloadDir=r'/home/femtolab/Downloads')
# dir(dataObj)
# dataObj.downloadSize
# dataObj.r.json()['files']
dataObj.downloadFiles(overwriteFlag=False, overwritePromptFlag=True)
dataObj.fList
dataObj.fList[0].parent
dataObj.unzipFiles()
sum([item.file_size for item in dataObj.zip[0]['info']])
# [print(item) for item in dataObj.zip[0]['info']]
dataObj.zip
"""
Explanation: With class
Above now implemented in epsproc.utils.epsdata.ePSdata class
End of explanation
"""
import sys
# ePSproc test codebase (local)
if sys.platform == "win32":
modPath = r'D:\code\github\ePSproc' # Win test machine
else:
modPath = r'/home/femtolab/github/ePSproc/' # Linux test machine
sys.path.append(modPath)
# import epsproc as ep
from epsproc.util.epsdata import ePSdata
ABCOdata = ePSdata(URL='https://zenodo.org/record/3627347', downloadDir=r'/home/femtolab/Downloads')
ABCOdata.r.ok
ABCOdata.downloadFiles()
from pathlib import Path
# Path(ABCOdata.fList[4].stem + '_joined.zip')
ABCOdata.fList[5]
ABCOdata.fList[5].with_suffix('.zip')
ABCOdata.unzipFiles()
# TODO finish fixing file logic!!!
# Now unzipping OK, including case with extra path info.
# NEED TO MOVE FILES in this case.
# 'pkg' in ABCOdata.zip[0]['zipfile'].relative_to(ABCOdata.zip[0]['path']).parts
ABCOdata.zip
ABCOdata.recordID['downloadDir']/ABCOdata.zip[0]['files'][0]
# dir(ABCOdata)
ABCOdata.fList
# Testing file sorting etc.
# See epsman._repo for prototypes
from collections import Counter
import pprint
fileListTest = ABCOdata.zip[0]['files']
suffixList = [Path(item).suffix for item in fileListTest]
c = Counter(suffixList)
pprint.pprint(c, width=50)
ePSout = [item for item in fileListTest if Path(item).suffix == '.out']
ePSout
# Checking subdirs
import os
path = ABCOdata.recordID['downloadDir']
test = list(os.walk(path))
# [f.path for f in os.scandir(path) if f.is_dir()]
# import glob
# glob.glob(path.as_posix() + '/**/', recursive=True)
len(test)
# list(item[0] for item in test)
[item[0] for item in test]
list_subfolders_with_paths = []
for root, dirs, files in os.walk(path):
for dir in dirs:
list_subfolders_with_paths.append( os.path.join(root, dir) )
# list_subfolders_with_paths.append(dir)
break
list_subfolders_with_paths
"""
Explanation: Test for larger file-set (ABCO)
https://zenodo.org/record/3627347
End of explanation
"""
ABCOdata.zipMP[0]
# Path(ABCOdata.zipMP[0]['files'][0]).parts
# Test file move/copy
# With shutil
# import shutil
# testOut = shutil.move((ABCOdata.zipMP[0]['path']/ABCOdata.zipMP[0]['files'][0]).as_posix(), ABCOdata.zipMP[0]['path'].as_posix())
# testOut
# (ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).parts[0])
# With Path
# testOut = (ABCOdata.zipMP[0]['path']/ABCOdata.zipMP[0]['files'][0]).rename(ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).name)
ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).name
list((ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0])).parent.parent.iterdir()) #.parent.rmdir()
Path(Path(ABCOdata.zipMP[0]['files'][0]).parts[-1]).parent
root = ABCOdata.zipMP[0]['path']
os.listdir(root)
import os
# list(os.walk((ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).parent.parts[0]),topdown=False))
list(os.walk((ABCOdata.zipMP[0]['path']/'.'), topdown=False))
# Recursive dir deletion with Path
# In this case pass top-level dir, contents to be removed
# Modified version of code from https://stackoverflow.com/a/49782093
# ABANDONED - just use os.removedirs!!!!!
# from pathlib import Path
# def rmdir(directory):
# directory = Path(directory)
# for item in directory.iterdir():
# if item.is_dir():
# rmdir(item)
# # else:
# # item.unlink()
# try:
# directory.rmdir()
# return 0
# except OSError as e:
# if e == "[Errno 39] Directory not empty":
# print(f"{})
# rmdir(Path("dir/"))
# Path(ABCOdata.zipMP[0]['files'][0]).parent.is_dir()
# Path(ABCOdata.zipMP[0]['files'][0]).is_file()
# Path(ABCOdata.zipMP[0]['files'][0]).relative_to(ABCOdata.zip[0]['path'])
# Test dir removal
# ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).parent # .parts[0:2]
# os.getcwd()
# With Path.rmdir()
# (ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).parts[0]).rmdir() # This requires dir to be empty, so could be run recursively and safely
# Returns OSError: [Errno 39] Directory not empty: '/home/femtolab/Downloads/3627347/mnt'
# With SHUTIL
# This works. Could be dangerous however! Doesn't require dir to be empty.
# shutil.rmtree(ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).parts[0])
# With os.removedirs - works recursively until non-empty dir found.
# os.removedirs(ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).parts[0])
try:
# Basic case, just use full path
os.removedirs(ABCOdata.zipMP[0]['path']/Path(ABCOdata.zipMP[0]['files'][0]).parent)
# With chdir for extra safety (?)
# currDir = os.getcwd()
# os.chdir(ABCOdata.zipMP[0]['path'])
# os.removedirs(Path(ABCOdata.zipMP[0]['files'][0]).parent)
except OSError as e:
# if e.startswith("[Errno 39] Directory not empty"):
# print(e)
# print(type(e))
# print(dir(e))
# print(e.filename)
# print(e.errno)
if e.errno == 39:
print(f'Pruned dir tree back to {e.filename}')
# return e.filename
else:
raise
currDir
print(os.getcwd())
os.chdir(currDir)
print(os.getcwd())
os.chdir('/home/femtolab/github/ePSproc/epsproc/tests/utilDev')
Path(ABCOdata.zipMP[0]['files'][0]).parent
import os
os.getcwd()
"""
Explanation: Additional multipart-zip testing (move/delete files and dirs)
End of explanation
"""
# FUNCTION TESTING
# Check whether a previously downloaded copy of a record file exists
# locally, and decide whether it needs to be (re)downloaded.
item = dataObj.r.json()['files'][2]
localFile = dataObj.recordID['downloadDir']/item['key']
overwriteFlag = False
overwritePromptFlag = True
downloadFlag = True
if localFile.is_file():
    # Zero means the local and remote sizes agree.
    sizeCheck = localFile.stat().st_size - item['size'] # Quick file size check
    if sizeCheck:
        # Fix: the original tested `not sizeCheck`, which triggered this
        # branch when the sizes MATCHED; the message also lacked the
        # f-prefix, so the byte difference was never interpolated.
        print(f'Local file size incommensurate with remote by {sizeCheck} bytes. File will be downloaded again.')
        downloadFlag = True
    else:
        print('Local file already exists, file size OK.')
        if not (overwriteFlag and overwritePromptFlag):
            downloadFlag = False
        elif (overwriteFlag and overwritePromptFlag):
            # Ask the user before clobbering a good local copy.
            test = input("Download file again (y/n)?: ")
            if test == 'y':
                downloadFlag = True
            else:
                downloadFlag = False
else:
    # No local copy at all.
    downloadFlag = True
if downloadFlag:
    print('File will be downloaded again.')
else:
    print('Skipping download.')
# print(localFile.stat().st_size)
# print(sizeCheck)
# dir(localFile)
"""
Explanation: Function testing
End of explanation
"""
import re
myString = dataObj.r.json()['metadata']['description']
# print(re.search("(?P<url>https?://[^\s]+)", myString).group("url")) # This pulls full <a href .....</a>, ugh.
# re.findall(r'(https?://\S+)', myString) # Gets all URLs, but not correct.
# urls = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', myString) # This gets only base URL
# urls
# This works.
# https://stackoverflow.com/a/6883228
from html.parser import HTMLParser
class MyParser(HTMLParser):
    """Collect the ``href`` attribute of every ``<a>`` tag fed to the parser."""

    def __init__(self, output_list=None):
        super().__init__()
        # Reuse a caller-supplied list when given, otherwise start fresh.
        self.output_list = [] if output_list is None else output_list

    def handle_starttag(self, tag, attrs):
        # attrs is a list of (name, value) pairs; a missing href yields
        # None, matching dict(attrs).get('href') (last occurrence wins).
        if tag != 'a':
            return
        href = None
        for attr_name, attr_value in attrs:
            if attr_name == 'href':
                href = attr_value
        self.output_list.append(href)
p = MyParser()
p.feed(myString)
p.output_list
# With Beautiful Soup
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
from bs4 import BeautifulSoup
# Set object
soup = BeautifulSoup(myString, 'html.parser')
# Find all tags <a
soup.find_all('a')
# Extract URLs
for link in soup.find_all('a'):
print(link.get('href'))
# Test job info summary - HTML rendering
from IPython.core.display import HTML
jobInfo = HTML(dataObj.r.json()['metadata']['description'])
display(jobInfo)
"""
Explanation: Testing HTML parsing & display
For URL extraction.
Best notes: https://stackoverflow.com/questions/6883049/regex-to-extract-urls-from-href-attribute-in-html-with-python
NOTE - use HTML parsers, not regex!
Either inbuilt html.parser, or BeautifulSoup, are suggested.
See also https://github.com/lipoja/URLExtract for another alternative.
End of explanation
"""
# Install with pip
!pip install zenodo_get
# import zenodo_get as zget # Seems to be OK, but empty - issue with import here (designed for CLI?)
from zenodo_get import __main__ as zget # This seems to work.
dir(zget)
# zget.zenodo_get(['','-d http://dx.doi.org/10.5281/zenodo.3629721']) # Throws KeyError at 'files'
# zget.zenodo_get(['','-d 10.5281/zenodo.3629721']) # Throws KeyError at 'files'
zget.zenodo_get(['','-r 3629721']) # Throws KeyError at 'files'
!zenodo_get.py -c
"""
Explanation: With zenodo_get wrapper
For details, see Zenodo https://doi.org/10.5281/zenodo.3676567 or GitLab page
End of explanation
"""
import os
os.path.expanduser('~')
os.mkdir(os.path.expanduser(r'~/Testmkdir')) # OK
os.mkdir(os.path.expanduser(r'/home/femtolab/Testmkdir2')) # OK
# os.path.expanduser(r'/home/femtolab/Testmkdir2')
os.path.expanduser(r'/etc')
testPath = Path('~/etc')
# os.path.expanduser(testPath)
testPath.expanduser()
"""
Explanation: Test homedir stuff
End of explanation
"""
|
nick-youngblut/SIPSim | ipynb/bac_genome/fullCyc/Day1_fullDataset/rep10_noPCR.ipynb | mit | import os
import glob
import re
import nestly
%load_ext rpy2.ipython
%load_ext pushnote
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(gridExtra)
library(phyloseq)
## BD for G+C of 0 or 100
BD.GCp0 = 0 * 0.098 + 1.66
BD.GCp100 = 1 * 0.098 + 1.66
"""
Explanation: TODO: rerun; DBL default changed
Goal
Extension of Day1_rep10 simulations: subsampling OTU table without performing PCR simulation first
Seeing how this affects the abundance distribution of the overlapping taxa in the dataset
Init
End of explanation
"""
workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/'
buildDir = os.path.join(workDir, 'Day1_rep10')
R_dir = '/home/nick/notebook/SIPSim/lib/R/'
fragFile= '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags.pkl'
targetFile = '/home/nick/notebook/SIPSim/dev/fullCyc/CD-HIT/target_taxa.txt'
physeqDir = '/var/seq_data/fullCyc/MiSeq_16SrRNA/515f-806r/lib1-7/phyloseq/'
physeq_bulkCore = 'bulk-core'
physeq_SIP_core = 'SIP-core_unk'
nreps = 10
prefrac_comm_abundance = '1e9'
seq_per_fraction = ['lognormal', 9.432, 0.5, 10000, 30000] # dist, mean, scale, min, max
bulk_days = [1]
nprocs = 12
# building tree structure
nest = nestly.Nest()
## varying params
nest.add('rep', [x + 1 for x in xrange(nreps)])
## set params
nest.add('bulk_day', bulk_days, create_dir=False)
nest.add('abs', [prefrac_comm_abundance], create_dir=False)
nest.add('percIncorp', [0], create_dir=False)
nest.add('percTaxa', [0], create_dir=False)
nest.add('np', [nprocs], create_dir=False)
nest.add('subsample_dist', [seq_per_fraction[0]], create_dir=False)
nest.add('subsample_mean', [seq_per_fraction[1]], create_dir=False)
nest.add('subsample_scale', [seq_per_fraction[2]], create_dir=False)
nest.add('subsample_min', [seq_per_fraction[3]], create_dir=False)
nest.add('subsample_max', [seq_per_fraction[4]], create_dir=False)
### input/output files
nest.add('buildDir', [buildDir], create_dir=False)
nest.add('R_dir', [R_dir], create_dir=False)
nest.add('fragFile', [fragFile], create_dir=False)
nest.add('targetFile', [targetFile], create_dir=False)
nest.add('physeqDir', [physeqDir], create_dir=False)
nest.add('physeq_bulkCore', [physeq_bulkCore], create_dir=False)
# building directory tree
nest.build(buildDir)
# bash file to run
bashFile = os.path.join(buildDir, 'SIPSimRun.sh')
%%writefile $bashFile
#!/bin/bash
export PATH={R_dir}:$PATH
echo '# subsampling from the OTU table (simulating sequencing of the DNA pool)'
SIPSim OTU_subsample \
--dist {subsample_dist} \
--dist_params mean:{subsample_mean},sigma:{subsample_scale} \
--min_size {subsample_min} \
--max_size {subsample_max} \
OTU_abs{abs}.txt \
> OTU_abs{abs}_sub.txt
echo '# making a wide-formatted table'
SIPSim OTU_wideLong -w \
OTU_abs{abs}_sub.txt \
> OTU_abs{abs}_sub_w.txt
echo '# making metadata (phyloseq: sample_data)'
SIPSim OTU_sampleData \
OTU_abs{abs}_sub.txt \
> OTU_abs{abs}_sub_meta.txt
!chmod 777 $bashFile
!cd $workDir; \
nestrun --template-file $bashFile -d Day1_rep10 --log-file log.txt -j 2
"""
Explanation: Nestly
assuming fragments already simulated
assuming Day1_rep10 notebook already ran
End of explanation
"""
%%R
## min G+C cutoff
min_GC = 13.5
## max G+C cutoff
max_GC = 80
## max G+C shift
max_13C_shift_in_BD = 0.036
min_BD = min_GC/100.0 * 0.098 + 1.66
max_BD = max_GC/100.0 * 0.098 + 1.66
max_BD = max_BD + max_13C_shift_in_BD
cat('Min BD:', min_BD, '\n')
cat('Max BD:', max_BD, '\n')
"""
Explanation: BD min/max
what is the min/max BD that we care about?
End of explanation
"""
%%R -i physeqDir -i physeq_SIP_core -i bulk_days
# bulk core samples
F = file.path(physeqDir, physeq_SIP_core)
physeq.SIP.core = readRDS(F)
physeq.SIP.core.m = physeq.SIP.core %>% sample_data
physeq.SIP.core = prune_samples(physeq.SIP.core.m$Substrate == '12C-Con' &
physeq.SIP.core.m$Day %in% bulk_days,
physeq.SIP.core) %>%
filter_taxa(function(x) sum(x) > 0, TRUE)
physeq.SIP.core.m = physeq.SIP.core %>% sample_data
physeq.SIP.core
%%R
## dataframe
df.EMP = physeq.SIP.core %>% otu_table %>%
as.matrix %>% as.data.frame
df.EMP$OTU = rownames(df.EMP)
df.EMP = df.EMP %>%
gather(sample, abundance, 1:(ncol(df.EMP)-1))
df.EMP = inner_join(df.EMP, physeq.SIP.core.m, c('sample' = 'X.Sample'))
df.EMP.nt = df.EMP %>%
group_by(sample) %>%
mutate(n_taxa = sum(abundance > 0)) %>%
ungroup() %>%
distinct(sample) %>%
filter(Buoyant_density >= min_BD,
Buoyant_density <= max_BD)
df.EMP.nt %>% head(n=3)
"""
Explanation: Loading data
Emperical
SIP data
End of explanation
"""
%%R
physeq.dir = '/var/seq_data/fullCyc/MiSeq_16SrRNA/515f-806r/lib1-7/phyloseq/'
physeq.bulk = 'bulk-core'
physeq.file = file.path(physeq.dir, physeq.bulk)
physeq.bulk = readRDS(physeq.file)
physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk = prune_samples(physeq.bulk.m$Exp_type == 'microcosm_bulk' &
physeq.bulk.m$Day %in% bulk_days, physeq.bulk)
physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk
%%R
physeq.bulk.n = transform_sample_counts(physeq.bulk, function(x) x/sum(x))
physeq.bulk.n
%%R
# making long format of each bulk table
bulk.otu = physeq.bulk.n %>% otu_table %>% as.data.frame
ncol = ncol(bulk.otu)
bulk.otu$OTU = rownames(bulk.otu)
bulk.otu = bulk.otu %>%
gather(sample, abundance, 1:ncol)
bulk.otu = inner_join(physeq.bulk.m, bulk.otu, c('X.Sample' = 'sample')) %>%
dplyr::select(OTU, abundance) %>%
rename('bulk_abund' = abundance)
bulk.otu %>% head(n=3)
%%R
# joining tables
df.EMP.j = inner_join(df.EMP, bulk.otu, c('OTU' = 'OTU')) %>%
filter(Buoyant_density >= min_BD,
Buoyant_density <= max_BD)
df.EMP.j %>% head(n=3)
"""
Explanation: bulk soil samples
End of explanation
"""
OTU_files = !find $buildDir -name "OTU_abs1e9_sub.txt"
#OTU_files = !find $buildDir -name "OTU_abs1e9.txt"
OTU_files
%%R -i OTU_files
# loading files
df.SIM = list()
for (x in OTU_files){
SIM_rep = gsub('/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/Day1_rep10/', '', x)
#SIM_rep = gsub('/OTU_abs1e9_sub.txt', '', SIM_rep)
SIM_rep = gsub('/OTU_abs1e9_sub.txt', '', SIM_rep)
df.SIM[[SIM_rep]] = read.delim(x, sep='\t')
}
df.SIM = do.call('rbind', df.SIM)
df.SIM$SIM_rep = gsub('\\.[0-9]+$', '', rownames(df.SIM))
rownames(df.SIM) = 1:nrow(df.SIM)
df.SIM %>% head
%%R
## edit table
df.SIM.nt = df.SIM %>%
filter(count > 0) %>%
group_by(SIM_rep, library, BD_mid) %>%
summarize(n_taxa = n()) %>%
filter(BD_mid >= min_BD,
BD_mid <= max_BD)
df.SIM.nt %>% head
"""
Explanation: Simulated
End of explanation
"""
# loading comm files
comm_files = !find $buildDir -name "bulk-core_comm_target.txt"
comm_files
%%R -i comm_files
df.comm = list()
for (f in comm_files){
rep = gsub('.+/Day1_rep10/([0-9]+)/.+', '\\1', f)
df.comm[[rep]] = read.delim(f, sep='\t') %>%
dplyr::select(library, taxon_name, rel_abund_perc) %>%
rename('bulk_abund' = rel_abund_perc) %>%
mutate(bulk_abund = bulk_abund / 100)
}
df.comm = do.call('rbind', df.comm)
df.comm$SIM_rep = gsub('\\.[0-9]+$', '', rownames(df.comm))
rownames(df.comm) = 1:nrow(df.comm)
df.comm %>% head(n=3)
%%R
## joining tables
df.SIM.j = inner_join(df.SIM, df.comm, c('SIM_rep' = 'SIM_rep',
'library' = 'library',
'taxon' = 'taxon_name')) %>%
filter(BD_mid >= min_BD,
BD_mid <= max_BD)
df.SIM.j %>% head(n=3)
%%R
# filtering & combining emperical w/ simulated data
## emperical
max_BD_range = max(df.EMP.j$Buoyant_density) - min(df.EMP.j$Buoyant_density)
df.EMP.j.f = df.EMP.j %>%
filter(abundance > 0) %>%
group_by(OTU) %>%
summarize(mean_rel_abund = mean(bulk_abund),
min_BD = min(Buoyant_density),
max_BD = max(Buoyant_density),
BD_range = max_BD - min_BD,
BD_range_perc = BD_range / max_BD_range * 100) %>%
ungroup() %>%
mutate(dataset = 'emperical',
SIM_rep = NA)
## simulated
max_BD_range = max(df.SIM.j$BD_mid) - min(df.SIM.j$BD_mid)
df.SIM.j.f = df.SIM.j %>%
filter(count > 0) %>%
group_by(SIM_rep, taxon) %>%
summarize(mean_rel_abund = mean(bulk_abund),
min_BD = min(BD_mid),
max_BD = max(BD_mid),
BD_range = max_BD - min_BD,
BD_range_perc = BD_range / max_BD_range * 100) %>%
ungroup() %>%
rename('OTU' = taxon) %>%
mutate(dataset = 'simulated')
## join
df.j = rbind(df.EMP.j.f, df.SIM.j.f) %>%
filter(BD_range_perc > 0,
mean_rel_abund > 0)
df.j$SIM_rep = reorder(df.j$SIM_rep, df.j$SIM_rep %>% as.numeric)
df.j %>% head(n=3)
%%R -h 400
## plotting
# Pre-fractionation abundance vs. % of the BD gradient spanned, per dataset.
ggplot(df.j, aes(mean_rel_abund, BD_range_perc, color=SIM_rep)) +
    geom_point(alpha=0.3) +
    scale_x_log10() +
    scale_y_continuous() +
    labs(x='Pre-fractionation abundance', y='% of total BD range') +
    facet_grid(dataset ~ .) +
    theme_bw() +
    theme(
        text = element_text(size=16),
        panel.grid = element_blank()#,
        #legend.position = 'none'
    )
"""
Explanation: 'bulk soil' community files
End of explanation
"""
%%R -i targetFile
# Load the table of target (incorporator) taxa.
df.target = read.delim(targetFile, sep='\t')
df.target %>% nrow %>% print
df.target %>% head(n=3)
%%R
# filtering to just target taxa
# Restrict the BD-span table to target OTUs, then re-plot the span.
df.j.t = df.j %>%
    filter(OTU %in% df.target$OTU)
df.j %>% nrow %>% print
df.j.t %>% nrow %>% print
## plotting
ggplot(df.j.t, aes(mean_rel_abund, BD_range_perc, color=SIM_rep)) +
    geom_point(alpha=0.5, shape='O') +
    scale_x_log10() +
    scale_y_continuous() +
    #scale_color_manual(values=c('blue', 'red')) +
    labs(x='Pre-fractionation abundance', y='% of total BD range') +
    facet_grid(dataset ~ .) +
    theme_bw() +
    theme(
        text = element_text(size=16),
        panel.grid = element_blank()#,
        #legend.position = 'none'
    )
"""
Explanation: BD span of just overlapping taxa
Taxa overlapping between emperical data and genomes in dataset
These taxa should have the same relative abundances in both datasets.
The comm file was created from the emperical dataset phyloseq file.
End of explanation
"""
%%R
# formatting data
# Pair each OTU's simulated (.x) and emperical (.y) BD spans and take the
# difference in % of gradient spanned.
df.1 = df.j.t %>%
    filter(dataset == 'simulated') %>%
    select(SIM_rep, OTU, mean_rel_abund, BD_range, BD_range_perc)
df.2 = df.j.t %>%
    filter(dataset == 'emperical') %>%
    select(SIM_rep, OTU, mean_rel_abund, BD_range, BD_range_perc)
df.12 = inner_join(df.1, df.2, c('OTU' = 'OTU')) %>%
    mutate(BD_diff_perc = BD_range_perc.y - BD_range_perc.x)
df.12$SIM_rep.x = reorder(df.12$SIM_rep.x, df.12$SIM_rep.x %>% as.numeric)
%%R -w 800 -h 500
# BD-span difference vs. abundance, one panel per simulation rep.
ggplot(df.12, aes(mean_rel_abund.x, BD_diff_perc)) +
    geom_point(alpha=0.5) +
    scale_x_log10() +
    labs(x='Pre-fractionation relative abundance',
         y='Difference in % of gradient spanned\n(emperical - simulated)',
         title='Overlapping taxa') +
    facet_wrap(~ SIM_rep.x) +
    theme_bw() +
    theme(
        text = element_text(size=16),
        panel.grid = element_blank(),
        legend.position = 'none'
    )
"""
Explanation: Correlation between relative abundance and BD_range diff
Are low abundant taxa more variable in their BD span
End of explanation
"""
%%R
# Join emperical & simulated abundance distributions for the target taxa,
# harmonizing column names and keeping only OTUs present in both datasets.
# Returns a long table with per-sample relative abundances.
join_abund_dists = function(df.EMP.j, df.SIM.j, df.target){
    ## emperical
    df.EMP.j.f = df.EMP.j %>%
        filter(abundance > 0) %>%
        #filter(!OTU %in% c('OTU.32', 'OTU.2', 'OTU.4')) %>% # TEST
        dplyr::select(OTU, sample, abundance, Buoyant_density, bulk_abund) %>%
        mutate(dataset = 'emperical', SIM_rep = NA) %>%
        filter(OTU %in% df.target$OTU)
    ## simulated
    # rename simulated columns to match the emperical table's schema
    df.SIM.j.f = df.SIM.j %>%
        filter(count > 0) %>%
        #filter(!taxon %in% c('OTU.32', 'OTU.2', 'OTU.4')) %>% # TEST
        dplyr::select(taxon, fraction, count, BD_mid, bulk_abund, SIM_rep) %>%
        rename('OTU' = taxon,
               'sample' = fraction,
               'Buoyant_density' = BD_mid,
               'abundance' = count) %>%
        mutate(dataset = 'simulated') %>%
        filter(OTU %in% df.target$OTU)
    ## getting just intersecting OTUs
    OTUs.int = intersect(df.EMP.j.f$OTU, df.SIM.j.f$OTU)
    df.j = rbind(df.EMP.j.f, df.SIM.j.f) %>%
        filter(OTU %in% OTUs.int) %>%
        group_by(sample) %>%
        mutate(rel_abund = abundance / sum(abundance))
    cat('Number of overlapping OTUs between emperical & simulated:',
        df.j$OTU %>% unique %>% length, '\n\n')
    return(df.j)
}
df.j = join_abund_dists(df.EMP.j, df.SIM.j, df.target)
df.j %>% head(n=3) %>% as.data.frame
%%R
# closure operation
# Re-normalize relative abundances within each sample so the subset of
# overlapping taxa sums to 1.
df.j = df.j %>%
    ungroup() %>%
    mutate(SIM_rep = SIM_rep %>% as.numeric) %>%
    group_by(dataset, SIM_rep, sample) %>%
    mutate(rel_abund_c = rel_abund / sum(rel_abund)) %>%
    ungroup()
df.j %>% head(n=3) %>% as.data.frame
%%R -h 1500 -w 800
# plotting
# Area plot of each taxon's closed relative abundance across the BD gradient.
plot_abunds = function(df){
    p = ggplot(df, aes(Buoyant_density, rel_abund_c, fill=OTU)) +
        geom_area(stat='identity', position='dodge', alpha=0.5) +
        labs(x='Buoyant density',
             y='Subsampled community\n(relative abundance for subset taxa)') +
        theme_bw() +
        theme(
            text = element_text(size=16),
            legend.position = 'none',
            axis.title.y = element_text(vjust=1),
            axis.title.x = element_blank(),
            plot.margin=unit(c(0.1,1,0.1,1), "cm")
        )
    return(p)
}
# simulations
df.j.f = df.j %>%
    filter(dataset == 'simulated')
p.SIM = plot_abunds(df.j.f)
p.SIM = p.SIM + facet_grid(SIM_rep ~ .)
# emperical
df.j.f = df.j %>%
    filter(dataset == 'emperical')
p.EMP = plot_abunds(df.j.f)
# status
cat('Number of overlapping taxa:', df.j$OTU %>% unique %>% length, '\n')
# make figure
# stack the emperical panel above the per-rep simulated panels
grid.arrange(p.EMP, p.SIM, ncol=1, heights=c(1,5))
"""
Explanation: Notes
between Day1_rep10, Day1_richFromTarget_rep10, and Day1_add_Rich_rep10:
Day1_rep10 has the most accurate representation of BD span (% of gradient spanned by taxa).
Accuracy drops at ~1e-3 to ~5e-4, but this is caused by detection limits (veil-line effect).
Comparing abundance distributions of overlapping taxa
End of explanation
"""
%%R
# Abundance-weighted mean BD ("center of mass") per OTU, per dataset/rep.
center_mass = function(df){
    df = df %>%
        group_by(dataset, SIM_rep, OTU) %>%
        summarize(center_mass = weighted.mean(Buoyant_density, rel_abund_c, na.rm=T),
                  median_rel_abund_c = median(rel_abund_c)) %>%
        ungroup()
    return(df)
}
df.j.cm = center_mass(df.j)
%%R -w 650
# getting mean cm for all SIM_reps
# Average center of mass across reps, then compare emperical vs. simulated.
df.j.cm.s = df.j.cm %>%
    group_by(dataset, OTU) %>%
    summarize(mean_cm = mean(center_mass, na.rm=T),
              stdev_cm = sd(center_mass),
              median_rel_abund_c = first(median_rel_abund_c)) %>%
    ungroup() %>%
    spread(dataset, mean_cm) %>%
    group_by(OTU) %>%
    summarize(stdev_cm = mean(stdev_cm, na.rm=T),
              emperical = mean(emperical, na.rm=T),
              simulated = mean(simulated, na.rm=T),
              median_rel_abund_c = first(median_rel_abund_c)) %>%
    ungroup()
# check
cat('Number of OTUs:', df.j.cm.s$OTU %>% unique %>% length, '\n')
# plotting
# dashed 1:1 line marks perfect agreement between the two datasets
ggplot(df.j.cm.s, aes(emperical, simulated, color=median_rel_abund_c,
                      ymin = simulated - stdev_cm,
                      ymax = simulated + stdev_cm)) +
    geom_pointrange() +
    stat_function(fun = function(x) x, linetype='dashed', alpha=0.5, color='red') +
    scale_x_continuous(limits=c(1.69, 1.74)) +
    scale_y_continuous(limits=c(1.705, 1.74)) +
    scale_color_gradient(trans='log') +
    labs(title='Center of mass') +
    theme_bw() +
    theme(
        text = element_text(size=16)
    )
"""
Explanation: Calculating center of mass for overlapping taxa
weighted mean BD, where weights are relative abundances
End of explanation
"""
%%R
# Center-of-mass difference (emperical - simulated) vs. abundance.
df.j.cm.s.f = df.j.cm.s %>%
    mutate(CM_diff = emperical - simulated)
ggplot(df.j.cm.s.f, aes(median_rel_abund_c, CM_diff)) +
    geom_point() +
    scale_x_log10() +
    labs(x='Relative abundance', y='Center of mass (Emperical - Simulated)', title='Center of mass') +
    theme_bw() +
    theme(
        text = element_text(size=16)
    )
"""
Explanation: Notes
Leaving out the PCR simulation does not help with simulation accuracy for center of mass on overlapping taxa
plotting taxon abundance vs diff between emperical & simulated
End of explanation
"""
|
google/earthengine-community | tutorials/time-series-visualization-with-altair/index.ipynb | apache-2.0 | import ee
ee.Authenticate()
ee.Initialize()
"""
Explanation: Time Series Visualization with Altair
Author: jdbcode
This tutorial provides methods for generating time series data in Earth Engine and visualizing it with the Altair library using drought and vegetation response as an example.
Topics include:
Time series region reduction in Earth Engine
Formatting a table in Earth Engine
Transferring an Earth Engine table to a Colab Python kernel
Converting an Earth Engine table to a pandas DataFrame
Data representation with various Altair chart types
Note that this tutorial uses the Earth Engine Python API in a Colab notebook.
Context
At the heart of this tutorial is the notion of data reduction and the need to transform data into insights to help inform our understanding of Earth processes and humans' role in them. It combines a series of technologies, each best suited to a particular task in the data reduction process. Earth Engine is used to access, clean, and reduce large amounts of spatiotemporal data, pandas is used to analyze and organize the results, and Altair is used to visualize the results.
Note: This notebook demonstrates an analysis template and interactive workflow that is appropriate for a certain size of dataset, but there are limitations to interactive computation time and server-to-client data transfer size imposed by Colab and Earth Engine. To analyze even larger datasets, you may need to modify the workflow to export FeatureCollection results from Earth Engine as static assets and then use the static assets to perform the subsequent steps involving Earth Engine table formatting, conversion to pandas DataFrame, and charting with Altair.
Materials
Datasets
Climate
Drought severity (PDSI)
Historical climate (PRISM)
Projected climate (NEX-DCP30)
Vegetation proxies
NDVI (MODIS)
NBR (Landsat)
Region of interest
The region of interest for these examples is the Sierra Nevada ecoregion of California. The vegetation grades from mostly ponderosa pine and Douglas-fir at low elevations on the western side, to pines and Sierra juniper on the eastern side, and to fir and other conifers at higher elevations.
General workflow
Preparation of every dataset for visualization follows the same basic steps:
Filter the dataset (server-side Earth Engine)
Reduce the data region by a statistic (server-side Earth Engine)
Format the region reduction into a table (server-side Earth Engine)
Convert the Earth Engine table to a DataFrame (server-side Earth Engine > client-side Python kernel)
Alter the DataFrame (client-side pandas)
Plot the DataFrame (client-side Altair)
The first dataset will walk through each step in detail. Following examples will provide less description, unless there is variation that merits note.
Python setup
Earth Engine API
Import the Earth Engine library.
Authenticate access (registration verification and Google account access).
Initialize the API.
End of explanation
"""
import pandas as pd
import altair as alt
import numpy as np
import folium
"""
Explanation: Other libraries
Import other libraries used in this notebook.
pandas: data analysis (including the DataFrame data structure)
altair: declarative visualization library (used for charting)
numpy: array-processing package (used for linear regression)
folium: interactive web map
End of explanation
"""
def create_reduce_region_function(geometry,
                                  reducer=ee.Reducer.mean(),
                                  scale=1000,
                                  crs='EPSG:4326',
                                  bestEffort=True,
                                  maxPixels=1e13,
                                  tileScale=4):
  """Build a per-image region-reduction function for ee.ImageCollection.map().

  The returned closure reduces the pixels of an image intersecting
  ``geometry`` to one statistic per band. See ee.Image.reduceRegion() for
  details on the reduction parameters.

  Args:
    geometry:
      An ee.Geometry that defines the region over which to reduce data.
    reducer:
      Optional; an ee.Reducer that defines the reduction method.
    scale:
      Optional; nominal scale in meters of the projection to work in.
    crs:
      Optional; an ee.Projection or EPSG string ('EPSG:5070') defining the
      projection to work in.
    bestEffort:
      Optional; if True, use a larger scale when the geometry contains too
      many pixels at the given scale for the operation to succeed.
    maxPixels:
      Optional; maximum number of pixels to reduce.
    tileScale:
      Optional; aggregation tile scaling factor; larger values (e.g. 2 or 4)
      may enable computations that run out of memory with the default.

  Returns:
    A function mapping an ee.Image to an ee.Feature whose properties hold
    the per-band reduction results plus a 'millis' timestamp (milliseconds
    from Unix epoch, included to enable time series plotting).
  """
  # Freeze the reduceRegion() keyword arguments once at closure-creation
  # time so every mapped image is reduced with the same configuration.
  reduce_kwargs = dict(
      reducer=reducer,
      geometry=geometry,
      scale=scale,
      crs=crs,
      bestEffort=bestEffort,
      maxPixels=maxPixels,
      tileScale=tileScale)

  def reduce_region_function(img):
    """Reduce ``img`` by region and attach its timestamp as 'millis'."""
    stat = img.reduceRegion(**reduce_kwargs)
    return ee.Feature(geometry, stat).set({'millis': img.date().millis()})

  return reduce_region_function
"""
Explanation: Region reduction function
Reduction of pixels intersecting the region of interest to a statistic will be performed multiple times. Define a reusable function that can perform the task for each dataset. The function accepts arguments such as scale and reduction method to parameterize the operation for each particular analysis.
Note: most of the reduction operations in this tutorial use a large pixel scale so that operations complete quickly. In your own application, set the scale and other parameter arguments as you wish.
End of explanation
"""
# Define a function to transfer feature properties to a dictionary.
# Keys are the property names of the collection's first feature; values are
# equal-length lists of the corresponding property values — i.e. a
# server-side table that pandas can consume after getInfo().
def fc_to_dict(fc):
  prop_names = fc.first().propertyNames()
  # One toList() reducer per property collects each column as a list.
  prop_lists = fc.reduceColumns(
      reducer=ee.Reducer.toList().repeat(prop_names.size()),
      selectors=prop_names).get('list')
  return ee.Dictionary.fromLists(prop_names, prop_lists)
"""
Explanation: Formatting
The result of the region reduction function above applied to an ee.ImageCollection produces an ee.FeatureCollection. This data needs to be transferred to the Python kernel, but serialized feature collections are large and awkward to deal with. This step defines a function to convert the feature collection to an ee.Dictionary where the keys are feature property names and values are corresponding lists of property values, which pandas can deal with handily.
Extract the property values from the ee.FeatureCollection as a list of lists stored in an ee.Dictionary using reduceColumns().
Extract the list of lists from the dictionary.
Add names to each list by converting to an ee.Dictionary where keys are property names and values are the corresponding value lists.
The returned ee.Dictionary is essentially a table, where keys define columns and list elements define rows.
End of explanation
"""
# Build a 20-year window ending today and load the gridded PDSI band.
today = ee.Date(pd.to_datetime('today'))
date_range = ee.DateRange(today.advance(-20, 'years'), today)
pdsi = ee.ImageCollection('GRIDMET/DROUGHT').filterDate(date_range).select('pdsi')
# Sierra Nevada EPA Level-3 ecoregion boundary = area of interest (AOI).
aoi = ee.FeatureCollection('EPA/Ecoregions/2013/L3').filter(
    ee.Filter.eq('na_l3name', 'Sierra Nevada')).geometry()
"""
Explanation: Drought severity
In this section we'll look at a time series of drought severity as a calendar heat map and a bar chart.
Import data
Load the gridded Palmer Drought Severity Index (PDSI) data as an ee.ImageCollection.
Load the EPA Level-3 ecoregion boundaries as an ee.FeatureCollection and filter it to include only the Sierra Nevada region, which defines the area of interest (AOI).
End of explanation
"""
# Mean PDSI over the AOI per image; drop features whose computed values are
# null (occurs when all pixels in the AOI are masked).
reduce_pdsi = create_reduce_region_function(
    geometry=aoi, reducer=ee.Reducer.mean(), scale=5000, crs='EPSG:3310')
pdsi_stat_fc = ee.FeatureCollection(pdsi.map(reduce_pdsi)).filter(
    ee.Filter.notNull(pdsi.first().bandNames()))
"""
Explanation: Note: the aoi defined above will be used throughout this tutorial. In your own application, redefine it for your own area of interest.
Reduce data
Create a region reduction function.
Map the function over the pdsi image collection to reduce each image.
Filter out any resulting features that have null computed values (occurs when all pixels in an AOI are masked).
End of explanation
"""
# Optional: export the reduction results as an asset (for long-running jobs);
# uncomment task.start() to launch the batch export.
task = ee.batch.Export.table.toAsset(
    collection=pdsi_stat_fc,
    description='pdsi_stat_fc export',
    assetId='users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
# task.start()
"""
Explanation: STOP:
Optional export
If your process is long-running, you'll want to export the pdsi_stat_fc variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on. Please see the Developer Guide section on exporting with the Python API.
Export to asset:
End of explanation
"""
# pdsi_stat_fc = ee.FeatureCollection('users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
"""
Explanation: Import the asset after the export completes:
End of explanation
"""
pdsi_dict = fc_to_dict(pdsi_stat_fc).getInfo()
"""
Explanation: * Remove comments (#) to run the above cells.
CONTINUE:
Server to client transfer
The ee.FeatureCollection needs to be converted to a dictionary and transferred to the Python kernel.
Apply the fc_to_dict function to convert from ee.FeatureCollection to ee.Dictionary.
Call getInfo() on the ee.Dictionary to transfer the data client-side.
End of explanation
"""
print(type(pdsi_dict), '\n')
for prop in pdsi_dict.keys():
print(prop + ':', pdsi_dict[prop][0:3] + ['...'])
"""
Explanation: The result is a Python dictionary. Print a small part to see how it is formatted.
End of explanation
"""
pdsi_df = pd.DataFrame(pdsi_dict)
"""
Explanation: Convert the Python dictionary to a pandas DataFrame.
End of explanation
"""
display(pdsi_df)
print(pdsi_df.dtypes)
"""
Explanation: Preview the DataFrame and check the column data types.
End of explanation
"""
# Function to add date variables to DataFrame.
def add_date_info(df):
  """Add date attribute columns derived from the 'millis' column.

  Converts 'millis' (milliseconds from Unix epoch) to a pandas 'Timestamp'
  column and expands it into 'Year', 'Month', 'Day' and day-of-year ('DOY')
  columns. The DataFrame is modified in place and also returned.
  """
  df['Timestamp'] = pd.to_datetime(df['millis'], unit='ms')
  # Use the .dt accessor on the existing column instead of rebuilding a
  # DatetimeIndex for every derived column.
  dt = df['Timestamp'].dt
  df['Year'] = dt.year
  df['Month'] = dt.month
  df['Day'] = dt.day
  df['DOY'] = dt.dayofyear
  return df
"""
Explanation: Add date columns
Add date columns derived from the milliseconds from Unix epoch column. The pandas library provides functions and objects for timestamps and the DataFrame object allows for easy mutation.
Define a function to add date variables to the DataFrame: year, month, day, and day of year (DOY).
End of explanation
"""
pdsi_df = add_date_info(pdsi_df)
pdsi_df.head(5)
"""
Explanation: Note: the above function for adding date information to a DataFrame will be used throughout this tutorial.
Apply the add_date_info function to the PDSI DataFrame to add date attribute columns, preview the results.
End of explanation
"""
# Tidy columns: capitalize 'pdsi' and drop bookkeeping columns.
pdsi_df = pdsi_df.rename(columns={
    'pdsi': 'PDSI'
}).drop(columns=['millis', 'system:index'])
pdsi_df.head(5)
"""
Explanation: Rename and drop columns
Often it is desirable to rename columns and/or remove unnecessary columns. Do both here and preview the DataFrame.
End of explanation
"""
pdsi_df.dtypes
"""
Explanation: Check the data type of each column.
End of explanation
"""
# Calendar heatmap: year (x) by month (y); cell color = mean monthly PDSI
# (mean aggregate needed because each month has three PDSI observations).
alt.Chart(pdsi_df).mark_rect().encode(
    x='Year:O',
    y='Month:O',
    color=alt.Color(
        'mean(PDSI):Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
    tooltip=[
        alt.Tooltip('Year:O', title='Year'),
        alt.Tooltip('Month:O', title='Month'),
        alt.Tooltip('mean(PDSI):Q', title='PDSI')
    ]).properties(width=600, height=300)
"""
Explanation: At this point the DataFrame is in good shape for charting with Altair.
Calendar heatmap
Chart PDSI data as a calendar heatmap. Set observation year as the x-axis variable, month as y-axis, and PDSI value as color.
Note that Altair features a convenient method for aggregating values within groups while encoding the chart (i.e., no need to create a new DataFrame). The mean aggregate transform is applied here because each month has three PDSI observations (year and month are the grouping factors).
Also note that a tooltip has been added to the chart; hovering over cells reveals the values of the selected variables.
End of explanation
"""
# Bar chart: one thin bar per PDSI observation, colored by severity.
alt.Chart(pdsi_df).mark_bar(size=1).encode(
    x='Timestamp:T',
    y='PDSI:Q',
    color=alt.Color(
        'PDSI:Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
    tooltip=[
        alt.Tooltip('Timestamp:T', title='Date'),
        alt.Tooltip('PDSI:Q', title='PDSI')
    ]).properties(width=600, height=300)
"""
Explanation: The calendar heat map is good for interpretation of relative intra- and inter-annual differences in PDSI. However, since the PDSI variable is represented by color, estimating absolute values and magnitude of difference is difficult.
Bar chart
Chart PDSI time series as a bar chart to more easily interpret absolute values and compare them over time. Here, the observation timestamp is represented on the x-axis and PDSI is represented by both the y-axis and color. Since each PDSI observation has a unique timestamp that can be plotted to the x-axis, there is no need to aggregate PDSI values as in the above chart. A tooltip is added to the chart; hover over the bars to reveal the values for each variable.
End of explanation
"""
# MODIS 16-day NDVI over the same window; mean-reduce over the AOI at 1 km.
ndvi = ee.ImageCollection('MODIS/006/MOD13A2').filterDate(date_range).select('NDVI')
reduce_ndvi = create_reduce_region_function(
    geometry=aoi, reducer=ee.Reducer.mean(), scale=1000, crs='EPSG:3310')
ndvi_stat_fc = ee.FeatureCollection(ndvi.map(reduce_ndvi)).filter(
    ee.Filter.notNull(ndvi.first().bandNames()))
"""
Explanation: This temporal bar chart makes it easier to interpret and compare absolute values of PDSI over time, but relative intra- and inter-annual variability are arguably harder to interpret because the division of year and month is not as distinct as in the calendar heatmap above.
Take note of the extended and severe period of drought from 2012 through 2016. In the next section, we'll look for a vegetation response to this event.
Vegetation productivity
NDVI is a proxy measure of photosynthetic capacity and is used in this tutorial to investigate vegetation response to the 2012-2016 drought identified in the PDSI bar chart above.
MODIS provides an analysis-ready 16-day NDVI composite that is well suited for regional investigation of temporal vegetation dynamics. The following steps reduce and prepare this data for charting in the same manner as the PDSI data above; please refer to previous sections to review details.
Import and reduce
Load the MODIS NDVI data as an ee.ImageCollection.
Create a region reduction function.
Apply the function to all images in the time series.
Filter out features with null computed values.
End of explanation
"""
ndvi_dict = fc_to_dict(ndvi_stat_fc).getInfo()
ndvi_df = pd.DataFrame(ndvi_dict)
display(ndvi_df)
print(ndvi_df.dtypes)
"""
Explanation: STOP:
If your process is long-running, you'll want to export the ndvi_stat_fc variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on.
Please see the above Optional export section for more details.
CONTINUE:
Prepare DataFrame
Transfer data from the server to the client.
Convert the Python dictionary to a pandas DataFrame.
Preview the DataFrame and check data types.
End of explanation
"""
ndvi_df['NDVI'] = ndvi_df['NDVI'] / 10000
ndvi_df = add_date_info(ndvi_df)
ndvi_df.head(5)
"""
Explanation: Remove the NDVI scaling.
Add date attribute columns.
Preview the DataFrame.
End of explanation
"""
# DOY line chart: one line per year; mouseover highlights the nearest year.
highlight = alt.selection(
    type='single', on='mouseover', fields=['Year'], nearest=True)
base = alt.Chart(ndvi_df).encode(
    x=alt.X('DOY:Q', scale=alt.Scale(domain=[0, 353], clamp=True)),
    y=alt.Y('NDVI:Q', scale=alt.Scale(domain=[0.1, 0.6])),
    color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')))
# Invisible points carry the tooltip and the mouseover selection.
points = base.mark_circle().encode(
    opacity=alt.value(0),
    tooltip=[
        alt.Tooltip('Year:O', title='Year'),
        alt.Tooltip('DOY:Q', title='DOY'),
        alt.Tooltip('NDVI:Q', title='NDVI')
    ]).add_selection(highlight)
# The selected year's line is drawn thicker.
lines = base.mark_line().encode(
    size=alt.condition(~highlight, alt.value(1), alt.value(3)))
(points + lines).properties(width=600, height=350).interactive()
"""
Explanation: These NDVI time series data are now ready for plotting.
DOY line chart
Make a day of year (DOY) line chart where each line represents a year of observations. This chart makes it possible to compare the same observation date among years. Use it to compare NDVI values for years during the drought and not.
Day of year is represented on the x-axis and NDVI on the y-axis. Each line represents a year and is distinguished by color. Note that this plot includes a tooltip and has been made interactive so that the axes can be zoomed and panned.
End of explanation
"""
# Median NDVI line with an interquartile-range band, grouped by DOY.
base = alt.Chart(ndvi_df).encode(
    x=alt.X('DOY:Q', scale=alt.Scale(domain=(150, 340))))
line = base.mark_line().encode(
    y=alt.Y('median(NDVI):Q', scale=alt.Scale(domain=(0.47, 0.53))))
band = base.mark_errorband(extent='iqr').encode(
    y='NDVI:Q')
(line + band).properties(width=600, height=300).interactive()
"""
Explanation: The first thing to note is that winter dates (when there is snow in the Sierra Nevada ecoregion) exhibit highly variable inter-annual NDVI, but spring, summer, and fall dates are more consistent. With regard to drought effects on vegetation, summer and fall dates are the most sensitive time. Zooming into observations for the summer/fall days (224-272), you'll notice that many years have a u-shaped pattern where NDVI values decrease and then rise.
Another way to view these data is to plot the distribution of NDVI by DOY represented as an interquartile range envelope and median line. Here, these two charts are defined and then combined in the following snippet.
Define a base chart.
Define a line chart for median NDVI (note the use of aggregate median transform grouping by DOY).
Define a band chart using 'iqr' (interquartile range) to represent NDVI distribution grouping on DOY.
Combine the line and band charts.
End of explanation
"""
# Intra-annual minimum NDVI within DOY 224-272, per year.
ndvi_doy_range = [224, 272]
ndvi_df_sub = ndvi_df[(ndvi_df['DOY'] >= ndvi_doy_range[0])
                      & (ndvi_df['DOY'] <= ndvi_doy_range[1])]
ndvi_df_sub = ndvi_df_sub.groupby('Year').agg('min')
"""
Explanation: The summary statistics for the summer/fall days (224-272) certainly show an NDVI reduction, but there is also variability; some years exhibit greater NDVI reduction than others as suggested by the wide interquartile range during the middle of the summer. Assuming that NDVI reduction is due to water and heat limiting photosynthesis, we can hypothesize that during years of drought, photosynthesis (NDVI) will be lower than non-drought years. We can investigate the relationship between photosynthesis (NDVI) and drought (PDSI) using a scatter plot and linear regression.
Drought and productivity relationship
A scatterplot is a good way to visualize the relationship between two variables. Here, PDSI (drought indicator) will be plotted on the x-axis and NDVI (vegetation productivity) on the y-axis. To achieve this, both variables must exist in the same DataFrame. Each row will be an observation in time and columns will correspond to PDSI and NDVI values. Currently, PDSI and NDVI are in two different DataFrames and need to be merged.
Prepare DataFrames
Before they can be merged, each variable must be reduced to a common temporal observation unit to define correspondence. There are a number of ways to do this and each will define the relationship between PDSI and NDVI differently. Here, our temporal unit will be an annual observation set where NDVI is reduced to the intra-annual minimum from DOY 224 to 272 and PDSI will be the mean from DOY 1 to 272. We are proposing that average drought severity for the first three quarters of a year are related to minimum summer NDVI for a given year.
Filter the NDVI DataFrame to observations that occur between DOY 224 and 272.
Reduce the DOY-filtered subset to intra-annual minimum NDVI.
End of explanation
"""
# Mean PDSI within DOY 1-272 (first three quarters of the year), per year.
pdsi_doy_range = [1, 272]
pdsi_df_sub = pdsi_df[(pdsi_df['DOY'] >= pdsi_doy_range[0])
                      & (pdsi_df['DOY'] <= pdsi_doy_range[1])]
pdsi_df_sub = pdsi_df_sub.groupby('Year').agg('mean')
"""
Explanation: Note: in your own application you may find that a different DOY range is more suitable, change the ndvi_doy_range as needed.
Filter the PDSI DataFrame to observations that occur between DOY 1 and 272.
Reduce the values within a given year to the mean of the observations.
End of explanation
"""
# Join the annual NDVI and PDSI summaries on 'Year'; keep only the columns
# needed for the scatter plot.
ndvi_pdsi_df = pd.merge(
    ndvi_df_sub, pdsi_df_sub, how='left', on='Year').reset_index()
ndvi_pdsi_df = ndvi_pdsi_df[['Year', 'NDVI', 'PDSI']]
ndvi_pdsi_df.head(5)
"""
Explanation: Note: in your own application you may find that a different DOY range is more suitable, change the pdsi_doy_range as needed.
Perform a join on 'Year' to combine the two reduced DataFrames.
Select only the columns of interest: 'Year', 'NDVI', 'PDSI'.
Preview the DataFrame.
End of explanation
"""
# Line of best fit: 1st-degree polynomial predicting NDVI from PDSI,
# evaluated at each year's PDSI value.
ndvi_pdsi_df['Fit'] = np.poly1d(
    np.polyfit(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'], 1))(
        ndvi_pdsi_df['PDSI'])
ndvi_pdsi_df.head(5)
"""
Explanation: NDVI and PDSI are now included in the same DataFrame linked by Year. This format is suitable for determining a linear relationship and drawing a line of best fit through the data.
Including a line of best fit can be a helpful visual aid. Here, a 1D polynomial is fit through the xy point cloud defined by corresponding NDVI and PDSI observations. The resulting fit is added to the DataFrame as a new column 'Fit'.
Add a line of best fit between PDSI and NDVI by determining the linear relationship and predicting NDVI based on PDSI for each year.
End of explanation
"""
# Scatter of PDSI vs. NDVI (points colored by year) overlaid with the fit line.
base = alt.Chart(ndvi_pdsi_df).encode(
    x=alt.X('PDSI:Q', scale=alt.Scale(domain=(-5, 5))))
points = base.mark_circle(size=60).encode(
    y=alt.Y('NDVI:Q', scale=alt.Scale(domain=(0.4, 0.6))),
    color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')),
    tooltip=[
        alt.Tooltip('Year:O', title='Year'),
        alt.Tooltip('PDSI:Q', title='PDSI'),
        alt.Tooltip('NDVI:Q', title='NDVI')
    ])
fit = base.mark_line().encode(
    y=alt.Y('Fit:Q'),
    color=alt.value('#808080'))
(points + fit).properties(width=600, height=300).interactive()
"""
Explanation: Scatter plot
The DataFrame is ready for plotting. Since this chart is to include points and a line of best fit, two charts need to be created, one for the points and one for the line. The results are combined into the final plot.
End of explanation
"""
# Define a method for displaying Earth Engine image tiles to folium map.
def add_ee_layer(self, ee_image_object, vis_params, name):
  """Render an Earth Engine image as a folium tile layer on this map."""
  tiles_url = ee.Image(ee_image_object).getMapId(
      vis_params)['tile_fetcher'].url_format
  layer = folium.raster_layers.TileLayer(
      tiles=tiles_url,
      attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine, USDA National Agriculture Imagery Program</a>',
      name=name,
      overlay=True,
      control=True)
  layer.add_to(self)
# Add an Earth Engine layer drawing method to folium.
folium.Map.add_ee_layer = add_ee_layer
# Import a NAIP image for the area and date of interest.
naip_img = ee.ImageCollection('USDA/NAIP/DOQQ').filterDate(
    '2016-01-01',
    '2017-01-01').filterBounds(ee.Geometry.Point([-118.6407, 35.9665])).first()
# Display the NAIP image to the folium map.
m = folium.Map(location=[35.9665, -118.6407], tiles='Stamen Terrain', zoom_start=16)
m.add_ee_layer(naip_img, None, 'NAIP image, 2016')
# Add the point of interest to the map (example patch of dead trees).
folium.Circle(
    radius=15,
    location=[35.9665, -118.6407],
    color='yellow',
    fill=False,
).add_to(m)
# Add the AOI to the map (transparent fill, black outline).
folium.GeoJson(
    aoi.getInfo(),
    name='geojson',
    style_function=lambda x: {'fillColor': '#00000000', 'color': '#000000'},
).add_to(m)
# Add a lat lon popup (click the map to list latitude/longitude).
folium.LatLngPopup().add_to(m)
# Display the map.
display(m)
"""
Explanation: As you can see, there seems to be some degree of positive correlation between PDSI and NDVI (i.e., as wetness increases, vegetation productivity increases; as wetness decreases, vegetation productivity decreases). Note that some of the greatest outliers are 2016, 2017, 2018 - the three years following recovery from the long drought. It is also important to note that there are many other factors that may influence the NDVI signal that are not being considered here.
Patch-level vegetation mortality
At a regional scale there appears to be a relationship between drought and vegetation productivity. This section will look more closely at effects of drought on vegetation at a patch level, with a specific focus on mortality. Here, a Landsat time series collection is created for the period 1984-present to provide greater temporal context for change at a relatively precise spatial resolution.
Find a point of interest
Use aerial imagery from the National Agriculture Imagery Program (NAIP) in an interactive Folium map to identify a location in the Sierra Nevada ecoregion that appears to have patches of dead trees.
Run the following code block to render an interactive Folium map for a selected NAIP image.
Zoom and pan around the image to identify a region of recently dead trees (standing silver snags with no fine branches or brown/grey snags with fine branches).
Click the map to list the latitude and longitude for a patch of interest. Record these values for use in the following section (the example location used in the following section is presented as a yellow point).
End of explanation
"""
start_day = 224
end_day = 272
latitude = 35.9665
longitude = -118.6407
"""
Explanation: Prepare Landsat collection
Landsat surface reflectance data need to be prepared before being reduced. The steps below will organize data from multiple sensors into congruent collections where band names are consistent, cloud and cloud shadows have been masked out, and the normalized burn ratio (NBR) transformation is calculated and returned as the image representative (NBR is a good indicator of forest disturbance). Finally, all sensor collections will be merged into a single collection and annual composites calculated based on mean annual NBR using a join.
Define Landsat observation date window inputs based on NDVI curve plotted previously and set latitude and longitude variables from the map above.
End of explanation
"""
# Make lat. and long. vars an `ee.Geometry.Point`.
point = ee.Geometry.Point([longitude, latitude])
# Define a function to get and rename bands of interest from OLI.
def rename_oli(img):
  """Select OLI (Landsat 8) reflectance + QA bands, renamed to the same
  sensor-agnostic names used for the TM/ETM+ collections."""
  return (img.select(
      ee.List(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa']),
      ee.List(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa'])))
# Select the TM/ETM+ bands of interest and give them sensor-agnostic names.
def rename_etm(img):
    """Return *img* with Landsat 5/7 TM/ETM+ bands renamed to common names."""
    src_bands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa']
    dst_bands = ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']
    return img.select(ee.List(src_bands), ee.List(dst_bands))
# Mask out pixels flagged as cloud or cloud shadow in the CFMask QA band.
def cfmask(img):
    """Return *img* with cloud and cloud-shadow pixels masked out."""
    CLOUD_SHADOW_BIT = 1 << 3  # bit 3 of 'pixel_qa'
    CLOUD_BIT = 1 << 5         # bit 5 of 'pixel_qa'
    qa = img.select('pixel_qa')
    shadow_free = qa.bitwiseAnd(CLOUD_SHADOW_BIT).eq(0)
    cloud_free = qa.bitwiseAnd(CLOUD_BIT).eq(0)
    return img.updateMask(shadow_free.And(cloud_free))
# Stamp each image with its acquisition year (used later by the join).
def set_year(img):
    """Return *img* with a 'Year' property taken from its acquisition date."""
    return img.set('Year', ee.Image(img).date().get('year'))
# Compute the Normalized Burn Ratio, a disturbance-sensitive index.
def calc_nbr(img):
    """Return (NIR - SWIR2) / (NIR + SWIR2) as a single 'NBR' band."""
    bands = ee.List(['NIR', 'SWIR2'])
    return img.normalizedDifference(bands).rename('NBR')
def _prep_landsat(img, rename_fn):
    """Shared preparation pipeline for Landsat surface-reflectance images.

    Renames bands with the sensor-specific *rename_fn*, masks clouds and
    cloud shadows, computes NBR, restores the original image properties,
    and stamps the acquisition year.

    Parameters
    ----------
    img : ee.Image
        Raw Landsat surface-reflectance image.
    rename_fn : callable
        Sensor-specific band renamer (``rename_oli`` or ``rename_etm``).

    Returns
    -------
    ee.Image
        Single-band 'NBR' image carrying the source properties and 'Year'.
    """
    orig = img
    img = rename_fn(img)
    img = cfmask(img)
    img = calc_nbr(img)
    img = img.copyProperties(orig, orig.propertyNames())
    return set_year(img)


# Define a function to prepare OLI (Landsat 8) images.
def prep_oli(img):
    return _prep_landsat(img, rename_oli)


# Define a function to prepare TM/ETM+ (Landsat 5/7) images.
def prep_etm(img):
    return _prep_landsat(img, rename_etm)
# Import image collections for each Landsat sensor (surface reflectance).
tm_col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')
etm_col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')
oli_col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# Filter collections to the point of interest and the seasonal day-of-year
# window, then prepare them for merging. Note that TM images are prepared
# with `prep_etm`, i.e. using the ETM+ band selection.
oli_col = oli_col.filterBounds(point).filter(
    ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_oli)
etm_col = etm_col.filterBounds(point).filter(
    ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
tm_col = tm_col.filterBounds(point).filter(
    ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
# Merge the collections.
landsat_col = oli_col.merge(etm_col).merge(tm_col)
# Get a collection with one representative image per distinct year.
distinct_year_col = landsat_col.distinct('Year')
# Define a filter that identifies which images from the complete collection
# match the year from the distinct year collection.
join_filter = ee.Filter.equals(leftField='Year', rightField='Year')
# Define a join; all matches are saved in each image's 'year_matches'
# property as a list.
join = ee.Join.saveAll('year_matches')
# Apply the join and convert the resulting FeatureCollection to an
# ImageCollection.
join_col = ee.ImageCollection(
    join.apply(distinct_year_col, landsat_col, join_filter))
# Define a function to apply mean reduction among matching year collections.
def reduce_by_join(img):
    # Mean NBR over all images of the year; the timestamp is normalized to
    # Aug 1 of that year so every annual composite has a consistent date.
    year_col = ee.ImageCollection.fromImages(ee.Image(img).get('year_matches'))
    return year_col.reduce(ee.Reducer.mean()).rename('NBR').set(
        'system:time_start',
        ee.Image(img).date().update(month=8, day=1).millis())
# Apply the `reduce_by_join` function to the list of annual images in the
# properties of the join collection.
landsat_col = join_col.map(reduce_by_join)
"""
Explanation: Note: in your own application it may be necessary to change these values.
Prepare a Landsat surface reflectance collection 1984-present. Those unfamiliar with Landsat might find the following acronym definitions and links helpful.
OLI (Landsat's Operational Land Imager sensor)
ETM+ (Landsat's Enhanced Thematic Mapper Plus sensor)
TM (Landsat's Thematic Mapper sensor)
CFMask (Landsat USGS surface reflectance mask based on the CFMask algorithm)
NBR. (Normalized Burn Ratio: a spectral vegetation index)
Understanding Earth Engine joins
End of explanation
"""
# Reduce each annual NBR image at the point of interest; `ee.Reducer.first()`
# returns the single intersecting pixel value (no spatial aggregation).
# `create_reduce_region_function` is a helper defined earlier in the
# notebook (not shown in this section).
reduce_landsat = create_reduce_region_function(
    geometry=point, reducer=ee.Reducer.first(), scale=30, crs='EPSG:3310')
# Map the reducer over the collection and drop features whose computed
# band values are null (e.g. fully-masked years).
nbr_stat_fc = ee.FeatureCollection(landsat_col.map(reduce_landsat)).filter(
    ee.Filter.notNull(landsat_col.first().bandNames()))
"""
Explanation: The result of the above code block is an image collection with as many images as there are years present in the merged Landsat collection. Each image represents the annual mean NBR constrained to observations within the given date window.
Prepare DataFrame
Create a region reduction function; use ee.Reducer.first() as the reducer since no spatial aggregation is needed (we are interested in the single pixel that intersects the point). Set the region as the geometry defined by the lat. and long. coordinates identified in the above map.
Apply the function to all images in the time series.
Filter out features with null computed values.
End of explanation
"""
# Transfer the reduced features client-side as a Python dict
# (`fc_to_dict` is a helper defined earlier in the notebook).
nbr_dict = fc_to_dict(nbr_stat_fc).getInfo()
# Build a pandas DataFrame, then preview it and check column dtypes.
nbr_df = pd.DataFrame(nbr_dict)
display(nbr_df.head())
print(nbr_df.dtypes)
"""
Explanation: Transfer data from the server to the client.<br>
Note: if the process times out, you'll need to export/import the nbr_stat_fc feature collection as described in the Optional export section.
Convert the Python dictionary to a pandas DataFrame.
Preview the DataFrame and check data types.
End of explanation
"""
# Add calendar attribute columns (helper defined earlier in the notebook)
# and preview the result.
nbr_df = add_date_info(nbr_df)
nbr_df.head(5)
"""
Explanation: Add date attribute columns.
Preview the DataFrame.
End of explanation
"""
# Interactive line chart of the annual Landsat NBR time series.
x_enc = alt.X('Timestamp:T', title='Date')
tooltips = [
    alt.Tooltip('Timestamp:T', title='Date'),
    alt.Tooltip('NBR:Q'),
]
chart = alt.Chart(nbr_df).mark_line().encode(
    x=x_enc, y='NBR:Q', tooltip=tooltips)
chart.properties(width=600, height=300).interactive()
"""
Explanation: Line chart
Display the Landsat NBR time series for the point of interest as a line plot.
End of explanation
"""
# NEX-DCP30 ensemble statistics, restricted to the RCP 8.5 scenario for
# the 2019-2070 projection period.
scenario_filter = ee.Filter.And(ee.Filter.eq('scenario', 'rcp85'),
                                ee.Filter.date('2019-01-01', '2070-01-01'))
dcp_col = (ee.ImageCollection('NASA/NEX-DCP30_ENSEMBLE_STATS')
           .select(['tasmax_median', 'tasmin_median', 'pr_median'])
           .filter(scenario_filter))


def calc_mean_temp(img):
    """Average the median min/max temperature bands; keep precip rate."""
    mean_temp = (img.select('tasmax_median')
                 .add(img.select('tasmin_median'))
                 .divide(ee.Image.constant(2.0)))
    return (mean_temp
            .addBands(img.select('pr_median'))
            .rename(['Temp-mean', 'Precip-rate'])
            .copyProperties(img, img.propertyNames()))


dcp_col = dcp_col.map(calc_mean_temp)
"""
Explanation: As you can see from the above time series of NBR observations, a dramatic decrease in NBR began in 2015, shortly after the severe and extended drought began. The decline continued through 2017, when a minor recovery began. Within the context of the entire time series, it is apparent that the decline is outside of normal inter-annual variability and that the reduction in NBR for this site is quite severe. The lack of major recovery response in NBR in 2017-19 (time of writing) indicates that the event was not ephemeral; the loss of vegetation will have a lasting impact on this site. The corresponding onset of drought and reduction in NBR provides further evidence that there is a relationship between drought and vegetation response in the Sierra Nevada ecoregion.
Past and future climate
The previous data visualizations suggest there is a relationship between drought and vegetation stress and mortality in the Sierra Nevada ecoregion.
This section will look at how climate is projected to change in the future, which can give us a sense for what to expect with regard to drought conditions and speculate about its impact on vegetation.
We'll look at historical and projected temperature and precipitation. Projected data are represented by NEX-DCP30, and historical observations by PRISM.
Future climate
NEX-DCP30 data contain 33 climate models projected to the year 2100 using several scenarios of greenhouse gas concentration pathways (RCP). Here, we'll use the median of all models for RCP 8.5 (the worst case scenario) to look at potential future temperature and precipitation.
Import and prepare collection
Filter the collection by date and scenario.
Calculate 'mean' temperature from median min and max among 33 models.
End of explanation
"""
# Reduce the NEX-DCP30 images at the point of interest; `first()` returns
# the single intersecting pixel at a 5000-unit scale in EPSG:3310.
reduce_dcp30 = create_reduce_region_function(
    geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
# Map the reducer over the collection and drop features with null values.
dcp_stat_fc = ee.FeatureCollection(dcp_col.map(reduce_dcp30)).filter(
    ee.Filter.notNull(dcp_col.first().bandNames()))
"""
Explanation: Prepare DataFrame
Create a region reduction function.
Apply the function to all images in the time series.
Filter out features with null computed values.
End of explanation
"""
# Transfer the reduced features client-side as a Python dict
# (`fc_to_dict` is a helper defined earlier in the notebook).
dcp_dict = fc_to_dict(dcp_stat_fc).getInfo()
# Build a pandas DataFrame, then preview it and check column dtypes.
dcp_df = pd.DataFrame(dcp_dict)
display(dcp_df)
print(dcp_df.dtypes)
"""
Explanation: Transfer data from the server to the client. Note: if the process times out, you'll need to export/import the dcp_stat_fc feature collection as described in the Optional export section.
Convert the Python dictionary to a pandas DataFrame.
Preview the DataFrame and check the data types.
End of explanation
"""
# Add calendar attribute columns and preview the result.
dcp_df = add_date_info(dcp_df)
dcp_df.head(5)
"""
Explanation: Add date attribute columns.
Preview the DataFrame.
End of explanation
"""
# Convert precipitation rate to mm per month:
# 86400 seconds/day * 30 days/month.
dcp_df['Precip-mm'] = dcp_df['Precip-rate'] * 86400 * 30
# Convert temperature from Kelvin to degrees Celsius.
dcp_df['Temp-mean'] = dcp_df['Temp-mean'] - 273.15
# Tag the rows with the data source for later charting.
dcp_df['Model'] = 'NEX-DCP30'
# Drop the raw rate column. Use the keyword form: passing the axis
# positionally (`drop('Precip-rate', 1)`) was deprecated in pandas 1.4
# and raises TypeError in pandas 2.0.
dcp_df = dcp_df.drop(columns='Precip-rate')
dcp_df.head(5)
"""
Explanation: Convert precipitation rate to mm.
Convert Kelvin to celsius.
Add the model name as a column.
Remove the 'Precip-rate' column.
End of explanation
"""
# PRISM monthly climate data: precipitation ('ppt') and mean temperature
# ('tmean') for 1979-2019.
prism_col = (ee.ImageCollection('OREGONSTATE/PRISM/AN81m')
             .select(['ppt', 'tmean'])
             .filter(ee.Filter.date('1979-01-01', '2019-12-31')))
# Reduce at the point of interest (single pixel, 5000-unit scale,
# EPSG:3310; helper defined earlier in the notebook).
reduce_prism = create_reduce_region_function(
    geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
# Drop features whose computed band values are null.
prism_stat_fc = (ee.FeatureCollection(prism_col.map(reduce_prism))
                 .filter(ee.Filter.notNull(prism_col.first().bandNames())))
# Transfer the data client-side and build a DataFrame for inspection.
prism_dict = fc_to_dict(prism_stat_fc).getInfo()
prism_df = pd.DataFrame(prism_dict)
display(prism_df)
print(prism_df.dtypes)
"""
Explanation: Past climate
PRISM data are climate datasets for the conterminous United States. Grid cells are interpolated based on station data assimilated from many networks across the country. The datasets used here are monthly averages for precipitation and temperature. They provide a record of historical climate.
Reduce collection and prepare DataFrame
Import the collection and filter by date.
Reduce the collection images by region and filter null computed values.
Convert the feature collection to a dictionary and transfer it client-side.<br>
Note: if the process times out, you'll need to export/import the prism_stat_fc feature collection as described in the Optional export section.
Convert the dictionary to a DataFrame.
Preview the DataFrame.
End of explanation
"""
# Add calendar attribute columns, tag the data source, and rename columns
# to match the NEX-DCP30 DataFrame so the two can be concatenated.
prism_df = add_date_info(prism_df)
prism_df['Model'] = 'PRISM'
prism_df = prism_df.rename(columns={'ppt': 'Precip-mm', 'tmean': 'Temp-mean'})
prism_df.head(5)
"""
Explanation: Add date attribute columns.
Add model name.
Rename columns to be consistent with the NEX-DCP30 DataFrame.
Preview the DataFrame.
End of explanation
"""
# Concatenate the historical (PRISM) and projected (NEX-DCP30) records;
# rows are distinguished by the 'Model' column. `sort=True` sorts the
# union of columns alphabetically.
climate_df = pd.concat([prism_df, dcp_df], sort=True)
climate_df
"""
Explanation: Combine DataFrames
At this point the PRISM and NEX-DCP30 DataFrames have the same columns, the same units, and are distinguished by unique entries in the 'Model' column. Use the concat function to concatenate these DataFrames into a single DataFrame for plotting together in the same chart.
End of explanation
"""
# Layered chart: IQR error band beneath the median precipitation line,
# colored by data source (PRISM historical vs. NEX-DCP30 projected).
precip_title = 'Precipitation (mm/month)'
base = alt.Chart(climate_df).encode(x='Year:O', color='Model')
line = base.mark_line().encode(
    y=alt.Y('median(Precip-mm):Q', title=precip_title))
band = base.mark_errorband(extent='iqr').encode(
    y=alt.Y('Precip-mm:Q', title=precip_title))
(band + line).properties(width=600, height=300)
"""
Explanation: Charts
Chart the past and future precipitation and temperature together to get a sense for where climate has been and where it is projected to go under RCP 8.5.
Precipitation
End of explanation
"""
# Layered chart: IQR error band beneath the median temperature line.
base = alt.Chart(climate_df).encode(x='Year:O', color='Model')
line = base.mark_line().encode(y='median(Temp-mean):Q')
band = base.mark_errorband(extent='iqr').encode(
    y=alt.Y('Temp-mean:Q', title='Temperature (°C)'))
(band + line).properties(width=600, height=300)
"""
Explanation: Temperature
End of explanation
"""
|
econ-ark/HARK | examples/ConsIndShockModel/KinkedRconsumerType.ipynb | apache-2.0 | # Initial imports and notebook setup, click arrow to show
import matplotlib.pyplot as plt
import numpy as np
from HARK.ConsumptionSaving.ConsIndShockModel import KinkedRconsumerType
from HARK.utilities import plot_funcs_der, plot_funcs
mystr = lambda number: "{:.4f}".format(number)
"""
Explanation: KinkedRconsumerType: Consumption-saving model with idiosyncratic income shocks and different interest rates on borrowing and saving
End of explanation
"""
KinkedRdict = { # Click the arrow to expand this parameter dictionary
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0, # Coefficient of relative risk aversion
    "DiscFac": 0.96, # Intertemporal discount factor
    "LivPrb": [0.98], # Survival probability
    "PermGroFac": [1.01], # Permanent income growth factor
    "BoroCnstArt": None, # Artificial borrowing constraint; imposed minimum level of end-of period assets
    # New parameters unique to the "kinked R" model
    "Rboro": 1.20, # Interest factor on borrowing (a < 0)
    "Rsave": 1.01, # Interest factor on saving (a > 0)
    # Parameters that specify the income distribution over the lifecycle (shared with IndShockConsumerType)
    "PermShkStd": [0.1], # Standard deviation of log permanent shocks to income
    "PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks
    "TranShkStd": [0.2], # Standard deviation of log transitory shocks to income
    "TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks
    "UnempPrb": 0.05, # Probability of unemployment while working
    "IncUnemp": 0.3, # Unemployment benefits replacement rate
    "UnempPrbRet": 0.0005, # Probability of "unemployment" while retired
    "IncUnempRet": 0.0, # "Unemployment" benefits when retired
    "T_retire": 0, # Period of retirement (0 --> no retirement)
    "tax_rate": 0.0, # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid (shared with IndShockConsumerType)
    "aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value
    "aXtraMax": 20, # Maximum end-of-period "assets above minimum" value
    "aXtraCount": 48, # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac": 3, # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra": [None], # Additional values to add to aXtraGrid
    # A few other parameters (shared with IndShockConsumerType)
    "vFuncBool": True, # Whether to calculate the value function during solution
    "CubicBool": False, # Must be False: cubic interpolation is not yet implemented for the kinked-R model
    "T_cycle": 1, # Number of periods in the cycle for this agent type
    # Parameters only used in simulation (shared with PerfForesightConsumerType)
    "AgentCount": 10000, # Number of agents of this type
    "T_sim": 500, # Number of periods to simulate
    "aNrmInitMean": -6.0, # Mean of log initial assets
    "aNrmInitStd": 1.0, # Standard deviation of log initial assets
    "pLvlInitMean": 0.0, # Mean of log initial permanent income
    "pLvlInitStd": 0.0, # Standard deviation of log initial permanent income
    "PermGroFacAgg": 1.0, # Aggregate permanent income growth factor
    "T_age": None, # Age after which simulated agents are automatically killed
}
"""
Explanation: The module HARK.ConsumptionSaving.ConsIndShockModel concerns consumption-saving models with idiosyncratic shocks to (non-capital) income. All of the models assume CRRA utility with geometric discounting, no bequest motive, and income shocks are fully transitory or fully permanent.
ConsIndShockModel currently includes three models:
1. A very basic "perfect foresight" model with no uncertainty.
2. A model with risk over transitory and permanent income shocks.
3. The model described in (2), with an interest rate for debt that differs from the interest rate for savings.
This notebook provides documentation for the third of these models.
$\newcommand{\CRRA}{\rho}$
$\newcommand{\DiePrb}{\mathsf{D}}$
$\newcommand{\PermGroFac}{\Gamma}$
$\newcommand{\Rfree}{\mathsf{R}}$
$\newcommand{\DiscFac}{\beta}$
Statement of "kinked R" model
Consider a small extension to the model faced by IndShockConsumerTypes: that the interest rate on borrowing $a_t < 0$ is greater than the interest rate on saving $a_t > 0$. Consumers who face this kind of problem are represented by the $\texttt{KinkedRconsumerType}$ class.
For a full theoretical treatment, this model analyzed in A Theory of the Consumption Function, With
and Without Liquidity Constraints
and its expanded edition.
Continuing to work with normalized variables (e.g. $m_t$ represents the level of market resources divided by permanent income), the "kinked R" model can be stated as:
\begin{eqnarray}
v_t(m_t) &=& \max_{c_t} {~} U(c_t) + \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}{t} \left[ (\PermGroFac{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(m_{t+1}) \right], \
a_t &=& m_t - c_t, \
a_t &\geq& \underline{a}, \
m_{t+1} &=& \Rfree_t/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \
\Rfree_t &=& \cases{\Rfree_{boro} \texttt{ if } a_t < 0 \
\Rfree_{save} \texttt{ if } a_t \geq 0},\
\Rfree_{boro} &>& \Rfree_{save}, \
(\psi_{t+1},\theta_{t+1}) &\sim& F_{t+1}, \
\mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1.
\end{eqnarray}
Solving the "kinked R" model
The solution method for the "kinked R" model is nearly identical to that of the IndShockConsumerType on which it is based, using the endogenous grid method; see the notebook for that model for more information. The only significant difference is that the interest factor varies by $a_t$ across the exogenously chosen grid of end-of-period assets, with a discontinuity in $\Rfree$ at $a_t=0$.
To correctly handle this, the solveConsKinkedR function inserts two instances of $a_t=0$ into the grid of $a_t$ values: the first corresponding to $\Rfree_{boro}$ ($a_t = -0$) and the other corresponding to $\Rfree_{save}$ ($a_t = +0$). The two consumption levels (and corresponding endogenous $m_t$ gridpoints) represent points at which the agent's first order condition is satisfied at exactly $a_t=0$ at the two different interest factors. In between these two points, the first order condition does not hold with equality: the consumer will end the period with exactly $a_t=0$, consuming $c_t=m_t$, but his marginal utility of consumption exceeds the marginal value of saving and is less than the marginal value of borrowing. This generates a consumption function with two kinks: two concave portions (for borrowing and saving) with a linear segment of slope 1 in between.
Example parameter values to construct an instance of KinkedRconsumerType
The parameters required to create an instance of KinkedRconsumerType are nearly identical to those for IndShockConsumerType. The only difference is that the parameter $\texttt{Rfree}$ is replaced with $\texttt{Rboro}$ and $\texttt{Rsave}$.
While the parameter $\texttt{CubicBool}$ is required to create a valid KinkedRconsumerType instance, it must be set to False; cubic spline interpolation has not yet been implemented for this model. In the future, this restriction will be lifted.
| Parameter | Description | Code | Example value | Time-varying? |
| :---: | --- | --- | --- | --- |
| $\DiscFac$ |Intertemporal discount factor | $\texttt{DiscFac}$ | $0.96$ | |
| $\CRRA $ |Coefficient of relative risk aversion | $\texttt{CRRA}$ | $2.0$ | |
| $\Rfree_{boro}$ | Risk free interest factor for borrowing | $\texttt{Rboro}$ | $1.20$ | |
| $\Rfree_{save}$ | Risk free interest factor for saving | $\texttt{Rsave}$ | $1.01$ | |
| $1 - \DiePrb_{t+1}$ |Survival probability | $\texttt{LivPrb}$ | $[0.98]$ | $\surd$ |
|$\PermGroFac_{t+1}$|Permanent income growth factor|$\texttt{PermGroFac}$| $[1.01]$ | $\surd$ |
| $\sigma_\psi $ | Standard deviation of log permanent income shocks | $\texttt{PermShkStd}$ | $[0.1]$ |$\surd$ |
| $N_\psi $ | Number of discrete permanent income shocks | $\texttt{PermShkCount}$ | $7$ | |
| $\sigma_\theta $ | Standard deviation of log transitory income shocks | $\texttt{TranShkStd}$ | $[0.2]$ | $\surd$ |
| $N_\theta $ | Number of discrete transitory income shocks | $\texttt{TranShkCount}$ | $7$ | |
| $\mho$ | Probability of being unemployed and getting $\theta=\underline{\theta}$ | $\texttt{UnempPrb}$ | $0.05$ | |
| $\underline{\theta} $ | Transitory shock when unemployed | $\texttt{IncUnemp}$ | $0.3$ | |
| $\mho^{Ret}$ | Probability of being "unemployed" when retired | $\texttt{UnempPrb}$ | $0.0005$ | |
| $\underline{\theta}^{Ret} $ | Transitory shock when "unemployed" and retired | $\texttt{IncUnemp}$ | $0.0$ | |
| $(none)$ | Period of the lifecycle model when retirement begins | $\texttt{T_retire}$ | $0$ | |
| $(none)$ | Minimum value in assets-above-minimum grid | $\texttt{aXtraMin}$ | $0.001$ | |
| $(none)$ | Maximum value in assets-above-minimum grid | $\texttt{aXtraMax}$ | $20.0$ | |
| $(none)$ | Number of points in base assets-above-minimum grid | $\texttt{aXtraCount}$ | $48$ | |
| $(none)$ | Exponential nesting factor for base assets-above-minimum grid | $\texttt{aXtraNestFac}$ | $3$ | |
| $(none)$ | Additional values to add to assets-above-minimum grid | $\texttt{aXtraExtra}$ | $None$ | |
| $\underline{a} $ | Artificial borrowing constraint (normalized) | $\texttt{BoroCnstArt}$ | $None$ | |
| $(none) $ |Indicator for whether $\texttt{vFunc}$ should be computed | $\texttt{vFuncBool}$ | $True$ | |
| $(none)$ |Indicator for whether $\texttt{cFunc}$ should use cubic splines | $\texttt{CubicBool}$ | $False$ | |
|$T$| Number of periods in this type's "cycle" |$\texttt{T_cycle}$| $1$ | |
|(none)| Number of times the "cycle" occurs |$\texttt{cycles}$| $0$ | |
These example parameters are almost identical to those used for IndShockExample in the prior notebook, except that the interest rate on borrowing is 20% (like a credit card), and the interest rate on saving is 1%. Moreover, the artificial borrowing constraint has been set to None. The cell below defines a parameter dictionary with these example values.
End of explanation
"""
# Build the agent from the example dictionary, make the one-period cycle
# repeat forever (infinite horizon), then solve the model.
KinkyExample = KinkedRconsumerType(**KinkedRdict)
KinkyExample.cycles = 0 # Make the example infinite horizon
KinkyExample.solve()
"""
Explanation: Solving and examining the solution of the "kinked R" model
The cell below creates an infinite horizon instance of KinkedRconsumerType and solves its model by calling its solve method.
End of explanation
"""
# Plot the consumption function over a 5-unit span of market resources
# starting from the minimum feasible value mNrmMin.
print("Kinked R consumption function:")
plot_funcs(KinkyExample.solution[0].cFunc, KinkyExample.solution[0].mNrmMin, 5)
# The MPC is the derivative of the consumption function.
print("Kinked R marginal propensity to consume:")
plot_funcs_der(KinkyExample.solution[0].cFunc, KinkyExample.solution[0].mNrmMin, 5)
"""
Explanation: An element of a KinkedRconsumerType's solution will have all the same attributes as that of a IndShockConsumerType; see that notebook for details.
We can plot the consumption function of our "kinked R" example, as well as the MPC:
End of explanation
"""
# Record normalized market resources, consumption, and permanent income
# histories; initialize simulation states and run for T_sim periods.
KinkyExample.track_vars = ['mNrm', 'cNrm', 'pLvl']
KinkyExample.initialize_sim()
KinkyExample.simulate()
"""
Explanation: Simulating the "kinked R" model
In order to generate simulated data, an instance of KinkedRconsumerType needs to know how many agents there are that share these particular parameters (and are thus ex ante homogeneous), the distribution of states for newly "born" agents, and how many periods to simulated. These simulation parameters are described in the table below, along with example values.
| Description | Code | Example value |
| :---: | --- | --- |
| Number of consumers of this type | $\texttt{AgentCount}$ | $10000$ |
| Number of periods to simulate | $\texttt{T_sim}$ | $500$ |
| Mean of initial log (normalized) assets | $\texttt{aNrmInitMean}$ | $-6.0$ |
| Stdev of initial log (normalized) assets | $\texttt{aNrmInitStd}$ | $1.0$ |
| Mean of initial log permanent income | $\texttt{pLvlInitMean}$ | $0.0$ |
| Stdev of initial log permanent income | $\texttt{pLvlInitStd}$ | $0.0$ |
| Aggregrate productivity growth factor | $\texttt{PermGroFacAgg}$ | $1.0$ |
| Age after which consumers are automatically killed | $\texttt{T_age}$ | $None$ |
Here, we will simulate 10,000 consumers for 500 periods. All newly born agents will start with permanent income of exactly $P_t = 1.0 = \exp(\texttt{pLvlInitMean})$, as $\texttt{pLvlInitStd}$ has been set to zero; they will have essentially zero assets at birth, as $\texttt{aNrmInitMean}$ is $-6.0$; assets will be less than $1\%$ of permanent income at birth.
These example parameter values were already passed as part of the parameter dictionary that we used to create KinkyExample, so it is ready to simulate. We need to set the track_vars attribute to indicate the variables for which we want to record a history.
End of explanation
"""
# Cross-sectional mean of normalized market resources in each simulated
# period (axis=1 averages across the AgentCount agents).
plt.plot(np.mean(KinkyExample.history['mNrm'], axis=1))
plt.xlabel("Time")
plt.ylabel("Mean market resources")
plt.show()
"""
Explanation: We can plot the average (normalized) market resources in each simulated period:
End of explanation
"""
# Empirical CDF of end-of-period normalized assets across agents in the
# final simulated period.
plt.plot(np.sort(KinkyExample.state_now['aNrm']), np.linspace(0.0, 1.0, KinkyExample.AgentCount))
plt.xlabel("End-of-period assets")
plt.ylabel("Cumulative distribution")
plt.ylim(-0.01, 1.01)
plt.show()
"""
Explanation: Now let's plot the distribution of (normalized) assets $a_t$ for the current population, after simulating for $500$ periods; this should be fairly close to the long run distribution:
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.19/_downloads/d52b5321a00f5cf4d4be975019fb541b/plot_morph_surface_stc.ipynb | bsd-3-clause | # Author: Tommy Clausner <tommy.clausner@gmail.com>
#
# License: BSD (3-clause)
import os
import mne
from mne.datasets import sample
print(__doc__)
"""
Explanation: Morph surface source estimate
This example demonstrates how to morph an individual subject's
:class:mne.SourceEstimate to a common reference space. We achieve this using
:class:mne.SourceMorph. Pre-computed data will be morphed based on
a spherical representation of the cortex computed using the spherical
registration of FreeSurfer <tut-freesurfer>
(https://surfer.nmr.mgh.harvard.edu/fswiki/SurfaceRegAndTemplates) [1]_. This
transform will be used to morph the surface vertices of the subject towards the
reference vertices. Here we will use 'fsaverage' as a reference space (see
https://surfer.nmr.mgh.harvard.edu/fswiki/FsAverage).
The transformation will be applied to the surface source estimate. A plot
depicting the successful morph will be created for the spherical and inflated
surface representation of 'fsaverage', overlaid with the morphed surface
source estimate.
References
.. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M.
R., Fischl B., Brysbaert M.
A Surface-based Analysis of Language Lateralization and Cortical
Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013.
<div class="alert alert-info"><h4>Note</h4><p>For background information about morphing see `ch_morph`.</p></div>
End of explanation
"""
# Locate the MNE sample dataset and build the paths used below.
sample_dir_raw = sample.data_path()
sample_dir = os.path.join(sample_dir_raw, 'MEG', 'sample')
subjects_dir = os.path.join(sample_dir_raw, 'subjects')  # FreeSurfer subjects
fname_stc = os.path.join(sample_dir, 'sample_audvis-meg')  # file stem, no extension
"""
Explanation: Setup paths
End of explanation
"""
# Read stc from file (subject='sample' labels the source-space subject).
stc = mne.read_source_estimate(fname_stc, subject='sample')
"""
Explanation: Load example data
End of explanation
"""
# Compute the surface morph from 'sample' to the 'fsaverage' template
# using the FreeSurfer data stored under subjects_dir.
morph = mne.compute_source_morph(stc, subject_from='sample',
                                 subject_to='fsaverage',
                                 subjects_dir=subjects_dir)
"""
Explanation: Setting up SourceMorph for SourceEstimate
In MNE surface source estimates represent the source space simply as
lists of vertices (see
tut-source-estimate-class).
This list can either be obtained from
:class:mne.SourceSpaces (src) or from the stc itself.
Since the default spacing (resolution of surface mesh) is 5 and
subject_to is set to 'fsaverage', :class:mne.SourceMorph will use
default ico-5 fsaverage vertices to morph, which are the special
values [np.arange(10242)] * 2.
<div class="alert alert-info"><h4>Note</h4><p>This is not generally true for other subjects! The set of vertices
used for ``fsaverage`` with ico-5 spacing was designed to be
special. ico-5 spacings for other subjects (or other spacings
for fsaverage) must be calculated and will not be consecutive
integers.</p></div>
If src was not defined, the morph will actually not be precomputed, because
we lack the vertices from that we want to compute. Instead the morph will
be set up and when applying it, the actual transformation will be computed on
the fly.
Initialize SourceMorph for SourceEstimate
End of explanation
"""
stc_fsaverage = morph.apply(stc)
"""
Explanation: Apply morph to (Vector) SourceEstimate
The morph will be applied to the source estimate data, by giving it as the
first argument to the morph we computed above.
End of explanation
"""
# Define plotting parameters shared by the spherical and inflated views.
surfer_kwargs = dict(
    hemi='lh', subjects_dir=subjects_dir,
    clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
    initial_time=0.09, time_unit='s', size=(800, 800),
    smoothing_steps=5)
# As spherical surface
brain = stc_fsaverage.plot(surface='sphere', **surfer_kwargs)
# Add title
brain.add_text(0.1, 0.9, 'Morphed to fsaverage (spherical)', 'title',
               font_size=16)
"""
Explanation: Plot results
End of explanation
"""
# Same morphed estimate rendered on the inflated surface representation.
brain_inf = stc_fsaverage.plot(surface='inflated', **surfer_kwargs)
# Add title
brain_inf.add_text(0.1, 0.9, 'Morphed to fsaverage (inflated)', 'title',
                   font_size=16)
"""
Explanation: As inflated surface
End of explanation
"""
# One-liner: compute the morph (morphs to 'fsaverage' by default) and
# apply it immediately without storing the SourceMorph instance.
stc_fsaverage = mne.compute_source_morph(stc,
                                         subjects_dir=subjects_dir).apply(stc)
"""
Explanation: Reading and writing SourceMorph from and to disk
An instance of SourceMorph can be saved, by calling
:meth:morph.save <mne.SourceMorph.save>.
This method allows for specification of a filename under which the morph
will be save in ".h5" format. If no file extension is provided, "-morph.h5"
will be appended to the respective defined filename::
>>> morph.save('my-file-name')
Reading a saved source morph can be achieved by using
:func:mne.read_source_morph::
>>> morph = mne.read_source_morph('my-file-name-morph.h5')
Once the environment is set up correctly, no information such as
subject_from or subjects_dir must be provided, since it can be
inferred from the data and use morph to 'fsaverage' by default. SourceMorph
can further be used without creating an instance and assigning it to a
variable. Instead :func:mne.compute_source_morph and
:meth:mne.SourceMorph.apply can be
easily chained into a handy one-liner. Taking this together the shortest
possible way to morph data directly would be:
End of explanation
"""
|
ajgpitch/qutip-notebooks | examples/qip-optpulseprocessor.ipynb | lgpl-3.0 | from numpy import pi
from qutip.qip.device import OptPulseProcessor
from qutip.qip.circuit import QubitCircuit
from qutip.qip.operations import expand_operator, toffoli
from qutip.operators import sigmaz, sigmax, identity
from qutip.states import basis
from qutip.metrics import fidelity
from qutip.tensor import tensor
"""
Explanation: Examples for OptPulseProcessor
Author: Boxi Li (etamin1201@gmail.com)
End of explanation
"""
N = 1
# Drift Hamiltonian
H_d = sigmaz()
# The (single) control Hamiltonian
H_c = sigmax()
processor = OptPulseProcessor(N, drift=H_d)
processor.add_control(H_c, 0)
"""
Explanation: The qutip.OptPulseProcessor is a noisy quantum device simulator integrated with the optimal pulse algorithm from the qutip.control module. It is a subclass of qutip.Processor and is equipped with a method to find the optimal pulse sequence (hence the name OptPulseProcessor) for a qutip.QubitCircuit or a list of qutip.Qobj. For the user guide of qutip.Processor, please refer to the introductory notebook.
Single-qubit gate
Like in the parent class Processor, we need to first define the available Hamiltonians in the system. The OptPulseProcessor has one more parameter, the drift Hamiltonian, which has no time-dependent coefficients and thus won't be optimized.
End of explanation
"""
qc = QubitCircuit(N)
qc.add_gate("SNOT", 0)
# This method calls optimize_pulse_unitary
tlist, coeffs = processor.load_circuit(qc, min_grad=1e-20, init_pulse_type='RND',
num_tslots=6, evo_time=1, verbose=True)
processor.plot_pulses(title="Control pulse for the Hadamard gate");
"""
Explanation: The method load_circuit calls qutip.control.optimize_pulse_unitary and returns the pulse coefficients.
End of explanation
"""
rho0 = basis(2,1)
plus = (basis(2,0) + basis(2,1)).unit()
minus = (basis(2,0) - basis(2,1)).unit()
result = processor.run_state(init_state=rho0)
print("Fidelity:", fidelity(result.states[-1], minus))
# add noise
processor.t1 = 40.0
result = processor.run_state(init_state=rho0)
print("Fidelity with qubit relaxation:", fidelity(result.states[-1], minus))
"""
Explanation: Like the Processor, the simulation is calculated with a QuTiP solver. The method run_state calls mesolve and returns the result. One can also add noise to observe the change in the fidelity, e.g. the t1 decoherence time.
End of explanation
"""
toffoli()
"""
Explanation: Multi-qubit gate
In the following example, we use OptPulseProcessor to find the optimal control pulse of a multi-qubit circuit. For simplicity, the circuit contains only one Toffoli gate.
End of explanation
"""
N = 3
H_d = tensor([identity(2)] * 3)
test_processor = OptPulseProcessor(N, H_d, [])
test_processor.add_control(sigmaz(), cyclic_permutation=True)
test_processor.add_control(sigmax(), cyclic_permutation=True)
"""
Explanation: We have single-qubit control $\sigma_x$ and $\sigma_z$, with the argument cyclic_permutation=True, it creates 3 operators each targeted on one qubit.
End of explanation
"""
sxsx = tensor([sigmax(),sigmax()])
sxsx01 = expand_operator(sxsx, N=3, targets=[0,1])
sxsx12 = expand_operator(sxsx, N=3, targets=[1,2])
test_processor.add_control(sxsx01)
test_processor.add_control(sxsx12)
"""
Explanation: The interaction is generated by $\sigma_x\sigma_x$ between the qubit 0 & 1 and qubit 1 & 2. expand_operator can be used to expand the operator to a larger dimension with given target qubits.
End of explanation
"""
# Find an optimal control pulse realizing the Toffoli gate on the
# processor defined above, using 6 time slots over a total evolution
# time of 1.
test_processor.load_circuit([toffoli()], num_tslots=6, evo_time=1, verbose=True);
# Typo fix in the user-facing plot title: "Contorl" -> "Control".
test_processor.plot_pulses(title="Control pulse for toffoli gate");
"""
Explanation: Use the above defined control Hamiltonians, we now find the optimal pulse for the Toffoli gate with 6 time slots. Instead of a QubitCircuit, a list of operators can also be given as an input. Different color in the figure represents different control pulses.
End of explanation
"""
# Build a small three-qubit circuit: one two-qubit CNOT plus two
# single-qubit rotations.
qc = QubitCircuit(N=3)
qc.add_gate("CNOT", controls=0, targets=2)
qc.add_gate("RX", targets=2, arg_value=pi/4)
qc.add_gate("RY", targets=1, arg_value=pi/8)
# Per-gate pulse-optimization settings: the two-qubit CNOT needs more
# time slots and a longer evolution time than the single-qubit rotations.
setting_args = {"CNOT": {"num_tslots": 20, "evo_time": 3},
                "RX": {"num_tslots": 2, "evo_time": 1},
                "RY": {"num_tslots": 2, "evo_time": 1}}
test_processor.load_circuit(qc, merge_gates=False, setting_args=setting_args, verbose=True);
# Grammar fix in the user-facing plot title: "a each gate" -> "each gate".
test_processor.plot_pulses(title="Control pulse for each gate in the circuit");
"""
Explanation: Merging a quantum circuit
If there are multiple gates in the circuit, we can choose if we want to first merge them and then find the pulse for the merged unitary.
End of explanation
"""
qc = QubitCircuit(N=3)
qc.add_gate("CNOT", controls=0, targets=2)
qc.add_gate("RX", targets=2, arg_value=pi/4)
qc.add_gate("RY", targets=1, arg_value=pi/8)
test_processor.load_circuit(qc, merge_gates=True, verbose=True, num_tslots=20, evo_time=5);
test_processor.plot_pulses(title="Control pulse for a merged unitary evolution");
"""
Explanation: In the above figure, the pulses from $t=0$ to $t=3$ are for the CNOT gate, while the rest are for the two single-qubit gates. The difference in the frequency of change is merely a result of our choice of evo_time. Here we can see that the three gates are carried out in sequence.
End of explanation
"""
from qutip.ipynbtools import version_table
version_table()
"""
Explanation: In this figure there are no different stages, the three gates are first merged and then the algorithm finds the optimal pulse for the resulting unitary evolution.
End of explanation
"""
|
ijstokes/bokeh-blaze-tutorial | solutions/1.6 Layout (solution).ipynb | mit | # Import the functions from your file
from viz import climate_map, legend, timeseries
# Create your plots with your new functions
climate_map = climate_map()
legend = legend()
timeseries = timeseries()
# Test the visualizations in the notebook
from bokeh.plotting import show, output_notebook
output_notebook()
show(climate_map)
show(legend)
show(timeseries)
"""
Explanation: <img src=images/continuum_analytics_b&w.png align="left" width="15%" style="margin-right:15%">
<h1 align='center'>Bokeh Tutorial</h1>
1.6 Layout
Exercise: Wrap your visualizations in functions
Wrap each of the previous visualizations in a function in a python file (e.g. viz.py):
Climate + Map: climate_map()
Legend: legend()
Timeseries: timeseries()
End of explanation
"""
from bokeh.plotting import vplot, hplot
# Create layout
map_legend = hplot(climate_map, legend)
layout = vplot(map_legend, timeseries)
# Show layout
show(layout)
"""
Explanation: Exercise: Layout your plots using hplot and vplot
End of explanation
"""
from bokeh.plotting import output_file
#output_file("climate.html")
show(layout)
"""
Explanation: Exercise: Store your layout in an html page
End of explanation
"""
|
yttty/python3-scraper-tutorial | Python_Spider_Tutorial_06.ipynb | gpl-3.0 | from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("https://en.wikipedia.org/wiki/Python_(programming_language)")
bsObj = BeautifulSoup(html.read(), "html.parser")
for link in bsObj.findAll("a"):
if 'href' in link.attrs:
print(link.attrs['href'])
"""
Explanation: 用Python 3开发网络爬虫
By Terrill Yang (Github: https://github.com/yttty)
用Python 3开发网络爬虫 - Chapter 06 开始采集
一个抓取Wikipedia的例子
我们从最简单的抓取开始,先写一段可以获取Wikipedia任何页面的代码:
End of explanation
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
html = urlopen("https://en.wikipedia.org/wiki/Python_(programming_language)")
bsObj = BeautifulSoup(html.read(), "html.parser")
for link in bsObj.find("div", {"id":"bodyContent"}).findAll("a", href=re.compile("^(/wiki/)((?!:).)*$")):
if 'href' in link.attrs:
print(link.attrs['href'])
"""
Explanation: 可以发现,所有指向Wikipedia词条的链接都是/wiki/开头,所以我们可以用正则表达式来过滤出这些词条,就像这样
End of explanation
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import datetime
import random
import re
count = 0
random.seed(datetime.datetime.now())
def getLinks(articleUrl):
    """Fetch a Wikipedia article and return its in-wiki article links.

    articleUrl is the path part of the URL (e.g. "/wiki/Python").
    Returns the list of <a> tags inside the body content whose href
    starts with "/wiki/" and contains no ":" — the latter excludes
    special pages such as "File:..." or "Category:...".
    """
    html = urlopen("http://en.wikipedia.org"+articleUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    return bsObj.find("div", {"id":"bodyContent"}).findAll("a", href=re.compile("^(/wiki/)((?!:).)*$"))
links = getLinks("/wiki/Python_(programming_language)")
while len(links) > 0 and count < 10:
newArticle = links[random.randint(0, len(links)-1)].attrs["href"]
print(newArticle)
count = count + 1
links = getLinks(newArticle)
"""
Explanation: 上面的函数还不太能用于实际抓取,我们稍作改进,变成下面这个样子,就可以初步用于抓取页面的所有链接了。因为我们不能无限制地抓取下去,我便设置了10个链接的上限。
End of explanation
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
pages = set()
def getLinks(pageUrl):
    """Recursively crawl Wikipedia starting at pageUrl.

    Prints the title, first paragraph, and edit link of each visited
    page, then recurses into every not-yet-seen "/wiki/..." link.
    Uses the module-level set `pages` for de-duplication.
    NOTE(review): the recursion has no depth limit, so this runs until
    the link set is exhausted (effectively forever) or the stack
    overflows.
    """
    global pages
    html = urlopen("http://en.wikipedia.org"+pageUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    try:
        # Some pages lack one of these elements; AttributeError is
        # raised by the chained .find()/.h1 lookups returning None.
        print(bsObj.h1.get_text())
        print(bsObj.find(id ="mw-content-text").findAll("p")[0])
        print(bsObj.find(id="ca-edit").find("span").find("a").attrs['href'])
    except AttributeError:
        print("This page is missing something! No worries though!")
    for link in bsObj.findAll("a", href=re.compile("^(/wiki/)")):
        if 'href' in link.attrs:
            if link.attrs['href'] not in pages:
                #We have encountered a new page
                newPage = link.attrs['href']
                print("----------------\n"+newPage)
                pages.add(newPage)
                getLinks(newPage)
getLinks("")
"""
Explanation: 为了避免一个页面被采集两次,链接去重是非常重要的,下面的代码用Python的set来保存已经采集的链接。下面这段代码将无限制地运行下去,除非set集为空,然而这几乎是不可能的。
End of explanation
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import datetime
import random
pages = set()
random.seed(datetime.datetime.now())
#Retrieves a list of all Internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    """Collect the unique internal links found on a parsed page.

    A link counts as internal when its href begins with "/" or
    contains the site's own URL (includeUrl).  Order of first
    appearance is preserved.
    """
    pattern = re.compile("^(/|.*" + includeUrl + ")")
    internalLinks = []
    for anchor in bsObj.findAll("a", href=pattern):
        href = anchor.attrs['href']
        if href is not None and href not in internalLinks:
            internalLinks.append(href)
    return internalLinks
#Retrieves a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    """Collect the unique external links found on a parsed page.

    A link counts as external when its href starts with "http" or
    "www" and does not contain the current site's URL (excludeUrl).
    Order of first appearance is preserved.
    """
    pattern = re.compile("^(http|www)((?!" + excludeUrl + ").)*$")
    externalLinks = []
    for anchor in bsObj.findAll("a", href=pattern):
        href = anchor.attrs['href']
        if href is not None and href not in externalLinks:
            externalLinks.append(href)
    return externalLinks
def splitAddress(address):
    """Strip the URL scheme and split the remainder on "/".

    Returns a list whose first element is the host, followed by any
    path segments, e.g. "http://a.com/x/y" -> ["a.com", "x", "y"].
    Generalized to also handle "https://" URLs; scheme-less input is
    returned split as-is, preserving the original behavior.
    """
    # Remove either scheme prefix before splitting; the original only
    # handled "http://", which left "https:" glued to the host.
    addressParts = address.replace("https://", "").replace("http://", "").split("/")
    return addressParts
def getRandomExternalLink(startingPage):
    """Return a random external link found on startingPage.

    If the page has no external links, pick a random internal link of
    the same site and retry from there.

    Bug fixes vs. the original:
    * getInternalLinks was called with the URL string instead of the
      parsed BeautifulSoup object, and without the domain argument;
    * the fallback recursed into an undefined name
      (getNextExternalLink) instead of this function.
    """
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html, "html.parser")
    domain = splitAddress(startingPage)[0]
    externalLinks = getExternalLinks(bsObj, domain)
    if len(externalLinks) == 0:
        internalLinks = getInternalLinks(bsObj, domain)
        # Retry from a random internal page.  NOTE(review): assumes the
        # chosen internal link is a site-relative path starting with "/".
        return getRandomExternalLink(
            "http://" + domain +
            internalLinks[random.randint(0, len(internalLinks)-1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks)-1)]
def followExternalOnly(startingSite):
    """Print one random external link reachable from startingSite."""
    print("Random external link is: " + getRandomExternalLink(startingSite))
    # Recursing here on the returned link would keep hopping from site
    # to site indefinitely; left disabled, as in the notebook.
followExternalOnly("http://oreilly.com")
followExternalOnly("http://oreilly.com")
followExternalOnly("http://oreilly.com")
"""
Explanation: 我们接下来可以写一个随机找外链的小程序
End of explanation
"""
allExtLinks = set()
allIntLinks = set()
def getAllExternalLinks(siteUrl):
    """Recursively crawl siteUrl and print every link it discovers.

    External links are printed and recorded in the module-level set
    allExtLinks; internal links are printed, recorded in allIntLinks,
    and recursed into.  The sets prevent revisiting the same URL.
    NOTE(review): the recursion has no depth limit, and internal links
    are passed to urlopen as-is — relative paths will fail there.
    """
    html = urlopen(siteUrl)
    bsObj = BeautifulSoup(html.read(), "html.parser")
    internalLinks = getInternalLinks(bsObj, splitAddress(siteUrl)[0])
    externalLinks = getExternalLinks(bsObj, splitAddress(siteUrl)[0])
    for link in externalLinks:
        if link not in allExtLinks:
            allExtLinks.add(link)
            print(link)
    for link in internalLinks:
        if link not in allIntLinks:
            print(link)
            allIntLinks.add(link)
            # Recurse into the internal page to harvest its links too.
            getAllExternalLinks(link)
getAllExternalLinks("http://oreilly.com")
"""
Explanation: 如果我们的目标是获取页面上所有的外链,并记录之,我们可以用下面的函数来办
End of explanation
"""
import scrapy
class BlogSpider(scrapy.Spider):
    """Scrapy spider that collects post titles from the Scrapinghub blog."""
    # Spider identifier used by the scrapy CLI (e.g. `scrapy crawl blogspider`).
    name = 'blogspider'
    start_urls = ['https://blog.scrapinghub.com']
    def parse(self, response):
        """Yield one {'title': ...} item per post, then follow pagination."""
        # Each post title lives in an <h2 class="entry-title"><a>...</a></h2>.
        for title in response.css('h2.entry-title'):
            yield {'title': title.css('a ::text').extract_first()}
        # Follow the "previous posts" link, if present, and parse it the same way.
        next_page = response.css('div.prev-post > a ::attr(href)').extract_first()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)
"""
Explanation: 使用Scrapy
写网络爬虫的挑战之一就是你要不断重复一些简单的任务:找出页面上所有的链接,区分外链和内链,跳转到新的页面。掌握这些基本模式非常有用,从零开始也是完全可行的,但是这里有一个工具可以帮助你自动处理这些细节。
https://scrapy.org
关于这个框架的使用,请参考其文档,这里不作更多叙述了。
如果以后有时间再继续补充。
End of explanation
"""
|
jay-johnson/sci-pype | examples/ML-IRIS-Extract-Models-From-Cache.ipynb | apache-2.0 | # Setup the Sci-pype environment
import sys, os
# Only redis is needed for this notebook:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
# Load the Sci-pype PyCore as a named-object called "core" and environment variables
from src.common.load_ipython_env import *
"""
Explanation: Extracting the IRIS Models from Cache
This notebook demonstrates how to extract the machine learning Models + Analysis from the redis "CACHE" instance and save them to disk as a compressed, string artifact file (Pickle + zlib compression). Once the file is saved, it is uploaded to the configured S3 Bucket for archiving and sharing.
Overview
Extract the IRIS Regressor and Classification datasets from the redis CACHE. After extraction, compile a manifest for defining a cache mapping for all the Models + their respective Analysis. Once cached, the Models can be extract and shared + deployed on other Sci-pype instances by using something like this notebook or the command-line versions.
Command-line Versions
I built this notebook from the extractor examples:
https://github.com/jay-johnson/sci-pype/tree/master/bins/ml/extractors
1) Extract the IRIS Classifier Models + Analysis from the Cache
End of explanation
"""
ds_name = "iris_classifier"
"""
Explanation: 2) Setup the Request
Extract the Models from the Cache with this request and upload them object files to the configured S3 Bucket.
Please make sure the environment variables are set correctly and the S3 Bucket exists:
ENV_AWS_KEY=<AWS API Key>
ENV_AWS_SECRET=<AWS API Secret>
For docker containers make sure to set these keys in the correct Jupyter env file and restart the container:
<repo base dir>/justredis/jupyter.env
<repo base dir>/local/jupyter.env
<repo base dir>/test/jupyter.env
What's the dataset name?
End of explanation
"""
data_dir = str(os.getenv("ENV_DATA_DST_DIR", "/opt/work/data/dst"))
if not os.path.exists(data_dir):
os.mkdir(data_dir, 0777)
"""
Explanation: Where is the downloaded file getting stored?
End of explanation
"""
s3_bucket = "unique-bucket-name-for-datasets" # name this something under your AWS Account (This might be open to the public in the future...stay tuned)
s3_key = "dataset_" + core.to_upper(ds_name) + ".cache.pickle.zlib"
s3_loc = str(s3_bucket) + ":" + str(s3_key)
"""
Explanation: What's the S3 Location (Unique Bucket Name + Key)?
End of explanation
"""
cache_req = {
"RAName" : "CACHE", # Redis instance name holding the models
"DSName" : str(ds_name), # Dataset name for pulling out of the cache
"S3Loc" : str(s3_loc), # S3 location to store the model file
"DeleteAfter" : False, # Optional delete after upload
"SaveDir" : data_dir, # Optional dir to save the model file - default is ENV_DATA_DST_DIR
"TrackingID" : "" # Future support for using the tracking id
}
upload_results = core.ml_upload_cached_dataset_to_s3(cache_req, core.get_rds(), core.get_dbs(), debug)
if upload_results["Status"] == "SUCCESS":
lg("Done Uploading Model and Analysis DSName(" + str(ds_name) + ") S3Loc(" + str(cache_req["S3Loc"]) + ")", 6)
else:
lg("", 6)
lg("ERROR: Failed Upload Model and Analysis Caches as file for DSName(" + str(ds_name) + ")", 6)
lg(upload_results["Error"], 6)
lg("", 6)
# end of if extract + upload worked
lg("", 6)
lg("Extract and Upload Completed", 5)
lg("", 6)
"""
Explanation: Build the full request and run it
End of explanation
"""
ds_name = "iris_regressor"
"""
Explanation: 3) Setup the Extract and Upload for the IRIS Regressor Models and Analysis
End of explanation
"""
cache_req = {
"RAName" : "CACHE", # Redis instance name holding the models
"DSName" : str(ds_name), # Dataset name for pulling out of the cache
"S3Loc" : str(s3_loc), # S3 location to store the model file
"DeleteAfter" : False, # Optional delete after upload
"SaveDir" : data_dir, # Optional dir to save the model file - default is ENV_DATA_DST_DIR
"TrackingID" : "" # Future support for using the tracking id
}
upload_results = core.ml_upload_cached_dataset_to_s3(cache_req, core.get_rds(), core.get_dbs(), debug)
if upload_results["Status"] == "SUCCESS":
lg("Done Uploading Model and Analysis DSName(" + str(ds_name) + ") S3Loc(" + str(cache_req["S3Loc"]) + ")", 6)
else:
lg("", 6)
lg("ERROR: Failed Upload Model and Analysis Caches as file for DSName(" + str(ds_name) + ")", 6)
lg(upload_results["Error"], 6)
lg("", 6)
sys.exit(1)
# end of if extract + upload worked
lg("", 6)
lg("Extract and Upload Completed", 5)
lg("", 6)
"""
Explanation: 4) Build and Run the Extract + Upload Request
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_introduction.ipynb | bsd-3-clause | import mne
"""
Explanation: .. _intro_tutorial:
Basic MEG and EEG data processing
MNE-Python reimplements most of MNE-C's (the original MNE command line utils)
functionality and offers transparent scripting.
On top of that it extends MNE-C's functionality considerably
(customize events, compute contrasts, group statistics, time-frequency
analysis, EEG-sensor space analyses, etc.) It uses the same files as standard
MNE unix commands: no need to convert your files to a new system or database.
What you can do with MNE Python
Raw data visualization to visualize recordings, can also use
mne_browse_raw for extended functionality (see :ref:ch_browse)
Epoching: Define epochs, baseline correction, handle conditions etc.
Averaging to get Evoked data
Compute SSP projectors to remove ECG and EOG artifacts
Compute ICA to remove artifacts or select latent sources.
Maxwell filtering to remove environmental noise.
Boundary Element Modeling: single and three-layer BEM model
creation and solution computation.
Forward modeling: BEM computation and mesh creation
(see :ref:ch_forward)
Linear inverse solvers (dSPM, sLORETA, MNE, LCMV, DICS)
Sparse inverse solvers (L1/L2 mixed norm MxNE, Gamma Map,
Time-Frequency MxNE)
Connectivity estimation in sensor and source space
Visualization of sensor and source space data
Time-frequency analysis with Morlet wavelets (induced power,
intertrial coherence, phase lock value) also in the source space
Spectrum estimation using multi-taper method
Mixed Source Models combining cortical and subcortical structures
Dipole Fitting
Decoding multivariate pattern analyis of M/EEG topographies
Compute contrasts between conditions, between sensors, across
subjects etc.
Non-parametric statistics in time, space and frequency
(including cluster-level)
Scripting (batch and parallel computing)
What you're not supposed to do with MNE Python
- **Brain and head surface segmentation** for use with BEM
models -- use Freesurfer.
- **Raw movement compensation** -- use Elekta Maxfilter™
.. note:: This package is based on the FIF file format from Neuromag. It
can read and convert CTF, BTI/4D, KIT and various EEG formats to
FIF.
Installation of the required materials
See :ref:getting_started with Python.
.. note:: The expected location for the MNE-sample data is
my-path-to/mne-python/examples. If you downloaded data and an example asks
you whether to download it again, make sure
the data reside in the examples directory and you run the script from its
current directory.
From IPython e.g. say::
cd examples/preprocessing
%run plot_find_ecg_artifacts.py
From raw data to evoked data
.. _ipython: http://ipython.scipy.org/
Now, launch ipython_ (Advanced Python shell) using the QT backend which best
supported across systems::
$ ipython --matplotlib=qt
First, load the mne package:
End of explanation
"""
mne.set_log_level('WARNING')
"""
Explanation: If you'd like to turn information status messages off:
End of explanation
"""
mne.set_log_level('INFO')
"""
Explanation: But it's generally a good idea to leave them on:
End of explanation
"""
mne.set_config('MNE_LOGGING_LEVEL', 'WARNING')
"""
Explanation: You can set the default level by setting the environment variable
"MNE_LOGGING_LEVEL", or by having mne-python write preferences to a file:
End of explanation
"""
mne.get_config_path()
"""
Explanation: Note that the location of the mne-python preferences file (for easier manual
editing) can be found using:
End of explanation
"""
from mne.datasets import sample # noqa
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
print(raw_fname)
"""
Explanation: By default logging messages print to the console, but look at
mne.set_log_file() to save output to a file.
Access raw data
^^^^^^^^^^^^^^^
End of explanation
"""
raw = mne.io.read_raw_fif(raw_fname)
print(raw)
print(raw.info)
"""
Explanation: .. note:: The MNE sample dataset should be downloaded automatically but be
patient (approx. 2GB)
Read data from file:
End of explanation
"""
print(raw.ch_names)
"""
Explanation: Look at the channels in raw:
End of explanation
"""
start, stop = raw.time_as_index([100, 115]) # 100 s to 115 s data segment
data, times = raw[:, start:stop]
print(data.shape)
print(times.shape)
data, times = raw[2:20:3, start:stop] # access underlying data
raw.plot()
"""
Explanation: Read and plot a segment of raw data
End of explanation
"""
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
exclude='bads')
raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks,
overwrite=True)
"""
Explanation: Save a segment of 150s of raw data (MEG only):
End of explanation
"""
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5])
"""
Explanation: Define and read epochs
^^^^^^^^^^^^^^^^^^^^^^
First extract events:
End of explanation
"""
mne.set_config('MNE_STIM_CHANNEL', 'STI101')
"""
Explanation: Note that, by default, we use stim_channel='STI 014'. If you have a different
system (e.g., a newer system that uses channel 'STI101' by default), you can
use the following to set the default stim channel to use for finding events:
End of explanation
"""
event_id = dict(aud_l=1, aud_r=2) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
"""
Explanation: Events are stored as 2D numpy array where the first column is the time
instant and the last one is the event number. It is therefore easy to
manipulate.
Define epochs parameters:
End of explanation
"""
raw.info['bads'] += ['MEG 2443', 'EEG 053']
"""
Explanation: Exclude some channels (original bads + 2 more):
End of explanation
"""
picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False,
exclude='bads')
"""
Explanation: The variable raw.info['bads'] is just a python list.
Pick the good channels, excluding raw.info['bads']:
End of explanation
"""
mag_picks = mne.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
grad_picks = mne.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
"""
Explanation: Alternatively one can restrict to magnetometers or gradiometers with:
End of explanation
"""
baseline = (None, 0) # means from the first instant to t = 0
"""
Explanation: Define the baseline period:
End of explanation
"""
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
"""
Explanation: Define peak-to-peak rejection parameters for gradiometers, magnetometers
and EOG:
End of explanation
"""
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, preload=False, reject=reject)
print(epochs)
"""
Explanation: Read epochs:
End of explanation
"""
epochs_data = epochs['aud_l'].get_data()
print(epochs_data.shape)
"""
Explanation: Get single epochs for one condition:
End of explanation
"""
from scipy import io # noqa
io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')
"""
Explanation: epochs_data is a 3D array of dimension (55 epochs, 365 channels, 106 time
instants).
Scipy supports read and write of matlab files. You can save your single
trials with:
End of explanation
"""
epochs.save('sample-epo.fif')
"""
Explanation: or if you want to keep all the information about the data you can save your
epochs in a fif file:
End of explanation
"""
saved_epochs = mne.read_epochs('sample-epo.fif')
"""
Explanation: and read them later with:
End of explanation
"""
evoked = epochs['aud_l'].average()
print(evoked)
evoked.plot()
"""
Explanation: Compute evoked responses for auditory responses by averaging and plot it:
End of explanation
"""
max_in_each_epoch = [e.max() for e in epochs['aud_l']] # doctest:+ELLIPSIS
print(max_in_each_epoch[:4]) # doctest:+ELLIPSIS
"""
Explanation: .. topic:: Exercise
Extract the max value of each epoch
End of explanation
"""
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked1 = mne.read_evokeds(
evoked_fname, condition='Left Auditory', baseline=(None, 0), proj=True)
"""
Explanation: It is also possible to read evoked data stored in a fif file:
End of explanation
"""
evoked2 = mne.read_evokeds(
evoked_fname, condition='Right Auditory', baseline=(None, 0), proj=True)
"""
Explanation: Or another one stored in the same file:
End of explanation
"""
contrast = evoked1 - evoked2
print(contrast)
"""
Explanation: Compute a contrast:
End of explanation
"""
import numpy as np # noqa
n_cycles = 2 # number of cycles in Morlet wavelet
freqs = np.arange(7, 30, 3) # frequencies of interest
"""
Explanation: Time-Frequency: Induced power and inter trial coherence
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Define parameters:
End of explanation
"""
from mne.time_frequency import tfr_morlet # noqa
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
return_itc=True, decim=3, n_jobs=1)
# power.plot()
"""
Explanation: Compute induced power and phase-locking values and plot gradiometers:
End of explanation
"""
from mne.minimum_norm import apply_inverse, read_inverse_operator # noqa
"""
Explanation: Inverse modeling: MNE and dSPM on evoked and raw data
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Import the required functions:
End of explanation
"""
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inverse_operator = read_inverse_operator(fname_inv)
"""
Explanation: Read the inverse operator:
End of explanation
"""
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"
"""
Explanation: Define the inverse parameters:
End of explanation
"""
stc = apply_inverse(evoked, inverse_operator, lambda2, method)
"""
Explanation: Compute the inverse solution:
End of explanation
"""
stc.save('mne_dSPM_inverse')
"""
Explanation: Save the source time courses to disk:
End of explanation
"""
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
label = mne.read_label(fname_label)
"""
Explanation: Now, let's compute dSPM on a raw file within a label:
End of explanation
"""
from mne.minimum_norm import apply_inverse_raw # noqa
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
start, stop)
"""
Explanation: Compute inverse solution during the first 15s:
End of explanation
"""
stc.save('mne_dSPM_raw_inverse_Aud')
"""
Explanation: Save result in stc files:
End of explanation
"""
print("Done!")
"""
Explanation: What else can you do?
^^^^^^^^^^^^^^^^^^^^^
- detect heart beat QRS component
- detect eye blinks and EOG artifacts
- compute SSP projections to remove ECG or EOG artifacts
- compute Independent Component Analysis (ICA) to remove artifacts or
select latent sources
- estimate noise covariance matrix from Raw and Epochs
- visualize cross-trial response dynamics using epochs images
- compute forward solutions
- estimate power in the source space
- estimate connectivity in sensor and source space
- morph stc from one brain to another for group studies
- compute mass univariate statistics base on custom contrasts
- visualize source estimates
- export raw, epochs, and evoked data to other python data analysis
libraries e.g. pandas
- and many more things ...
Want to know more ?
^^^^^^^^^^^^^^^^^^^
Browse the examples gallery <auto_examples/index.html>_.
End of explanation
"""
|
pyemma/deeplearning | assignment2/BatchNormalization.ipynb | gpl-3.0 | # As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.iteritems():
print '%s: ' % k, v.shape
"""
Explanation: Batch Normalization
One way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].
The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.
The authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.
It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.
[3] Sergey Ioffe and Christian Szegedy, "Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift", ICML 2015.
End of explanation
"""
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print 'Before batch normalization:'
print ' means: ', a.mean(axis=0)
print ' stds: ', a.std(axis=0)
# Means should be close to zero and stds close to one
print 'After batch normalization (gamma=1, beta=0)'
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print ' mean: ', a_norm.mean(axis=0)
print ' std: ', a_norm.std(axis=0)
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print 'After batch normalization (nontrivial gamma, beta)'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in xrange(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print 'After batch normalization (test-time):'
print ' means: ', a_norm.mean(axis=0)
print ' stds: ', a_norm.std(axis=0)
"""
Explanation: Batch normalization: Forward
In the file cs231n/layers.py, implement the batch normalization forward pass in the function batchnorm_forward. Once you have done so, run the following to test your implementation.
End of explanation
"""
# Gradient check batchnorm backward pass
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print 'dx error: ', rel_error(dx_num, dx)
print 'dgamma error: ', rel_error(da_num, dgamma)
print 'dbeta error: ', rel_error(db_num, dbeta)
"""
Explanation: Batch Normalization: backward
Now implement the backward pass for batch normalization in the function batchnorm_backward.
To derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.
Once you have finished, run the following to numerically check your backward pass.
End of explanation
"""
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print 'dx difference: ', rel_error(dx1, dx2)
print 'dgamma difference: ', rel_error(dgamma1, dgamma2)
print 'dbeta difference: ', rel_error(dbeta1, dbeta2)
print 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2))
"""
Explanation: Batch Normalization: alternative backward
In class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.
Surprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function batchnorm_backward_alt and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.
NOTE: You can still complete the rest of the assignment if you don't figure this part out, so don't worry too much if you can't get it.
End of explanation
"""
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print 'Running check with reg = ', reg
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
if reg == 0: print
"""
Explanation: Fully Connected Nets with Batch Normalization
Now that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs2312n/classifiers/fc_net.py. Modify your implementation to add batch normalization.
Concretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.
HINT: You might find it useful to define an additional helper layer similar to those in the file cs231n/layer_utils.py. If you decide to do so, do it in the file cs231n/classifiers/fc_net.py.
End of explanation
"""
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
"""
Explanation: Batchnorm for deep networks
Run the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
End of explanation
"""
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
"""
Explanation: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
End of explanation
"""
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print 'Running weight scale %d / %d' % (i + 1, len(weight_scales))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gcf().set_size_inches(10, 15)
plt.show()
"""
Explanation: Batch normalization and initialization
We will now run a small experiment to study the interaction of batch normalization and weight initialization.
The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ncc/cmip6/models/noresm2-mh/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-mh', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: NCC
Source ID: NORESM2-MH
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:24
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmopsheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
* Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric cehmistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry startospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
chongxi/spiketag | notebooks/LMNN.ipynb | bsd-3-clause | %pylab inline
x = numpy.array([[0,0],[-1,0.1],[0.3,-0.05],[0.7,0.3],[-0.2,-0.6],[-0.15,-0.63],[-0.25,0.55],[-0.28,0.67]])
y = numpy.array([0,0,0,0,1,1,2,2])
"""
Explanation: Metric Learning with the Shogun Machine Learning Toolbox
Building up the intuition to understand LMNN
First of all, let us introduce LMNN through a simple example. For this purpose, we will be using the following two-dimensional toy data set:
End of explanation
"""
def plot_data(features, labels, axis, alpha=1.0):
    """Scatter-plot a 2-D labelled data set on *axis*, one color per class.

    Parameters
    ----------
    features : ndarray, shape (n_samples, 2)
        Two-dimensional feature vectors, one row per example.
    labels : ndarray, shape (n_samples,)
        Integer class labels; classes 0, 1 and 2 are drawn in green,
        red and blue respectively.
    axis : matplotlib axes
        Axes object the points are drawn onto (modified in place).
    alpha : float, optional
        Marker transparency; 1.0 (fully opaque) by default.
    """
    # one marker color per class, drawn in class order (0, 1, 2)
    for cls, color in ((0, 'green'), (1, 'red'), (2, 'blue')):
        pts = features[labels == cls]
        axis.plot(pts[:, 0], pts[:, 1], 'o', color=color, markersize=12,
                  alpha=alpha)
    # fixed limits and equal aspect so the different plots in the
    # notebook are directly comparable
    axis.set_xlim(-1.5, 1.5)
    axis.set_ylim(-1.5, 1.5)
    axis.set_aspect('equal')
    axis.set_xlabel('x')
    axis.set_ylabel('y')
# single-panel figure showing the raw toy data via the helper above
figure,axis = pyplot.subplots(1,1)
plot_data(x,y,axis)
axis.set_title('Toy data set')
pyplot.show()
"""
Explanation: That is, there are eight feature vectors where each of them belongs to one out of three different classes (identified by either 0, 1, or 2). Let us have a look at this data:
End of explanation
"""
def make_covariance_ellipse(covariance):
    """Return a matplotlib Ellipse patch visualizing *covariance* at 2 sigma.

    The ellipse is centered at the origin; its half-axes are the square
    roots of the covariance eigenvalues and it is rotated to align with
    the corresponding eigenvectors.
    """
    import matplotlib.patches as patches
    import scipy.linalg as linalg
    # the ellipse is centered at (0,0)
    mean = numpy.array([0, 0])
    # eigendecomposition of the symmetric covariance matrix:
    # w holds the eigenvalues in ascending order and the COLUMNS of v
    # are the corresponding orthonormal eigenvectors
    w, v = linalg.eigh(covariance)
    # eigenvector belonging to w[0]: eigh returns eigenvectors as columns,
    # so v[:, 0] (not v[0], which is a row and mirrors the orientation for
    # non-axis-aligned covariances) keeps the 2*sqrt(w[0]) axis below
    # aligned with its own eigenvector
    u = v[:, 0] / linalg.norm(v[:, 0])
    # orientation of that eigenvector in degrees; arctan2 also handles a
    # vertical eigenvector (u[0] == 0) without dividing by zero
    angle = numpy.degrees(numpy.arctan2(u[1], u[0]))
    # fill Gaussian ellipse at 2 standard deviations; the extra 180 degrees
    # is harmless because an ellipse is symmetric under a half turn
    ellipse = patches.Ellipse(mean, 2*w[0]**0.5, 2*w[1]**0.5, angle=180+angle,
                              color='orange', alpha=0.3)
    return ellipse
# represent the Euclidean distance: an identity covariance renders as a
# unit circle, i.e. both feature dimensions are weighted equally
figure,axis = pyplot.subplots(1,1)
plot_data(x,y,axis)
ellipse = make_covariance_ellipse(numpy.eye(2))
axis.add_artist(ellipse)
axis.set_title('Euclidean distance')
pyplot.show()
"""
Explanation: In the figure above, we can see that two of the classes are represented by two points that are, for each of these classes, very close to each other. The third class, however, has four points that are close to each other with respect to the y-axis, but spread along the x-axis.
If we were to apply kNN (k-nearest neighbors) in a data set like this, we would expect quite some errors using the standard Euclidean distance. This is due to the fact that the spread of the data is not similar amongst the feature dimensions. The following piece of code plots an ellipse on top of the data set. The ellipse in this case is in fact a circumference that helps to visualize how the Euclidean distance weights equally both feature dimensions.
End of explanation
"""
# Shogun expects features column-wise: one column per example (2 x 8 here)
x.T.shape
from modshogun import RealFeatures, MulticlassLabels
# wrap the numpy arrays into Shogun feature/label containers;
# MulticlassLabels requires float64 values, hence the astype conversion
features = RealFeatures(x.T)
labels = MulticlassLabels(y.astype(numpy.float64))
"""
Explanation: A possible workaround to improve the performance of kNN in a data set like this would be to input to the kNN routine a distance measure. For instance, in the example above a good distance measure would give more weight to the y-direction than to the x-direction to account for the large spread along the x-axis. Nonetheless, it would be nicer (and, in fact, much more useful in practice) if this distance could be learnt automatically from the data at hand. Actually, LMNN is based upon this principle: given a number of neighbours k, find the Mahalanobis distance measure which maximizes kNN accuracy (using the given value for k) in a training data set. As we usually do in machine learning, under the assumption that the training data is an accurate enough representation of the underlying process, the distance learnt will not only perform well in the training data, but also have good generalization properties.
Now, let us use the LMNN class implemented in Shogun to find the distance and plot its associated ellipse. If everything goes well, we will see that the new ellipse only overlaps with the data points of the green class.
First, we need to wrap the data into Shogun's feature and label objects:
End of explanation
"""
from modshogun import LMNN
# number of target neighbours per example (the k used by kNN during training)
k = 1
lmnn = LMNN(features,labels,k)
# set an initial transform as a start point of the iterative optimization;
# the identity means "start from the plain Euclidean distance"
init_transform = numpy.eye(2)
# cap the number of gradient iterations so training always terminates
lmnn.set_maxiter(5000)
lmnn.train(init_transform)
"""
Explanation: Secondly, perform LMNN training:
End of explanation
"""
# get the linear transform L learnt by LMNN
L = lmnn.get_linear_transform()
# square the linear transform (M = L^T L) to obtain the Mahalanobis
# distance matrix; NOTE(review): numpy.matrix is deprecated in modern
# numpy -- plain ndarrays with numpy.linalg.inv would be preferred
M = numpy.matrix(numpy.dot(L.T,L))
# represent the distance given by LMNN: the ellipse of the INVERSE of M
# shows which directions the learnt metric considers "close"
figure,axis = pyplot.subplots(1,1)
plot_data(x,y,axis)
ellipse = make_covariance_ellipse(M.I)
axis.add_artist(ellipse)
axis.set_title('LMNN distance')
pyplot.show()
"""
Explanation: LMNN is an iterative algorithm. The argument given to train represents the initial state of the solution. By default, if no argument is given, then LMNN uses PCA to obtain this initial value.
End of explanation
"""
# project original data using L (features are column-wise, hence x.T)
lx = numpy.dot(L,x.T)
# represent the data in the projected space (left) next to the original
# data (right); in the projected space the plain Euclidean circle is the
# correct distance, which is why both panels use an identity ellipse
figure,axis = pyplot.subplots(1,2, figsize=(10,5))
plot_data(lx.T,y,axis[0])
ellipse0 = make_covariance_ellipse(numpy.eye(2))
axis[0].add_artist(ellipse0)
axis[0].set_title('LMNN\'s linear transform')
ellipse1 = make_covariance_ellipse(numpy.eye(2))
plot_data(x,y,axis[1],1)
axis[1].add_artist(ellipse1)
axis[1].set_title('original')
# plot the LMNN objective recorded at every iteration to check convergence
statistics = lmnn.get_statistics()
pyplot.plot(statistics.obj.get())
pyplot.grid(True)
pyplot.xlabel('Number of iterations')
pyplot.ylabel('LMNN objective')
"""
Explanation: Beyond the main idea
End of explanation
"""
|
maliyngh/LTPython | LightTools_Data_Examples.ipynb | apache-2.0 | # Import the packages/libraries you typically use
import clr
import System
import numpy as np
import matplotlib.pyplot as plt
#This forces plots inline in the Spyder/Python Command Console
%matplotlib inline
#In the line below, make sure the path matches your installation!
LTCOM64Path="C:\\Program Files\\Optical Research Associates\\"
LTCOM64Path=LTCOM64Path + "LightTools 8.4.0\\Utilities.NET\\LTCOM64.dll"
clr.AddReference(LTCOM64Path)
from LTCOM64 import LTAPIx
lt0=LTAPIx()
#If PID capabilities (for multiple LightTools sessions) needed, use the PID for the session you want
#lt0.LTPID=12040
lt0.UpdateLTPointer
#If no PID is specified, connect to the first running session
"""
Explanation: Overview
Installation instructions for Anaconda and Python for .NET
Examples presented here are based on Python 3.5
Examples are shown in jupyter notebook application. You can use the same code in the spyder.exe (winPython IDE)
You can run examples from the notebook itself (this browser window), or use spyder scripts
<font color='blue'>Examples shown in early parts of this notebook are simple, barebone scripts in order to explain the communication process with LightTools. A more flexible, easy to use function library project is decribed later in this presentation. Project name: LTPython</font>
How to initiate a connection with LightTools
Macro examples
Passing a message, commands, and data access with plotting (XY Scatter)
Mesh data access and plotting (2D Raster)
Parameter study example
Optimization with Python (scipy.minimize)
Simple 2-variable example
Focus with conic lens
Collimator with Swept/Bezier
SplinePatch example
Still using COM (pyWin32)?
There are many reasons to change your macros to use LTCOM64 library as soon as possible
Your macros are stuck with a specific version of a type library
e.g. A macro referenced LTAPI will have to be re-programmed if you want to use a new function in LTAPI4
We are not able to provide any temporary updates to the type library if you find an issue/limitation with a given function, etc. This is due to the complexity associated with COM architecture/distribution
It's unlikely that we will udate current COM libraries in the future, although the existing functionality will continue to work as long as Windows supports COM
Connecting with LightTools using COM (pyWin32) is described at the end
Most examples presented here will work "as is", but creating pointers to LTAPI and JumpStart functions will be slightly different
Using Python with LightTools - A Quick Start Guide
This is a brief introduction for how to use LightTools macros from jupyter Notebook, using Python language and .NET features
For full development environment, use winPython distribution (spyder)
Jupyter Notebook is an excellent tool for presentations, training, quick macros, etc.
Install Anaconda
https://www.continuum.io/downloads
Used version for examples: 4.2.0, 64-bit
The Anaconda installation includes the following packages we need
Python base package
numpy
scipy
matplotlib (includes pyplot library)
jupyter notebook
and many others
Install Python for .NET
This requires Framework 4.0
This is where you can download the Python for .NET
http://www.lfd.uci.edu/~gohlke/pythonlibs/#pythonnet
Make sure to select the version that matches the version of Python you installed with Anaconda
Installation of the Python .NET
Open a DOS command prompt (cmd)
Change the directory to where you downloaded the *.whl file
Enter the following command: pip install some-package.whl
With Anaconda and Python for .NET installed, the installation is complete. The next step in writing a macro is to connect to the .NET librarries.
- LTCOM64.dll installed under the /LightTools/Utilities.NET/ folder is what we need
- Python NET provides the .NET access capabilities. The "import clr" statement below provides the System.Reflection capabilities in .NET
- The LTCOM64 library contains the LTCOM64.LTAPIx and LTCOM64.JSNET2 (JumpStart library functions). The special nature of these functions is that they do not require any COM pointers
- In the .NET interface, COM pointers are not allowed
- COM aspects needed to interact with LightTools are automatically handled by the library
End of explanation
"""
# post a text message; it appears in LightTools' Console window and Macro Output tab
lt0.Message("Hello from jupyter Notebook - 2!")
"""
Explanation: Sending a message to LightTools
The message will appear in the Console Window, and the Macro Output tab
End of explanation
"""
#Set the focus to the 3D Window, pass a fixed command string to create a sphere with radius 5
# '\V3D' gives focus to the 3D view; the sphere is centered at (0,0,0) and its
# surface passes through the second point (0,0,5), i.e. radius 5
lt0.Cmd('\V3D ctrsphere xyz 0,0,0 xyz 0,0,5')
"""
Explanation: Sending commands to LightTools
The command below will:
set the focus to the 3D Window, and add a sphere
Get the name of the last created solid object
Set the radius of the last sphere to 10
End of explanation
"""
# Build the command using Coord3(), which formats a point as the string "XYZ x,y,z"
cmdstr="ctrsphere " + lt0.Coord3(0,0,0) + lt0.Coord3(0,0,5)
print(cmdstr) #so that we can see it
lt0.Cmd(cmdstr)
"""
Explanation: Send a command with Coord3() function
The coord3() function will create a string in the format "XYZ x,y,z"
End of explanation
"""
#Set the radius to 10
# '@Last' refers to the most recently created solid; Primitive[1] is its sphere primitive
key="Solid[@Last].Primitive[1]"
lt0.DbSet(key,"Radius",10)
# Read the radius back to verify that the DbSet worked
r=lt0.DbGet(key,"Radius")
print("Radius of the sphere is: " + str(r))
"""
Explanation: Setting and getting data
Following example shows how to use DbSet() and DbGet() functions to access data
Set the radius of the sphere primitive to 10
Get the radius to test whether the "set" worked correctly
End of explanation
"""
from IPython.display import Image
# NOTE(review): 'PATH' must be defined earlier (folder containing the image) — confirm
Image(filename = PATH + 'BooleanAndMove.PNG',width=500,height=100)
cmdstr="Cylinder " +lt0.Coord3(0,0,0) + " 3 15" #radius =3, length = 15
lt0.Cmd(cmdstr)
#Get the names of the objects. We have 2 objects
#Notice that we are using the "index" of each solid object
names=[]
for i in [1,2]:
    key="Solid[" + str(i) + "]"
    print("Current data key is: " + key) #so that we can see it
    names.append(lt0.DbGet(key, "Name"))
    print(names[i-1])
#Select two objects
lt0.Cmd("Select " + lt0.Str(names[0]) + " More " + lt0.Str(names[1]))
lt0.Cmd("Subtract")
#Resulting object has the name of the first selected object for boolean
lt0.Cmd("Select " + lt0.Str(names[0]))
lt0.Cmd("Move " + lt0.Coord3(0,10,10))
"""
Explanation: Select, Copy, Boolean, Move
Make a cylinder
Subtract the cylinder from sphere
Move the resulting solid to XYZ 0,10,10
If you need more controls with images
End of explanation
"""
#Get the spectral power distribution from a receiver (1D grids)
key="receiver[1].spectral_distribution[1]"
cellcount=int(lt0.DbGet(key,"Count"))
print("Number of rows: " + str(cellcount))
w=np.zeros((cellcount))
p=np.zeros((cellcount))
# Grid rows are 1-based; the trailing optional arguments (0, row i, column 1) select the cell
for i in range(1,cellcount+1,1):
    w[i-1],stat=lt0.DbGet(key,"Wavelength_At",0,i,1) #data returned is a tuple!
    p[i-1],stat=lt0.DbGet(key,"Power_At",0,i,1)
plt.plot(w,p,'-r')
"""
Explanation: Access data in grids (1D and 2D)
Access to data in grids is a slightly different process
There are two types of data grids
1D and 2D
When accessing grid data, we need to use the two optional arguments in the DbGet() and DbSet() functions. Typically we omit these arguments for general data access
Note that LTPython project, described later in this presentation provides more flexible data access methods. This is an illustration of barebone script code
Here's an example of getting the spectral distribution from a receiver
End of explanation
"""
#Get the mesh data one cell at a time (this is a 2D grid)
# Note that a faster method for mesh data is described below
key="receiver[1].Mesh[1]"
xdim=int(lt0.DbGet(key,"X_Dimension")) #Columns
ydim=int(lt0.DbGet(key,"Y_Dimension")) #Rows
cv=np.zeros((ydim,xdim))
# Cell indices are 1-based; (i,j) selects column i, row j
for i in range(1,xdim+1,1):
    for j in range(1,ydim+1):
        cv[j-1,i-1],stat=lt0.DbGet(key,"CellValue",0,i,j)
#Get the mesh bounds
MinX=lt0.DbGet(key,"Min_X_Bound")
MaxX=lt0.DbGet(key,"Max_X_Bound")
MinY=lt0.DbGet(key,"Min_Y_Bound")
MaxY=lt0.DbGet(key,"Max_Y_Bound")
#Create a data grid for plotting, and plot the data
# pcolormesh expects cell-edge coordinates, hence xdim+1 / ydim+1 points
xvec=np.linspace(MinX,MaxX,xdim+1)
yvec=np.linspace(MinY,MaxY,ydim+1)
X,Y=np.meshgrid(xvec,yvec)
plt.pcolormesh(X,Y,cv,cmap='jet')
plt.xlabel("X")
plt.ylabel("Y")
plt.axis("equal")
#See below for a simpler/faster method to access mesh data
"""
Explanation: Here's an example of getting mesh data from a receiver
This example shows how to access individual cell values
Typically, you can use the GetMeshData() function described later in this document to get the data for a given mesh in a single call
Note that LTPython project, described later in this presentation provides more flexible data access methods. This is an illustration of barebone script code
End of explanation
"""
def GetLTMeshParams(MeshKey,CellValueType):
    """Get the data from a receiver mesh.

    Parameters
    ----------
    MeshKey : str
        Data access string for the receiver mesh,
        e.g. "receiver[1].Mesh[1]".
    CellValueType : str
        Cell data type to retrieve (e.g. "CellValue").

    Returns
    -------
    X_Dimension : int
        Number of bins in X dimension
    Y_Dimension : int
        Number of bins in Y dimension
    Min_X_Bound : float
        Minimum X bound for the mesh
    Max_X_Bound : float
        Maximum X bound for the mesh
    Min_Y_Bound : float
        Minimum Y bound for the mesh
    Max_Y_Bound : float
        Maximum Y bound for the mesh
    Mesh_Data_Array : numpy.ndarray
        2D array of cell data, rotated 90 degrees so it plots in the
        conventional orientation

    Examples
    --------
    meshkey="receiver[1].Mesh[1]"
    xdim,ydim,minx,maxx,miny,maxy,md=GetLTMeshParams(meshkey,"CellValue")
    """
    XDim=int(lt0.DbGet(MeshKey,"X_Dimension"))
    YDim=int(lt0.DbGet(MeshKey,"Y_Dimension"))
    MinX=lt0.DbGet(MeshKey,"Min_X_Bound")
    MaxX=lt0.DbGet(MeshKey,"Max_X_Bound")
    MinY=lt0.DbGet(MeshKey,"Min_Y_Bound")
    MaxY=lt0.DbGet(MeshKey,"Max_Y_Bound")
    # GetMeshData() requires a pre-sized .NET double array to receive the data
    dblArray=System.Array.CreateInstance(System.Double,XDim,YDim)
    [Stat,mData]=lt0.GetMeshData(MeshKey,dblArray,CellValueType)
    # Copy the .NET array into a NumPy array element by element
    # (the .NET array does not expose NumPy's buffer protocol).
    # np.empty is used since every element is overwritten below;
    # the stray debug print of the dimensions has been removed.
    MeshData=np.empty((XDim,YDim))
    for i in range(0,XDim):
        for j in range(0,YDim):
            MeshData[i,j]=mData[i,j]
    # Rotate so rows/columns match the plotting orientation used by callers
    MeshData=np.rot90(MeshData)
    #Notice how we return multiple data items
    return XDim,YDim,MinX,MaxX,MinY,MaxY,MeshData
"""
Explanation: Writing and calling functions
This is a function to retrieve data from a receiver mesh
Get the data from the entire mesh in one call, without having to iterate through each cell
The function below also returns some other mesh parameters such as the dimensions and bounds
Notice also that it includes help strings (known as Doc Strings)
End of explanation
"""
import matplotlib
# Retrieve the mesh data and bounds in a single call via our helper
meshkey="receiver[1].Mesh[1]"
xdim,ydim,minx,maxx,miny,maxy,md=GetLTMeshParams(meshkey,"CellValue")
# Cell-edge coordinates for pcolormesh (one more point than cells per axis)
cellx=np.linspace(minx,maxx,xdim+1)
celly=np.linspace(miny,maxy,ydim+1)
X,Y=np.meshgrid(cellx,celly)
#Raster chart in LOG scale
plt.pcolormesh(X,Y,np.flipud(md),cmap="jet",norm=matplotlib.colors.LogNorm())
plt.colorbar()
plt.axis("equal")
plt.xlabel("X")
plt.ylabel("Y")
"""
Explanation: Here's how we call the above function with arguments
Get the data
Create a 2D grids for x and y, uniformly spaced, for plotting
Use 'pcolormesh()' for plotting
'pcolormesh()' is faster than 'pcolor()'
End of explanation
"""
from LTCOM64 import JSNET2
js=JSNET2()
#If PID capabilities (for multiple LightTools sessions) needed, use the PID for the session you want
#js.LTPID=12040
# Bind the JumpStart handle to the running LightTools session
js.UpdateLTPointer
"""
Explanation: Accessing JumpStart Functions
JumpStart library is a set of helper functions available for macro users
These functions attempt to simplify the syntax/usage so that you can write macros faster!
LTCOM64 includes all JumpStart functions
This means you can access both LightTools API (that we looked at so far) and JumpStart functions using a single reference library
The example below shows how to create a handle to JumpStart functions
End of explanation
"""
# Create a sphere of radius 5 named "mySphere", then translate it by (0,10,10)
js.MakeSphere(5,"mySphere")
js.MoveVector("mySphere",0,10,10)
# js.MoveVector("mys*",0,10,10) will move all objects whose name starts with 'mys'
"""
Explanation: After creating the handle, you can use all the available functions
For details on these functions, please refer to Help>Document Library>API Reference Guide
Most JumpStart functions support wild card (*) capability
i.e. you can perform a given operation across multiple objects simultaneously
Example below shows how to create a sphere and move it to a specific location, using JumpStart functions
End of explanation
"""
#First, let's create a simple function to add a new optical property
#This will create a new property, and return the name
def AddNewProperty(propname):
    """Add a new optical property in the Property Manager and rename it.

    Parameters
    ----------
    propname : str
        Name to assign to the newly created property.

    Returns
    -------
    str
        The name of the new property (same as *propname*).
    """
    # Open the Property Manager dialog, add a new property, then close it
    lt0.Cmd("\O" + lt0.Str("PROPERTY_MANAGER[1]"))
    lt0.Cmd("AddNew=")
    lt0.Cmd("\Q")
    # The new property is the last one created; rename it
    lt0.DbSet("Property[@Last]", "Name", propname)
    # Return the name as documented above (previously returned 0 by mistake)
    return propname

op="myMirror"
AddNewProperty(op)
# Make the new property a simple specular mirror
key="PROPERTY[" + op + "]"
lt0.DbSet(key,"Simple Type","Mirror")
"""
Explanation: Creating a simple model for a parameter study
Make a block, set position/orientation
Change one surface to a "Smooth/specular Mirror"
Add a single NSRay
Add a dummy plane to capture the reflected ray
End of explanation
"""
mirrorname="myMirror"
# Make a thin tube (the mirror substrate); the name matches the optical property
js.MakeTube(0.25,10,10,"R",mirrorname)
# Assign the mirror property to zone 1 of the tube's left surface
key="SOLID[@Last].SURFACE[LeftSurface].ZONE[1]"
lt0.DbSet(key,"PropertyName",op)
#Set the orientation, Alpha=-45
key="Solid[@Last]"
lt0.DbSet(key,"Alpha",-45)
"""
Explanation: Add the mirror, set the optical property and orientation
End of explanation
"""
#Add a NSRay
# Non-sequential ray aimed from (0,10,0) toward the origin
lt0.Cmd("NSRayAim xyz 0,10,0 xyz 0,0,0")
#Add a dummy plane
# Dummy plane below the mirror to capture the reflected ray
lt0.Cmd("DummyPlane xyz 0,0,-20 xyz 0,0,-40")
"""
Explanation: Add the dummy and NSRay
End of explanation
"""
# Sweep the mirror orientation (Alpha, Beta) and record where the reflected
# ray lands on the dummy plane (local surface X/Y of the second ray segment).
key="Solid[1]"
segkey="NS_RAY[@Last].NS_SEGMENT[segment_2]"
numpts=11  # samples per axis; arrays, linspaces and loops all derive from this
datax=np.zeros((numpts,numpts))
datay=np.zeros((numpts,numpts))
# Use numpts consistently (previously hard-coded as 11 in two places)
alpha=np.linspace(-55,-35,numpts)
beta=np.linspace(-20,20,numpts)
for i in range(numpts):
    lt0.DbSet(key,"Alpha",float(alpha[i]))
    for j in range(numpts):
        lt0.DbSet(key,"Beta",float(beta[j]))
        datax[i,j]=lt0.DbGet(segkey,"Local_Surface_X")
        datay[i,j]=lt0.DbGet(segkey,"Local_Surface_Y")
plt.scatter(datax,datay)
plt.xlabel('X')
plt.ylabel('Y')
"""
Explanation: Now we are ready to change mirror and get the ray data
End of explanation
"""
from scipy.optimize import minimize
import numpy as np
import matplotlib.pyplot as plt
import clr
#Initiate the connection with LightTools
# NOTE(review): the DLL path is version-specific (8.4.0 here) — adjust for your install
clr.AddReference("C:\\Program Files\\Optical Research Associates\\LightTools 8.4.0\\Utilities.NET\\LTCOM64.dll")
from LTCOM64 import LTAPIx
lt0=LTAPIx()
# Bind the API handle to the running LightTools session
lt0.UpdateLTPointer
"""
Explanation: How to use optimization algorithms from 'scipy'
Use of 'minimize' function
There are three key parts to setup an optimization problem
Initiate the minimize function
Use initial variable data
call the objective function
Apply variable values generated by the minimize to LightTools model
Evaluate the metrit function, return the merit function value
Import minimize from scipy library
We still need the libraries mentioned above in order to connect to LightTools, etc.
Import the LTCOM64 library and create a connection to the running LightTools session
End of explanation
"""
def EvalMF():
    """Evaluate all LightTools optimization merit function items.

    Opens the merit function manager, triggers EvaluateAll, and closes it.
    Always returns 0; the merit value itself is read separately via DbGet.
    """
    lt0.Cmd("\O" + lt0.Str("OPT_MERITFUNCTIONS[1]"))
    lt0.Cmd("EvaluateAll=")
    lt0.Cmd("\Q")
    return 0
"""
Explanation: Our objective function, called by the minimize, should use the parameters sent from the minimize function
Update variables
Evaluate the merit function
Return the merit function value
First, a separate function to evaluate the merit function
End of explanation
"""
def setVarVals(v):
    """Apply the given values to all LightTools optimization variables.

    Parameters
    ----------
    v : array_like
        One value per optimization variable, in variable-manager order.
        Note that we do not skip disabled variables!
    """
    v=np.asarray(v)
    vlist=lt0.DbList('Lens_Manager[1]','Opt_DBVariable')
    vcount=lt0.ListSize(vlist)
    # Suspend database updates while setting all variables; the try/finally
    # guarantees updates are re-enabled and the list handle is released even
    # if a DbSet call fails part-way through.
    lt0.SetOption('DbUpdate',0)
    try:
        for i in range(1,vcount+1):
            vkey=lt0.ListAtPos(vlist,i)
            lt0.DbSet(vkey,'CurrentValue',float(v[i-1]))
            print('Variable Value: ' + str(v[i-1]))
    finally:
        lt0.SetOption('DbUpdate',1)
        lt0.ListDelete(vlist)
"""
Explanation: Another function to apply variable values
Note that we do not skip disabled variables!
End of explanation
"""
def ApplyVarsReturnMF(vardata):
    """Objective function for scipy.optimize.minimize.

    Applies the candidate variable values to the LightTools model,
    re-evaluates the merit function, and returns its current value.
    """
    setVarVals(np.asarray(vardata))
    EvalMF()
    merit=lt0.DbGet('OPT_MERITFUNCTIONS[1]','CurrentValue')
    print("MF Value: " + str(merit))
    print('****')
    return merit
"""
Explanation: Now we can create the objective function
'vardata' is what we get from minimize function
for example, if we setup 3 variables, we will get 3 values
End of explanation
"""
# Here's a sample list of optimization algorithms we can try
# Some of these algorithms require 'jac', which is the Jacobian (gradient), and it's not shown here
# The Nelder-Mead is the best option to try first, given its simplicity
optengines=['Nelder-Mead','BFGS','powell','Newton-CG','SLSQP','TNC']
# Read the current variable values from LightTools to use as the starting point;
# save the originals elsewhere, since optimization will modify them.
vlist=lt0.DbList('Lens_Manager[1]','Opt_DBVariable')
vcount=int(lt0.ListSize(vlist))
lt0.ListDelete(vlist)
v0=np.zeros((vcount))
for i in range(1,vcount+1):
    v0[i-1]=lt0.DbGet('OPT_DBVARIABLE[' +str(i) +']','CurrentValue')
# Note that 'maxiter' should be small (e.g. 5) for other algorithms, except 'Nelder-Mead'
res=minimize(ApplyVarsReturnMF,v0,method=optengines[0],options={'disp': True,'maxiter':50})
"""
Explanation: Finally, we call the minimize function with arguments
We need to pass the initial variable values to the minimize
For convenience, we can read the values from LightTools rather than hard coding
Make sure to save the original values since we will modify them during optimization
End of explanation
"""
# Re-run using 'powell' (optengines[2]); keep maxiter small for this method
res=minimize(ApplyVarsReturnMF,v0,method=optengines[2],options={'disp': True,'maxiter':5})
"""
Explanation: Simple optimization example
Open 'Simple2VarOpt.1.lts'
X and Y coordinates of the NSRay are variables
Merit function is defined for X=0, Y=0 (local intersection coordinates on dummy plane)
When optimized, the ray should be placed at the origin of the dummy plane
Run the above code blocks in the sequential order to see the optimization process
Results will be printed below the last code block, where we invoke the minimize function
Repeat the optimization for the following models
BezierSweptOpt.1.lts
Collimate a fan of rays using a collimator built with Swept geometry
The second profile of the Swept is 'Bezier', and we try to optimize Bezier parameters
Simple2VarOpt_Lens.1.lts
Focus a ray fan using a conic lens
The curvature and the conic constant are the variables
RayGrid_SplinePatch.1.lts
Start with a flat mirror, created with a splinepatch lens surface
Collimate the ray grid (i.e. perpendicular to the dummy plane)
This is a 9-variable problem and Nelder-Mead will require many iterations
Try 'powell' (or optengines[2])
<font color='red'>res=minimize(ApplyVarsReturnMF,v0, method=optengines[2] ,options={'disp': True, 'maxiter':5})</font>
End of explanation
"""
#Import the module and update the LT pointer
import LTData as ltd
ltd.lt0=lt0 #update the pointer
#Now you can get/set the data items like this
# NOTE(review): data access keys appear case-insensitive (mixed case is used below) — confirm
R = ltd.GetLTDbItem('Solid[1].Primitive[1].radius')
print('Radius is: ' + str(R))
ltd.SetLTDbItem('solid[1].primitive[1].radius',15)
illum=ltd.GetLTGridItem('receiver[1].mesh[1].CellValue_UI',45,45) #Accessing a 2D grid
print('Value is: ' + str(illum))
wave=ltd.GetLTGridItem('RECEIVER[1].SPECTRAL_DISTRIBUTION[1].Wavelength_At',5) #Accessing a 1D grid
print('Wavelength is: ' + str(wave))
#Make sure there's a valid spectral region with at least 1 row for the following code!
stat=ltd.SetLTGridItem('spectral_region[1].WavelengthAt',600,1) #Setting data in a 1D grid
"""
Explanation: Sample Library Project ("LTPython")
This is a library of supporting functions that enable you to write macros more efficiently
Shown below are few examples. Refer to the following section on using Spyder, in order to see how to utilize the function library in your scripts
In order to run the following examples, you must have the two modules (LTData.py, LTUtilities.py) in your work directory. Work directory is shown in the notebook kernel, as shown below
Note that the *.ipynb file is the jupyter notebook file we are using here
Several data get/set examples
Note that the full data access string, via Copy Data Access Name, can be passed to these functions
End of explanation
"""
#First, import standard libraries we need for arrays/plotting
import matplotlib.pyplot as plt # general plotting
import numpy as np #additional support for arrays, etc.
#Plot a mesh
ltd.PlotRaster('receiver[1].mesh[1]','cellvalue',colormap='jet',
               xlabel='X-Value',ylabel='Y-Value',title='Mesh Data',plotsize=(5,5),plottype='2D')
#Plot the spectral distribution
# returndata=True also returns the raw (wavelength, power) table for custom plotting
numrows,spd=ltd.PlotSpectralDistribution('receiver[1].spectral_distribution[1]',returndata=True)
plt.plot(spd[:,0],spd[:,1])
#Plot true color data. Note the index=2 for the CIE mesh
r,g,b=ltd.PlotTrueColorRster('receiver[1].mesh[2]',plotsize=(5,5),returndata=True)
"""
Explanation: Several examples of getting and plotting receiver mesh and spectral data
End of explanation
"""
#We need to save the screenshot as an image file in the work directory
#LTUtilities module handles the work directory and file IO
import LTUtilities as ltu
ltu.lt0=lt0
ltd.ltu=ltu
#check the workdir
wd=ltu.checkpyWorkDir()
print(ltu.workdirstr) # this is where image files are saved
#Get a screenshot of the 3D View
viewname='3d'
im,imname=ltd.GetViewImage(viewname)
plt.imshow(im)
#Get a screenshot of an open chart view
#Usually, V3D is the first view. The '3' below indicates the second chart view currently open
viewname='3'
im,imname=ltd.GetViewImage(viewname)
plt.imshow(im)
"""
Explanation: Examples of capturing screenshots
End of explanation
"""
#Let's get a screenshot of the full system
# View '1' is the first (3D) view; GetViewImage also returns the saved file name
viewname='1'
im,imname=ltd.GetViewImage(viewname)
plt.imshow(im)
"""
Explanation: Access to ray path data
Consider the following system, where three sources are used to illuminate a dummy plane
Assume we want to see ray paths going through the cylinder object
End of explanation
"""
#Ray path data
key='receiver[1]'
#First, let's hide all ray paths
lt0.Cmd('\O"RECEIVER[1].FORWARD_SIM_FUNCTION[1]" HideAll= \Q')
#Now get the ray path data, and show only the matchine paths
va,pa,ra,st=ltd.GetRayPathData(key,usevisibleonly=False)
# Two subplots, different size
from matplotlib import gridspec
fig = plt.figure(figsize=(6, 6))
gs = gridspec.GridSpec(2,1, height_ratios=[1,3])
ax1 = plt.subplot(gs[0])
ax1.plot(pa,'o')
ax1.set_xlabel('Path Index')
ax1.set_ylabel('Power')
ax1.grid(True)
s2='cylin' #this is the string we're searching for
for i in range(0,len(st)):
#print(st[i])
s1=st[i].lower()
if s2 in s1:
#print(str(i) + ';' + st[i])
ltd.SetLTGridItem(key + '.forward_sim_function[1].RayPathVisibleAt','yes',(i+1))
#Finally, let's get another screenshot to show the results
viewname='1'
im,imname=ltd.GetViewImage(viewname)
ax2 = plt.subplot(gs[1])
ax2.imshow(im)
ax2.axis('off')
plt.tight_layout()
"""
Explanation: Now we can get the ray path strings, and turn on only the paths that involve the cylinder object
End of explanation
"""
#receiver ray data
des=['raydatax','raydatay','raydataz']
reckey='receiver[1]'
simtype='Forward_Sim_Function[1]'  # not used below — kept for reference
#Note here that we specify the following function to
# use passfilters flag
N,M,raydata=ltd.GetLTReceiverRays(reckey,des,usepassfilters=True)
plt.plot(raydata[:,0],raydata[:,1],'o')
plt.xlabel('Ray Data Local X')
plt.ylabel('Ray Data Local Y')
plt.axis('equal')
"""
Explanation: Get receiver ray data that match the selected ray paths
End of explanation
"""
#Assume default data, x, y, z, l, m, n, p
simdata='forward_sim_function[1]'
reckey1='receiver[1]' #receiver on the lens surface
reckey2='receiver[2]' #receiver on the dummy plane
# Write a ray file from receiver 1 containing only rays whose ordinal numbers
# also appear on receiver 2; rayfname is the path of the generated file.
n,rayfname=ltd.MakeRayFileUsingRayOrdinal(reckey1,DataAccessKey_Ordinal=reckey2)
"""
Explanation: Receiver rays based on Ray Ordinal Number
Every ray starts with an ordinal number, based on the ray sequence (1, 2, 3, etc.)
During ray trace, ordinal number does not change
Ordinal number can be used as a unique identifier when filtering ray data on receivers
Consider the following ray paths through a lens
One can isolate the ray paths using ray path analyzer or a macro approach discussed above
However, in this particular case, we want to obtain the ray intersection points on the lens surface
A receiver on the lens surface can give the ray intersection points for all rays, not just the ray path shown
If the ray ordinal numbers on the receiver attached to the dummy plane are known, then we can match those ray ordinal numbers to the subset of rays on the receiver attached to the lens surface
The simplest way to visualize ray intersection points as a point cloud is to generate a ray data source using the subset of rays, and importing that ray source using the local coordinate system on the lens surface
End of explanation
"""
#Extra ray data, OPL
# Requires an Optical Path Length filter on the receiver (see text above)
reckey='receiver[1]'
#Notice that the second argument is an Enum (integer) for the filter type
N,exdata=ltd.GetLTReceiverRays_Extra(reckey,ltd.ExtraRayData.Optical_Path_Length.value)
plt.hist(exdata,bins=21,color='green')
plt.xlabel('OPL')
plt.ylabel('Frequency')
"""
Explanation: Import the resulting ray source using the local coordinate system on the lens surface
RaySource "C:/.../pyWorkDir/1mi8clam.txt" LXYZ 0,0,0 LXYZ 0,0,1 LXYZ 0,1,0
Note: rename the ray source with a meaningful name. The default name used is random
After the ray source is loaded into the model, intersection points can be visualized as a point cloud in the 3D model
Extra ray data for receiver filters
This data is not directly available with LTAPI4.GetMeshData()
Only way to access this data is the use DbGet() function for each ray
This means the process will be slower when there's a large number of rays on the receiver
Following example shows how to access optical path length for each ray
Optical Path Length filter is required on the receiver
End of explanation
"""
import win32com.client
import numpy as np
import matplotlib.pyplot as plt
#DbGet() and Mesh data example
lt = win32com.client.Dispatch("LightTools.LTAPI4")
# Define the mesh key once and reuse it (it was previously used without being defined)
MeshKey="receiver[1].Mesh[1]"
XD=int(lt.DbGet(MeshKey,"X_Dimension"))
YD=int(lt.DbGet(MeshKey,"Y_Dimension"))
k=np.ones((XD,YD))
#The CellFilter may not work for all options in COM mode
[stat,myd,f]=lt.GetMeshData(MeshKey,list(k),"CellValue")
g=np.asarray(myd)
g=np.rot90(g)
# After rot90, g has shape (YD, XD): use YD points along y
# (using XD for both axes only worked for square meshes)
x = np.linspace(-3, 3, XD)
y = np.linspace(-3, 3, YD)
X,Y = np.meshgrid(x, y)
# A single pcolormesh call (pcolormesh is faster than pcolor; the redundant
# duplicate plot call was removed)
plt.pcolormesh(X,Y,g,cmap="gray")
plt.xlabel("X")
plt.ylabel("Y")
#JumpStart library
js = win32com.client.Dispatch("LTCOM64.JSML")
js.MakeSphere(lt,5,"mySphere")
js.MoveVector(lt,"mySphere",0,10,10)
# js.MoveVector(lt,"mys*",0,10,10) will move all objects whose name starts with 'mys'
"""
Explanation: Running Spyder
Spyder provides a more versatile code environment with debug capabilities. For regular macro development work, this is the best environment
Typical Spyder environment will appear like this
How to load the test project into Spyder
Unzip the supplied LTPython.zip to your current working directory
This is usually C:/Users/YourUserName/
Run Spyder
Go to Project>Open Project.
Project files will appear like this
Test code to test most of the available functions are in "TestLTDataFunctions.py"
Most of the code is commented out. Make sure to uncomment the portions you like to try
Watch the attached video clip to see few examples
These are the different modules
LTData
This includes a set of functions to get/set database items, grid items, receiver data, ray path data, etc.
LTUtilities
This module contains some general purpose utilities, used by LTData and other modules
LTProperties
This is a special module to illustrate how to use JumpStart Optical Property functions
Notice that this module still uses COM. We will fix this issue. For now, this is the only way to access these JumpStart functions (fairly new to the JS library)
This module only contains "test code" that illustrates how to use the base functions in JS library
LTOpt
Few optimization examples. Use the attached test models for these examples
Ignore other modules
How to use win32COM client to connect to LightTools
Note that this is not a recommended method due to possible compatibility issues in the future!
End of explanation
"""
|
tensorflow/docs-l10n | site/ja/tensorboard/graphs.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
# Load the TensorBoard notebook extension.
%load_ext tensorboard
from datetime import datetime
from packaging import version
import tensorflow as tf
from tensorflow import keras
print("TensorFlow version: ", tf.__version__)
# This notebook relies on TF 2.x APIs (tf.function, tf.summary.trace_*)
assert version.parse(tf.__version__).release[0] >= 2, \
    "This notebook requires TensorFlow 2.0 or above."
import tensorboard
tensorboard.__version__
# Clear any logs from previous runs
!rm -rf ./logs/
"""
Explanation: TensorFlow グラフを調査する
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/tensorboard/graphs"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tensorboard/graphs.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tensorboard/graphs.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tensorboard/graphs.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
</table>
概要
TensorBoard の Graphs ダッシュボードは、TensorFlow モデルを調べるために使用できる強力なツールです。モデルの構造の概念的なグラフを素早く表示し、意図した設計と一致することを確認することができます。また、演算レベルのグラフも表示できるため、TensorFlow がどのようにプログラムを理解しているかを把握することができます。演算レベルグラフを調査すると、モデルの変更方法に関する洞察を得ることができます。たとえば、トレーニングの進行速度が思ったより遅い場合に、モデルを再設計することが可能となります。
このチュートリアルでは、グラフの診断データを生成して TensorBoard の Graph ダッシュボードで視覚化する方法を簡単に説明します。チュートリアルでは、Fashion-MNIST データセットの簡単な Keras Sequential モデルを定義してトレーニングし、モデルグラフのログと調査の方法を学習します。また、トレーニング API を使用して、新しい tf.function 注釈を使って作成した関数のグラフデータを生成します。
セットアップ
End of explanation
"""
# Define the model.
# Simple 4-layer classifier: flatten 28x28 images, one hidden ReLU layer,
# dropout for regularization, softmax over 10 classes.
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(10, activation='softmax')
])
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])
"""
Explanation: Keras モデルを定義する
この例では、単純な 4 つのレイヤーを使った Sequential モデルを分類器とします。
End of explanation
"""
(train_images, train_labels), _ = keras.datasets.fashion_mnist.load_data()
# Scale pixel values from [0, 255] to [0, 1]
train_images = train_images / 255.0
"""
Explanation: トレーニングデータをダウンロードして準備します。
End of explanation
"""
# Define the Keras TensorBoard callback.
# Timestamped log directory so successive runs don't overwrite each other
logdir="logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
# Train the model.
model.fit(
    train_images,
    train_labels,
    batch_size=64,
    epochs=5,
    callbacks=[tensorboard_callback])
"""
Explanation: モデルをトレーニングしてデータをログする
トレーニングの前に、ログディレクトリを指定して Keras TensorBoard コールバックを定義します。Model.fit() にこのコールバックを渡すことで、グラフデータが TensorBoard で視覚化できるようにログされるようにします。
End of explanation
"""
%tensorboard --logdir logs
"""
Explanation: 演算レベルグラフ
TensorBoard を起動し、UI が読み込まれるまで数秒ほど待ってから、上部にある「Graph」をタップして、Graphs ダッシュボードを選択します。
End of explanation
"""
!tensorboard dev upload \
--logdir logs \
--name "Sample op-level graph" \
--one_shot
"""
Explanation: オプションで TensorBoard.dev を使用して、ホストされた共有可能な実験を作成することもできます。
End of explanation
"""
# The function to be traced.
@tf.function
def my_func(x, y):
  # A simple hand-rolled layer.
  return tf.nn.relu(tf.matmul(x, y))

# Set up logging.
# Timestamped directory keeps traces from different runs separate
stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = 'logs/func/%s' % stamp
writer = tf.summary.create_file_writer(logdir)

# Sample data for your function.
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))

# Bracket the function call with
# tf.summary.trace_on() and tf.summary.trace_export().
# profiler=True adds memory/CPU profiling info to the trace
tf.summary.trace_on(graph=True, profiler=True)
# Call only one tf.function when tracing.
z = my_func(x, y)
with writer.as_default():
  tf.summary.trace_export(
      name="my_func_trace",
      step=0,
      profiler_outdir=logdir)

%tensorboard --logdir logs/func
"""
Explanation: デフォルトでは、TensorBoard には演算レベルグラフが表示されます(左側に、「Default」タグが選択されているのがわかります)。グラフが反転していることに注意してください。データは下から上に向かって流れているため、コードと比較すると逆さまになっています。それでも、グラフが Keras モデルに緊密に一致しており、ほかの計算ノードへのエッジが追加されていることはわかるでしょう。
グラフは非常に大きいことが多いですが、グラフ表示を操作することができます。
スクロールで拡大縮小
ドラッグでパン
ダブルクリックでノード展開の切り替え(ノードはほかのノードのコンテナである場合があります)
また、ノードをクリックしてメタデータを表示することもできます。こうすると、入力、出力、およびその他の詳細を確認することができます。
<!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_computation.png?raw=1"/> -->
<!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_computation_detail.png?raw=1"/> -->
概念グラフ
TensorBoard には、実行グラフのほかに概念グラフも表示されます。これは Keras モデルのみのビューです。保存したモデルを再利用する際に構造を調査または検証する場合に役立ちます。
概念グラフを表示するには、「keras」タグを選択します。折り畳まれた Sequential ノードが表示されますが、それをダブルクリックすると、モデルの構造を確認できます。
<!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_tag_selection.png?raw=1"/> -->
<!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_conceptual.png?raw=1"/> -->
tf.function のグラフ
これまでの例では、Keras レイヤーを定義して Model.fit() を呼び出すことでグラフを作成するという、Keras モデルのグラフについて説明しました。
tf.function 注釈を使用して、「グラフの自動作成」、つまり Python 計算関数を高性能 TensorFlow グラフに変換しなければならない状況に遭遇することがあるかもしれません。こういった場合には、TensorFlow Summary Trace API を使用して、TensorBoard に視覚化するための自動グラフ作成関数をログすることができます。
Summary Trace API を使用するには、次を行います。
関数を定義して、tf.function で注釈をつけます。
関数呼び出しの直前に tf.summary.trace_on() を使用します。
profiler=True を渡して、グラフにプロファイル情報(メモリ、CPU 時間)を追加します。
サマリーファイルライターを使って、tf.summary.trace_export() を呼び出してログデータを保存します。
その後、TensorBoard を使用すると、関数の動作を確認することができます。
<br/>
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/launching_into_ml/solutions/decision_trees_and_random_Forests_in_Python.ipynb | apache-2.0 | # Scikit-learn is a free machine learning library for Python.
# It features various algorithms like random forests, and k-neighbours.
# It also supports Python numerical and scientific libraries like NumPy and SciPy.
!pip install scikit-learn==0.22.2
"""
Explanation: Decision Trees and Random Forests in Python
Learning Objectives
Explore and analyze data using a Pairplot
Train a single Decision Tree
Predict and evaluate the Decision Tree
Compare the Decision Tree model to a Random Forest
Introduction
In this lab, you explore and analyze data using a Pairplot, train a single Decision Tree, predict and evaluate the Decision Tree, and compare the Decision Tree model to a Random Forest. Recall that the Decision Tree algorithm belongs to the family of supervised learning algorithms. Unlike other supervised learning algorithms, the decision tree algorithm can be used for solving both regression and classification problems too. Simply, the goal of using a Decision Tree is to create a training model that can use to predict the class or value of the target variable by learning simple decision rules inferred from prior data(training data).
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
End of explanation
"""
# Importing necessary tensorflow library and printing the TF version.
import tensorflow as tf
print("TensorFlow version: ",tf.version.VERSION)
# Here we'll import Pandas and Numpy data processing libraries
import pandas as pd
import numpy as np
# Use matplotlib for visualizing the model
import matplotlib.pyplot as plt
# Use seaborn for data visualization
import seaborn as sns
# Render plots inline in the notebook
%matplotlib inline
"""
Explanation: Restart the kernel before proceeding further (On the Notebook menu, select Kernel > Restart Kernel > Restart).
Load necessary libraries
End of explanation
"""
# Reading "kyphosis.csv" file using the read_csv() function included in the pandas library
df = pd.read_csv('../kyphosis.csv')
# Output the first five rows
df.head()
"""
Explanation: Get the Data
End of explanation
"""
# Here we are using the pairplot() function to plot multiple pairwise bivariate distributions in a dataset
# Coloring by 'Kyphosis' shows how well each feature pair separates the classes
# TODO 1
sns.pairplot(df,hue='Kyphosis',palette='Set1')
"""
Explanation: Exploratory Data Analysis
We'll just check out a simple pairplot for this small dataset.
End of explanation
"""
# Import train_test_split function from sklearn.model_selection
from sklearn.model_selection import train_test_split
# Features are every column except the target 'Kyphosis'
X = df.drop('Kyphosis',axis=1)
y = df['Kyphosis']
# Let's split up the data into a training set and a test set (30% held out)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
"""
Explanation: Train Test Split
End of explanation
"""
# Import Decision Tree Classifier from sklearn.tree
from sklearn.tree import DecisionTreeClassifier
# Create Decision Tree classifer object
dtree = DecisionTreeClassifier()
# Train Decision Tree Classifer
# TODO 2
dtree.fit(X_train,y_train)
"""
Explanation: Decision Trees
We'll start just by training a single decision tree.
End of explanation
"""
# Predict the response for test dataset
predictions = dtree.predict(X_test)
# Importing the classification_report and confusion_matrix
from sklearn.metrics import classification_report,confusion_matrix
# Here we will build a text report showing the main classification metrics
# TODO 3a
print(classification_report(y_test,predictions))
# Now we can compute confusion matrix to evaluate the accuracy of a classification
# TODO 3b
print(confusion_matrix(y_test,predictions))
"""
Explanation: Prediction and Evaluation
Let's evaluate our decision tree.
End of explanation
"""
# Here we are importing some built-in visualization functionalities for decision trees
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
import pydot
features = list(df.columns[1:])
features
# Now we are ready to visualize our Decision Tree model
dot_data = StringIO()
export_graphviz(dtree, out_file=dot_data,feature_names=features,filled=True,rounded=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png())
"""
Explanation: Tree Visualization
Scikit learn actually has some built-in visualization capabilities for decision trees, you won't use this often and it requires you to install the pydot library, but here is an example of what it looks like and the code to execute this:
End of explanation
"""
# Import Random Forest Model
from sklearn.ensemble import RandomForestClassifier
# Create a Gaussian Classifier
rfc = RandomForestClassifier(n_estimators=100)
# Train Random Forest Classifer
rfc.fit(X_train, y_train)
# Train model using the training sets
rfc_pred = rfc.predict(X_test)
# Now we can compute confusion matrix to evaluate the accuracy
# TODO 4a
print(confusion_matrix(y_test,rfc_pred))
# Finally we will build a text report showing the main metrics
# TODO 4b
print(classification_report(y_test,rfc_pred))
"""
Explanation: Random Forests
Now let's compare the decision tree model to a random forest.
End of explanation
"""
|
amirziai/learning | algorithms/Merge-Sort.ipynb | mit | import random
random.seed(0)
from resources.utils import run_tests
"""
Explanation: Merge Sort
Known to John von Neumann in 1945, 70+ years ago
Step 0- Testing utilities
Take a look at resources/utils.py if you're curious.
End of explanation
"""
def split(input_list):
    """
    Divide a list into left and right halves.

    When the length is odd, the left half receives the shorter piece
    (the midpoint is computed with floor division).

    :param input_list: list to divide
    :return: (left, right) tuple of lists
    """
    half = len(input_list) // 2
    left, right = input_list[:half], input_list[half:]
    return left, right
tests_split = [
({'input_list': [1, 2, 3]}, ([1], [2, 3])),
({'input_list': [1, 2, 3, 4]}, ([1, 2], [3, 4])),
({'input_list': [1, 2, 3, 4, 5]}, ([1, 2], [3, 4, 5])),
({'input_list': [1]}, ([], [1])),
({'input_list': []}, ([], []))
]
run_tests(tests_split, split)
"""
Explanation: Step 1- split
Given a list let's split it into two lists right down the middle
End of explanation
"""
def merge_sorted_lists(list_left, list_right):
    """
    Combine two already-sorted lists into a single sorted list.

    Runs in linear time, O(len(list_left) + len(list_right)).
    Stable: on ties the element from the left list is taken first.

    :param list_left: sorted list
    :param list_right: sorted list
    :return: merged sorted list
    """
    # Trivial cases: nothing to interleave when either side is empty
    if not list_left:
        return list_right
    if not list_right:
        return list_left

    merged = []
    i = j = 0
    # Walk both lists in lockstep, always taking the smaller head
    # (left wins ties, which keeps the merge stable).
    while i < len(list_left) and j < len(list_right):
        if list_left[i] <= list_right[j]:
            merged.append(list_left[i])
            i += 1
        else:
            merged.append(list_right[j])
            j += 1

    # Exactly one of these tails is non-empty; append the leftovers.
    merged += list_left[i:]
    merged += list_right[j:]
    return merged
tests_merged_sorted_lists = [
({'list_left': [1, 5], 'list_right': [3, 4]}, [1, 3, 4, 5]),
({'list_left': [5], 'list_right': [1]}, [1, 5]),
({'list_left': [], 'list_right': []}, []),
({'list_left': [1, 2, 3, 5], 'list_right': [4]}, [1, 2, 3, 4, 5]),
({'list_left': [1, 2, 3], 'list_right': []}, [1, 2, 3]),
({'list_left': [1], 'list_right': [1, 2, 3]}, [1, 1, 2, 3]),
({'list_left': [1, 1], 'list_right': [1, 1]}, [1, 1, 1, 1]),
({'list_left': [1, 1], 'list_right': [1, 2]}, [1, 1, 1, 2]),
({'list_left': [3, 3], 'list_right': [1, 4]}, [1, 3, 3, 4]),
]
run_tests(tests_merged_sorted_lists, merge_sorted_lists)
"""
Explanation: Step 2- merge sorted lists
Given two sorted lists we should be able to "merge" them into a single list as a linear operation
End of explanation
"""
def merge_sort(input_list):
    """Sort a list with recursive merge sort and return the sorted list."""
    # Base case: a list of 0 or 1 elements is already sorted.
    if len(input_list) < 2:
        return input_list
    left, right = split(input_list)
    # Recursively sort each half, then merge the two sorted halves.
    # This merge step is where all of the comparison work happens.
    return merge_sorted_lists(merge_sort(left), merge_sort(right))
random_list = [random.randint(1, 1000) for _ in range(100)]
tests_merge_sort = [
({'input_list': [1, 2]}, [1, 2]),
({'input_list': [2, 1]}, [1, 2]),
({'input_list': []}, []),
({'input_list': [1]}, [1]),
({'input_list': [5, 1, 1]}, [1, 1, 5]),
({'input_list': [9, 1, 10, 2]}, [1, 2, 9, 10]),
({'input_list': range(10)[::-1]}, list(range(10))),
({'input_list': random_list}, sorted(random_list))
]
run_tests(tests_merge_sort, merge_sort)
"""
Explanation: Step 3- merge sort
Merge sort only needs to utilize the previous 2 functions
We need to split the lists until they have a single element
A list with a single element is sorted (duh)
Now we can merge these single-element (or empty) lists
End of explanation
"""
|
NYUDataBootcamp/Projects | UG_S17/Wang-VIX.ipynb | mit | # Setup
import sys # system module
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import seaborn as sns # seaborn graphics module
import os # OS interface module
import quandl # financial data
print('Python version:', sys.version)
print('Pandas version: ', pd.__version__)
print('Seaborn version: ', sns.__version__)
print('quandl version: ', quandl.version.VERSION)
print('Today: ', dt.date.today())
# Time parameters used in analysis
# NOTE(review): start/end are defined here but the quandl.get call below
# passes hard-coded date strings instead, and its end_date ("2017-12-09")
# does not match `end` (2017-05-11) -- confirm which range is intended.
start = dt.datetime(2005, 1,1)
end= dt.datetime(2017,5,11)
# NOTE(review): an API key is committed in plain text here; it should be
# rotated and loaded from an environment variable or config file instead.
quandl.ApiConfig.api_key = "7W3a2MNz8r4uQebgVb5g"
# Pull CBOE VIX data (open/high/low/close columns) from Quandl.
vix = quandl.get("CBOE/VIX",start_date="2005-01-01",end_date="2017-12-09")
vix.info()
# cleaning dataset: drop open/high/low and rename the remaining close
# column to the short name 'Close' used throughout the analysis
vix = vix.drop(['VIX Open', 'VIX High', 'VIX Low'], axis=1)
vix.columns = ['Close']
vix.head()
# plotting dataframe
fig, ax = plt.subplots(figsize=(8,5))
sns.set_style('whitegrid')
vix['Close'].plot(color='orange')
fig.suptitle('CBOE Volatility Index (VIX)')
plt.show()
"""
Explanation: VIX as a measure of Market Uncertainty
by Brandon Wang (bw1115)
Data Bootcamp Final Project (NYU Stern Spring 2017)
Abstract
The VIX index, calculated and published by the Chicago Board Options Exchange, is known to be a "fear gauge" of the stock market. Specifically designed to move in the opposite direction of the S&P, the volatility index seeks to somehow quantify the Street's anxiety and risk appetite. Also priced into the index are the expected price swings of the broader market, as the VIX's underlying are S&P options and futures.
Objective
This project aims to examine the relationship between the VIX index and several other popular instruments or financial metrics. While the market can be entirely a random-walk, market participants still create narratives to explain movements and trends. For investors, the VIX is an important gauge of the possibility of these narratives. As such, the assumption is that the VIX is a robust indicator of market trends.
Data Sources
This analysis will draw on 2 financial data sources for numerous datasets.
Quandl
CBOE Volatility Index (VIX)
S&P 500 Index
Bloomberg Terminal
S&P Foward Price-Earnings Ratio, 12 months trailing
Global Economic Policy Uncertainty Index
Federal Funds Futures, short-term rates of 30-days
Merrill Lynch Move Index, measuring bond volatility
JP Morgan Currency Volatility Index
S&P E-Mini Futures Bid-Ask Spread (ES1 Index)
Quandl is a financial data company that pools data from many sources into its API. There are also unconvential data sets for purchase, collected by independent companies involved in the market. Luckily, the Chicago Board Options Exchange uploads its data to Quandl for free.
Elsewhere, the data is not-so-public, requiring access to Bloomberg's suite of data. While Bloomberg has its own API for programming languages like Python, the terminal and an account have to be tied to the computer used. Thus, I took the less fancy approach of extracting the data via Bloomberg's excel add-on and storing it locally.
The Bloomberg excel spreadsheets are available here.
These two sources have an underappreciated advantage: they are neat and tailored for data analysis, without too many unneccessary parameters. This removes the trouble of having to create a datetime index and format individual values.
The Current State of VIX
End of explanation
"""
sp500 = quandl.get("YAHOO/INDEX_GSPC",start_date="2005-01-03",end_date="2017-05-11")
sp500 = sp500.drop(['Open','High','Low','Volume','Adjusted Close'], axis=1)
# creating fig and ax, plotting objects
fig,ax1 = plt.subplots(figsize=(8,5))
sns.set_style('whitegrid')
ax2 = ax1.twinx()
a = ax1.plot(vix['Close'], color='orange', label='VIX')
b = ax2.plot(sp500['Close'], label='S&P 500')
# titling and formating
ax1.set_ylabel('VIX', color='orange')
ax2.set_ylabel('S&P 500', color='blue')
fig.suptitle('S&P gains as VIX remains subdued')
ax2.grid(False)
# adding lines on different axes into one legend
line = a + b
label = [l.get_label() for l in line]
ax1.legend(line, label, loc='upper left')
plt.show()
"""
Explanation: The index shows the relative calm that the stock market has enjoyed, especially in the first few months of 2017. Just recently, the index has hit its lowest closing level since December of 1993. However, long troughs in VIX with long periods of low volatility is troubling to some investors. Blankfein, CEO of Goldman Sachs, has cautioned against the current norm of calmness and the potential hubris of thinking everything is under control.
While many investors use VIX as a metric in their bets, it is worth noting that depending on VIX as a measurement of "fear" can cause ripple effects if it is inaccurate. In late 2006 and early 2007, leading up to the great financial crisis, the VIX was likewise hovering at a low level, reflecting a period of calm similar to the one we have today.
VIX Movement with S&P 500
End of explanation
"""
# changing directory to where .csv file is downloaded
os.chdir('C:/Users/Brandon/Downloads')
sp_pe = pd.read_excel('SPX PE.xlsx')
# cleaning dataset
sp_pe.columns = sp_pe.iloc[0]
sp_pe = sp_pe.set_index(['Date'])
sp_pe = sp_pe[1:]
sp_pe = sp_pe.rename(columns={'PE_RATIO': 'S&P P/E'})
# merging vix dataset with S&P PE ratios
vix_sppe = pd.merge(vix, sp_pe,
how='left',
right_index=True,
left_index=True,
)
# changing index for scatterplot
vix_sppe = vix_sppe.rename(columns={'Close': 'VIX'})
vix_sppe.index = range(len(vix_sppe))
# array of last 30 days
vix_sppe_30 = vix_sppe.iloc[-30:]
vix_sppe_30 = vix_sppe_30.values
vix_sppe.head()
fig, ax = plt.subplots()
sns.set(style='whitegrid')
sns.regplot('VIX', 'S&P P/E', data=vix_sppe)
fig.suptitle('Historical PE Ratios and Volatility')
ax.set_xlabel('VIX Volatility Level')
ax.set_ylabel('PE Multiple Level')
ax.set_ylim([10, 25])
for item in vix_sppe_30:
item.flatten()
ax.plot(item[0], item[1], 'o',
color='orange', markersize=10)
plt.show()
"""
Explanation: S&P Valuations and VIX - a rare relationship
End of explanation
"""
fig, ax = plt.subplots()
sns.kdeplot(vix_sppe, shade=True, cmap='Blues')
ax.set_xlabel('VIX Volatility Level')
ax.set_ylabel('PE Multiple Level')
ax.set_ylim([10, 25])
for item in vix_sppe_30:
item.flatten()
ax.plot(item[0], item[1], 'o',
color='orange', markersize=8)
plt.show()
"""
Explanation: With the absence of sharp-moves in the VIX, the S&P 500 index has reached record highs. However, it is difficult to ignore the rarity of how low the VIX is, while stocks enjoy lofty valuations. The orange circles represent the data points for the last 30 trading sessions, nearing the highest P/E multiples for the lowest instances of volatility.
Outliers include the batch of high PE multiples nearing 25, which occured at the height of real-estate bubble. Instances with incredibly high volatility represent days with large swings in prices.
End of explanation
"""
gpu = pd.read_excel('EPUCGLCP.xlsx')
# cleaning dataset
gpu.columns = gpu.iloc[0]
gpu = gpu.set_index(['Date'])
gpu = gpu[1:]
gpu = gpu.rename(columns={'PX_LAST': 'GPU Index'})
# merging with vix
vix_gpu = pd.merge(vix, gpu,
how='left',
right_index=True,
left_index=True,
)
vix_gpu.head()
# removing rows with NaN values
vix_gpu = vix_gpu[pd.notnull(vix_gpu['GPU Index'])]
vix_gpu.head()
# creating fig and ax, plotting objects
fig,ax1 = plt.subplots(figsize=(8,5))
sns.set_style('whitegrid')
ax2 = ax1.twinx()
a = ax1.plot(vix_gpu['VIX'], color='orange', label='VIX')
b = ax2.plot(vix_gpu['GPU Index'], color='red', label='GPU Index')
# titling and formating
ax1.set_ylabel('VIX', color='orange')
ax2.set_ylabel('GPU Index', color='red')
fig.suptitle('Global Political Uncertainty grows as VIX suppresed')
ax2.grid(False)
# adding lines on different axes into one legend
line = a + b
label = [l.get_label() for l in line]
ax1.legend(line, label, loc='upper left')
plt.show()
"""
Explanation: The density graph above better shows the rarity of the recent S&P valuations paried with the levels of VIX. More commonly, stocks are valued around the 17-18 mark, with a VIX level around the mid teens.
Investors can interpret this in two ways: either the market is complacent towards high market valuations and a potential equity bubble, or the VIX is inaccurate in measuring investor uncertainty as the S&P crawls towards unexplained high stock valuations.
VIX and the Macro Environment
Global Political Uncertainty
End of explanation
"""
# narrowing the data to this year
today = dt.date.today()
vix_gpu2015 = vix_gpu.loc['2015-01-01':today,
['VIX', 'GPU Index',]
]
# creating fig and ax, plotting objects
fig,ax1 = plt.subplots(figsize=(8,5))
sns.set_style('whitegrid')
ax2 = ax1.twinx()
a = ax1.plot(vix_gpu2015['VIX'], color='orange', label='VIX')
b = ax2.plot(vix_gpu2015['GPU Index'], color='red', label='GPU Index')
# titling and formating
ax1.set_ylabel('VIX', color='orange')
ax2.set_ylabel('GPU Index', color='red')
ax1.set_ylim([8,62.5]) #match limits in previous graph
ax2.set_ylim([47,310])
fig.suptitle('Divergence in recent years')
ax2.grid(False)
# adding lines on different axes into one legend
line = a + b
label = [l.get_label() for l in line]
ax1.legend(line, label, loc='upper left')
plt.show()
"""
Explanation: The index for global political uncertainty has, to some degree, tracked the VIX and its yearly trends. However, starting from 2016, we observe a divergence, perhaps showing a decline in the VIX's ability to gauge political uncertainty.
End of explanation
"""
ffr = pd.read_excel('Short-Term Fed Funds Rate (30 Day).xlsx')
# cleaning dataset
ffr.columns = ffr.iloc[0]
ffr = ffr.set_index(['Date'])
ffr = ffr[1:]
ffr = ffr.rename(columns={'PX_LAST': 'Fed Funds Rate'})
# merging with vix
vix_ffr = pd.merge(vix, ffr,
how='left',
right_index=True,
left_index=True,
)
vix_ffr.head()
# removing rows with NaN values
vix_ffr = vix_ffr[pd.notnull(vix_ffr['Fed Funds Rate'])]
vix_ffr.head()
# building out the implied Federal Funds Rate from the index's data
vix_ffr['Fed Funds Rate'] = 100 - vix_ffr['Fed Funds Rate']
vix_ffr.head()
# creating fig and ax, plotting objects
fig,ax1 = plt.subplots(figsize=(8,5))
sns.set_style('whitegrid')
ax2 = ax1.twinx()
a = ax1.plot(vix_ffr['VIX'], color='orange', label='VIX')
b = ax2.plot(vix_ffr['Fed Funds Rate'], color='green',
label='Fed Funds Rate')
# titling and formating
ax1.set_ylabel('VIX', color='orange')
ax2.set_ylabel('Fed Funds Rate (implied)', color='green')
fig.suptitle('VIX remains low as the Fed predicts growth')
ax2.grid(False)
# adding lines on different axes into one legend
line = a + b
label = [l.get_label() for l in line]
ax1.legend(line, label, loc='upper right')
plt.show()
"""
Explanation: As orthodoxy and populism extend from our own White House to politics across the world, the VIX remains suprisingly low for something representing uncertainty. President Trump's election has spurred a rally in financials, industrials, and the broader market, but struggles to codify his agenda in healthcare and tax reform. As investors pull back from their high expectations, VIX should have taken off. Despite the very volatility of the President himself, the VIX remains at its lowests.
Many investors explain away this divergence by citing strong U.S. corporate earnings, low unemployment, and rising inflation. Key macro indicators that are showing strength are essentially pushing away any concern for the policies of the current administration, or elsewhere in the world.
The Federal Reserve's suppression of VIX
End of explanation
"""
bondvol = pd.read_excel('MOVE Index.xlsx')
currvol = pd.read_excel('TYVIX Index.xlsx')
# cleaning dataset
bondvol.columns = bondvol.iloc[0]
bondvol = bondvol.set_index(['Date'])
bondvol = bondvol[1:]
bondvol = bondvol.rename(columns={'PX_LAST': 'Treasury Vol Index'})
currvol.columns = currvol.iloc[0]
currvol = currvol.set_index(['Date'])
currvol = currvol[1:]
currvol = currvol.rename(columns={'PX_LAST': 'Currency Vol Index'})
# merging with vix (equity vol)
vix = vix.rename(columns={'Close': 'VIX'})
marketvol = pd.merge(vix, currvol,
how='left',
right_index=True,
left_index=True,
)
marketvol = pd.merge(marketvol, bondvol,
how='left',
right_index=True,
left_index=True,
)
marketvol.head()
# narrowing the data to this year
today = dt.date.today()
marketvol = marketvol.loc['2017-01-01':today,
['VIX', 'Currency Vol Index',
'Treasury Vol Index']
]
marketvol.head()
# creating fig and ax, plotting objects
fig,ax1 = plt.subplots(figsize=(8,5))
sns.set_style('whitegrid')
ax2 = ax1.twinx()
a = ax1.plot(marketvol['VIX'], color='orange', label='VIX')
b = ax2.plot(marketvol['Treasury Vol Index'],
color='purple',
label='Treasury Vol Index')
c = ax1.plot(marketvol['Currency Vol Index'],
color='cyan',
label='Currency Vol Index')
# titling and formating
ax1.set_ylabel('VIX & Currency Vol Index')
ax2.set_ylabel('Treasury Vol Index', color='purple')
fig.suptitle('Volatility falling across all assets')
ax2.grid(False)
ax1.tick_params(axis='x', labelsize=8)
# adding lines on different axes into one legend
line = a + b + c
label = [l.get_label() for l in line]
ax1.legend(line, label, loc='upper center')
plt.show()
"""
Explanation: Investors commonly use implied volatility as shown in the VIX to measure uncertainty about interest rates, and specifically in this case, the implied federal funds target rate. Typically, when the implied federal funds target rate is rising, signaling strong inflation and growth, the VIX remains at its lows.
In monetary policy, the Fed has, since 2008, kept rates low to encourage investment. However, its recent support of higher benchmark rates has increased the implied fed fund rate, as many Fed officials believe the U.S. economy is on a growth path despite signs of weakness in consumer spending and wage growth. That message has had the effect of subduing levels of uncertainty in VIX, towards the latter half of 2016 to today.
VIX beyond Equities
End of explanation
"""
sp_fut = pd.read_excel('S&P E-Mini Futures.xlsx')
# cleaning dataset
sp_fut.columns = sp_fut.iloc[0]
sp_fut = sp_fut.set_index(['Date'])
sp_fut = sp_fut[1:]
sp_fut = sp_fut.rename(columns={'PX_BID': 'E-Mini Bid',
'PX_ASK': 'E-Mini Ask'})
# new column - bid-ask spread
title = 'S&P500 E-Mini Fut Bid-Ask Spread'
sp_fut[title] = sp_fut['E-Mini Ask'] - sp_fut['E-Mini Bid']
sp_fut.head()
# resampling by month and taking the average
sp_fut.index = pd.to_datetime(sp_fut.index)
sp_fut_resample = sp_fut.resample('MS').sum()
sp_fut_count = sp_fut.resample('MS').count()
sp_fut_resample[title] = sp_fut_resample[title] / sp_fut_count[title] # mean
# narrowing the data to this year
today = dt.date.today()
vix2 = vix.loc['2007-01-01':today, ['VIX']]
sp_fut_resample = sp_fut_resample.loc['2007-01-01':today, [title]]
sp_fut_resample.head()
# creating fig and ax, plotting objects
fig,ax1 = plt.subplots(figsize=(8,5))
sns.set_style('whitegrid')
ax2 = ax1.twinx()
a = ax1.plot(vix2['VIX'], color='orange', label='VIX')
b = ax2.plot(sp_fut_resample[title],
color='blue',
label=title)
# titling and formating
ax1.set_ylabel('VIX', color='orange')
ax2.set_ylabel(title, color='blue')
fig.suptitle('Market Depth reaching Recession levels')
ax2.grid(False)
# adding lines on different axes into one legend
line = a + b
label = [l.get_label() for l in line]
ax1.legend(line, label, loc='upper center')
plt.show()
"""
Explanation: What is concerning is the inconsistency between the uncertainty we observe on social media or news sites and the low levels of uncertainty in recent months, expressed by the volatility indexes above. The Fed even took this into consideration in their meeting from April, expressing their confusion as to why implied volatility has reached decade-long lows, despite the inaction we see from policy makers on key legislation such as Trump's tax reform and infrastructure program.
VIX Reliability Concerns
Investors commonly debate over whether VIX is a proper metric for volatility. In this section, we'll examine one of the main concerns about VIX's reliability: the erosion of demand for S&P 500 options as an insurance against instability.
End of explanation
"""
|
networks-lab/mkD3 | .ipynb_checkpoints/INTEG 120-checkpoint.ipynb | gpl-3.0 | # Only run this the VERY first time
!pip install metaknowledge
!pip install networkx
!pip install pandas
!pip install python-louvain
# Run this before you do anything else
import metaknowledge as mk
import networkx as nx
import pandas
import community
import webbrowser
"""
Explanation: <center> <img src="http://networkslab.org/metaknowledge/images/site-logo.png" alt="Drawing" style="width: 100px; margin: auto"/> <center>
<center> metaknowledge </center>
<center>NetLab, University of Waterloo</center>
<center>Reid McIlroy-Young, John McLevey, and Jillian Anderson</center>
My Outline
Introduction
What the purpose of this notebook is. What can it be used for.
Where does all the stuff need to be placed?
Install packages??
Import packages
Networks
Set variables
Do the backend processing
Make network
Add centrality measures
Give them the ability to filter? (Advanced feature)
Make the file
Display the file
RPYS
Set variables
Standard
Multi
Getting Set Up
The very first time you use this jupyter notebook you will need to run the cell directly below. Do not run the cell the next time you use this jupyter notebook. If you do, nothing bad will happen, it just isn't neccessary.
End of explanation
"""
inputFile = "/Users/jilliananderson/Desktop/mkD3Test/pos.txt"
networkType = "CoCitation"
nodeType = "author"
"""
Explanation: Networks
Define Variables
Next, we need to define some variables:
- filepath should be set as the filepath to your isi file.
- networkType should be "CoCitation", "CoAuthor", or "Citation".
- nodeType must be set to one of "full", "original", "author", "journal", or "year".
End of explanation
"""
# This cell creates the network based on
# the variables you provided above.
RC = mk.RecordCollection(inputFile)
if networkType == "CoCitation":
net = RC.networkCoCitation(nodeType = nodeType, coreOnly=True)
elif networkType == "CoAuthor":
net = RC.networkCoAuthor(coreOnly=True)
elif networkType == "Citation":
net = RC.networkCitation(nodeType=nodeType, coreOnly=True)
elif networkType == "BibCoupling":
net = RC.networkBibCoupling()
else:
print("Please ensure networkType has been set to one of the accepted values")
# This code detects communities and centrality
# measures for your network.
partition = community.best_partition(net)
# closeness = nx.closeness_centrality(net)
betweenness = nx.betweenness_centrality(net)
# eigenVect = nx.eigenvector_centrality(net)
for n in net.nodes():
comm = partition[n]
# clos = round(closeness[n], 3)
betw = round(betweenness[n], 3)
# eVct = round(eigenVect[n], 3)
net.add_node(n, community=comm, betweenness=betw)
# This code writes two .csv files to your computer.
# One is the edgeList and the other is the node Attribute file
mk.writeGraph(net, "myNet")
"""
Explanation: Make Network
End of explanation
"""
%%writefile network.html
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<title>Title Here</title>
<link rel="stylesheet" href="http://networkslab.org/mkD3/styles.css">
<script src="https://d3js.org/d3.v4.js"></script>
<script src="http://networkslab.org/mkD3/mkd3.js"></script>
</head>
<body>
<script type = "text/javascript">
mkd3.networkGraph("myNet_edgeList.csv", "myNet_nodeAttributes.csv")
</script>
</body>
"""
Explanation: Writing the HTML file
To display our network, we need to make the file which displays it.
End of explanation
"""
url = 'http://localhost:8888/files/network.html'
webbrowser.open(url)
"""
Explanation: Display the Network
Running the next cell
End of explanation
"""
inputFile = "/Users/jilliananderson/Desktop/mkD3Test/pos.txt"
minYear = 1900
maxYear = 2016
rpysType = "StandardBar"
"""
Explanation: RPYS Visualization
End of explanation
"""
RC = mk.RecordCollection(inputFile)
rpys = RC.rpys(minYear=1900, maxYear=2016)
df = pandas.DataFrame.from_dict(rpys)
df.to_csv("standard_rpys.csv")
# Creating CitationFile
citations = RC.getCitations()
df = pandas.DataFrame.from_dict(citations)
df.to_csv("standard_citation.csv")
%%writefile standardBar.html
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<title>Title Here</title>
<link rel="stylesheet" href="http://networkslab.org/mkD3/styles.css">
<script src="https://d3js.org/d3.v4.js"></script>
<script src="http://networkslab.org/mkD3/mkd3.js"></script>
</head>
<body>
<script type = "text/javascript">
mkd3.standardLine("standard_rpys.csv", "standard_citation.csv")
</script>
</body>
url = 'http://localhost:8888/files/standardBar.html'
webbrowser.open(url)
%%writefile standardLine.html
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<title>Title Here</title>
<link rel="stylesheet" href="http://networkslab.org/mkD3/styles.css">
<script src="https://d3js.org/d3.v4.js"></script>
<script src="http://networkslab.org/mkD3/mkd3.js"></script>
</head>
<body>
<script type = "text/javascript">
mkd3.standardLine("standard_rpys.csv", "standard_citation.csv")
</script>
</body>
url = 'http://localhost:8888/files/standardLine.html'
webbrowser.open(url)
"""
Explanation: Standard RPYS
End of explanation
"""
years = range(minYear, maxYear+1)
RC = mk.RecordCollection(inputFile)
# ***************************
# Create the multiRPYS file
# ***************************
dictionary = {'CPY': [],
"abs_deviation": [],
"num_cites": [],
"rank": [],
"RPY": []}
for i in years:
try:
RCyear = RC.yearSplit(i, i)
if len(RCyear) > 0:
rpys = RCyear.rpys(minYear=1900, maxYear=maxYear)
length = len(rpys['year'])
rpys['CPY'] = [i]*length
dictionary['CPY'] += rpys['CPY']
dictionary['abs_deviation'] += rpys['abs-deviation']
dictionary['num_cites'] += rpys['count']
dictionary['rank'] += rpys['rank']
dictionary['RPY'] += rpys['year']
except:
pass
df = pandas.DataFrame.from_dict(dictionary)
df.to_csv("multi_rpys.csv")
# ***************************
# Create the citation file
# ***************************
dictionary = {"author": [],
"journal": [],
"num_cites": [],
"RPY": [],
"CPY": []}
for i in years:
try:
RCyear = RC.yearSplit(i, i)
if len(RCyear) > 0:
citations = RCyear.getCitations()
length = len(citations['year'])
citations['CPY'] = [i]*length
dictionary['CPY'] += citations['CPY']
dictionary['author'] += citations['author']
dictionary['journal'] += citations['journal']
dictionary['num_cites'] += citations['num-cites']
dictionary['RPY'] += citations['year']
except:
pass
df = pandas.DataFrame.from_dict(dictionary)
df.to_csv("multi_citation.csv")
%%writefile multiRPYS.html
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<title>Title Here</title>
<link rel="stylesheet" href="http://networkslab.org/mkD3/styles.css">
<script src="https://d3js.org/d3.v4.js"></script>
<script src="http://networkslab.org/mkD3/mkd3.js"></script>
</head>
<body>
<script type = "text/javascript">
mkd3.multiRPYS("multi_rpys.csv", "multi_citation.csv")
</script>
</body>
url = 'http://localhost:8888/files/multiRPYS.html'
webbrowser.open(url)
"""
Explanation: Multi RPYS
End of explanation
"""
|
ledeprogram/algorithms | class7/homework/wang_zhizhou_7.ipynb | gpl-3.0 | import pandas as pd
%matplotlib inline
from sklearn import datasets
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import tree
# Load the iris dataset and keep only the last two feature columns
# (petal length and petal width, per the axis labels below).
iris = datasets.load_iris()
x = iris.data[:,2:]
y = iris.target
# Scatter the two petal features, colored by species label.
plt.figure(2, figsize=(8, 6))
plt.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.CMRmap)
plt.xlabel('Petal length (cm)')
plt.ylabel('Petal width (cm)')
# Fit a decision tree on the full dataset first (this fit is immediately
# superseded by the refit on the training split two lines below).
dt = tree.DecisionTreeClassifier()
dt = dt.fit(x,y)
# NOTE(review): sklearn.cross_validation was removed in newer scikit-learn;
# modern code imports train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
# 50/50 train/test holdout, then refit the tree on the training half only.
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.5,train_size=0.5)
dt = dt.fit(x_train,y_train)
from sklearn import metrics
import numpy as np
def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True):
    """
    Print selected evaluation metrics for a fitted classifier.

    :param X: feature matrix to evaluate on
    :param y: ground-truth labels for X
    :param clf: fitted estimator exposing .predict()
    :param show_accuracy: when True, print overall accuracy (5 decimal places)
    :param show_classification_report: when True, print per-class precision/recall/F1
    :param show_confussion_matrix: when True, print the confusion matrix
    """
    predictions = clf.predict(X)
    if show_accuracy:
        print(f"Accuracy:{metrics.accuracy_score(y, predictions):.5f}", "\n")
    if show_classification_report:
        print("Classification report")
        print(metrics.classification_report(y, predictions), "\n")
    if show_confussion_matrix:
        print("Confusion matrix")
        print(metrics.confusion_matrix(y, predictions), "\n")
measure_performance(x_test,y_test,dt)
"""
Explanation: We covered a lot of information today and I'd like you to practice developing classification trees on your own. For each exercise, work through the problem, determine the result, and provide the requested interpretation in comments along with the code. The point is to build classifiers, not necessarily good classifiers (that will hopefully come later)
1. Load the iris dataset and create a holdout set that is 50% of the data (50% in training and 50% in test). Output the results (don't worry about creating the tree visual unless you'd like to) and discuss them briefly (are they good or not?)
End of explanation
"""
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.25,train_size=0.75)
dt = dt.fit(x_train,y_train)
def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True):
    """Print evaluation metrics for the fitted classifier *clf* on (X, y).

    Parameters
    ----------
    X : array-like
        Feature matrix of the evaluation (hold-out) set.
    y : array-like
        True labels for *X*.
    clf : fitted estimator
        Classifier exposing a ``predict`` method.
    show_accuracy, show_classification_report, show_confussion_matrix : bool
        Toggle the corresponding section of the printed report.
    """
    y_pred=clf.predict(X)
    if show_accuracy:
        # Bug fix: the format spec was "{0:.75f}" (75 decimal places), almost
        # certainly a typo introduced from the 75/25 split; 5 decimals matches
        # the original definition of this function earlier in the notebook.
        print("Accuracy:{0:.5f}".format(metrics.accuracy_score(y, y_pred)),"\n")
    if show_classification_report:
        # Per-class precision / recall / f1-score / support.
        print("Classification report")
        print(metrics.classification_report(y,y_pred),"\n")
    if show_confussion_matrix:
        # Rows are true classes, columns are predicted classes.
        print("Confusion matrix")
        print(metrics.confusion_matrix(y,y_pred),"\n")
measure_performance(x_test,y_test,dt)
"""
Explanation: 2. Redo the model with a 75% - 25% training/test split and compare the results. Are they better or worse than before? Discuss why this may be.
End of explanation
"""
breast = datasets.load_breast_cancer()
breast.data
x = breast.data[:,:] # the attributes
y = breast.target
plt.figure(2, figsize=(8, 6))
plt.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.CMRmap)
"""
Explanation: 3. Load the breast cancer dataset (datasets.load_breast_cancer()) and perform basic exploratory analysis. What attributes to we have? What are we trying to predict?
For context of the data, see the documentation here: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
End of explanation
"""
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.5,train_size=0.5)
dt = dt.fit(x_train,y_train)
def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True):
    """Report accuracy, classification report and confusion matrix for *clf*.

    *X* and *y* are the hold-out features and true labels; *clf* must already
    be fitted and expose ``predict``.  Each ``show_*`` flag toggles the
    corresponding printed section.
    """
    # Compute predictions once; all three reports are derived from them.
    y_pred=clf.predict(X)
    if show_accuracy:
        print("Accuracy:{0:.5f}".format(metrics.accuracy_score(y, y_pred)),"\n")
    if show_classification_report:
        # Precision / recall / f1-score / support broken down by class.
        print("Classification report")
        print(metrics.classification_report(y,y_pred),"\n")
    if show_confussion_matrix:
        # Confusion matrix: rows = true labels, columns = predictions.
        print("Confusion matrix")
        print(metrics.confusion_matrix(y,y_pred),"\n")
measure_performance(x_test,y_test,dt)
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.25,train_size=0.75)
dt = dt.fit(x_train,y_train)
def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True):
    """Report accuracy, classification report and confusion matrix for *clf*.

    *X* and *y* are the hold-out features and true labels; *clf* must already
    be fitted and expose ``predict``.  Each ``show_*`` flag toggles the
    corresponding printed section.
    """
    y_pred=clf.predict(X)
    if show_accuracy:
        # Bug fix: "{0:.75f}" requested 75 decimal places (typo from the
        # 75/25 split); 5 decimals is consistent with the notebook's first
        # definition of this function.
        print("Accuracy:{0:.5f}".format(metrics.accuracy_score(y, y_pred)),"\n")
    if show_classification_report:
        # Precision / recall / f1-score / support broken down by class.
        print("Classification report")
        print(metrics.classification_report(y,y_pred),"\n")
    if show_confussion_matrix:
        # Confusion matrix: rows = true labels, columns = predictions.
        print("Confusion matrix")
        print(metrics.confusion_matrix(y,y_pred),"\n")
measure_performance(x_test,y_test,dt)
"""
Explanation: 4. Using the breast cancer data, create a classifier to predict the diagnosis (malignant vs. benign). Perform the above hold-out evaluation (50-50 and 75-25) and discuss the results.
End of explanation
"""
|
mne-tools/mne-tools.github.io | dev/_downloads/5bedf835c134d956a9b527dc8c5f488c/20_rejecting_bad_data.ipynb | bsd-3-clause | import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(events_file)
"""
Explanation: Rejecting bad data spans and breaks
This tutorial covers:
manual marking of bad spans of data,
automated rejection of data spans based on signal amplitude, and
automated detection of breaks during an experiment.
We begin as always by importing the necessary Python modules and loading some
example data <sample-dataset>; to save memory we'll use a pre-filtered
and downsampled version of the example data, and we'll also load an events
array to use when converting the continuous data to epochs:
End of explanation
"""
fig = raw.plot()
fig.fake_keypress('a') # Simulates user pressing 'a' on the keyboard.
"""
Explanation: Annotating bad spans of data
The tutorial tut-events-vs-annotations describes how
:class:~mne.Annotations can be read from embedded events in the raw
recording file, and tut-annotate-raw describes in detail how to
interactively annotate a :class:~mne.io.Raw data object. Here, we focus on
best practices for annotating bad data spans so that they will be excluded
from your analysis pipeline.
The reject_by_annotation parameter
In the interactive raw.plot() window, the annotation controls can be
opened by pressing :kbd:a. Here, new annotation labels can be created or
existing annotation labels can be selected for use.
End of explanation
"""
# Detect blink events from the EOG channel(s); column 0 of the returned
# events array holds the sample index of each detected blink.
eog_events = mne.preprocessing.find_eog_events(raw)
# Convert sample indices to seconds and start each annotation 250 ms before
# the detected blink.
onsets = eog_events[:, 0] / raw.info['sfreq'] - 0.25
# Each annotation spans 0.5 s (250 ms on either side of the blink).
durations = [0.5] * len(eog_events)
# The "bad" prefix is what makes reject_by_annotation-aware functions
# automatically exclude these spans.
descriptions = ['bad blink'] * len(eog_events)
blink_annot = mne.Annotations(onsets, durations, descriptions,
                              orig_time=raw.info['meas_date'])
raw.set_annotations(blink_annot)
"""
Explanation: You can see that you need to add a description first to start with
marking spans (Push the button "Add Description" and enter the description).
You can use any description you like, but annotations marking spans that
should be excluded from the analysis pipeline should all begin with "BAD" or
"bad" (e.g., "bad_cough", "bad-eyes-closed", "bad door slamming", etc). When
this practice is followed, many processing steps in MNE-Python will
automatically exclude the "bad"-labelled spans of data; this behavior is
controlled by a parameter reject_by_annotation that can be found in many
MNE-Python functions or class constructors, including:
creation of epoched data from continuous data (:class:mne.Epochs)
many methods of the independent components analysis class
(:class:mne.preprocessing.ICA)
functions for finding heartbeat and blink artifacts
(:func:~mne.preprocessing.find_ecg_events,
:func:~mne.preprocessing.find_eog_events)
covariance computations (:func:mne.compute_raw_covariance)
power spectral density computation (:meth:mne.io.Raw.plot_psd,
:func:mne.time_frequency.psd_welch)
For example, when creating epochs from continuous data, if
reject_by_annotation=True the :class:~mne.Epochs constructor will drop
any epoch that partially or fully overlaps with an annotated span that begins
with "bad".
Generating annotations programmatically
The tut-artifact-overview tutorial introduced the artifact detection
functions :func:~mne.preprocessing.find_eog_events and
:func:~mne.preprocessing.find_ecg_events (although that tutorial mostly
relied on their higher-level wrappers
:func:~mne.preprocessing.create_eog_epochs and
:func:~mne.preprocessing.create_ecg_epochs). Here, for demonstration
purposes, we make use of the lower-level artifact detection function to get
an events array telling us where the blinks are, then automatically add
"bad_blink" annotations around them (this is not necessary when using
:func:~mne.preprocessing.create_eog_epochs, it is done here just to show
how annotations are added non-interactively). We'll start the annotations
250 ms before the blink and end them 250 ms after it:
End of explanation
"""
eeg_picks = mne.pick_types(raw.info, meg=False, eeg=True)
raw.plot(events=eog_events, order=eeg_picks)
"""
Explanation: Now we can confirm that the annotations are centered on the EOG events. Since
blinks are usually easiest to see in the EEG channels, we'll only plot EEG
here:
End of explanation
"""
onsets = [
raw.first_time + 30,
raw.first_time + 180
]
durations = [60, 60]
descriptions = ['block_1', 'block_2']
block_annots = mne.Annotations(onset=onsets,
duration=durations,
description=descriptions,
orig_time=raw.info['meas_date'])
raw.set_annotations(raw.annotations + block_annots) # add to existing
raw.plot()
"""
Explanation: See the section tut-section-programmatic-annotations for more details
on creating annotations programmatically.
Detecting and annotating breaks
Another useful function, albeit not related to artifact detection per se,
is mne.preprocessing.annotate_break: It will generate annotations for
segments of the data where no existing annotations (or, alternatively:
events) can be found. It can therefore be used to automatically detect and
mark breaks, e.g. between experimental blocks, when recording continued.
For the sake of this example, let's assume an experiment consisting of two
blocks, the first one stretching from 30 to 90, and the second from 120 to
180 seconds. We'll mark these blocks by annotations, and then use
mne.preprocessing.annotate_break to detect and annotate any breaks.
<div class="alert alert-info"><h4>Note</h4><p>We need to take ``raw.first_time`` into account, otherwise the
onsets will be incorrect!</p></div>
End of explanation
"""
break_annots = mne.preprocessing.annotate_break(
raw=raw,
min_break_duration=20, # consider segments of at least 20 s duration
t_start_after_previous=5, # start annotation 5 s after end of previous one
t_stop_before_next=2 # stop annotation 2 s before beginning of next one
)
raw.set_annotations(raw.annotations + break_annots) # add to existing
raw.plot()
"""
Explanation: Now detect break periods. We can control how far the break annotations shall
expand toward both ends of each break.
End of explanation
"""
# only keep some button press events (code 32) for this demonstration
events_subset = events[events[:, -1] == 32]
# drop the first and last few events
events_subset = events_subset[3:-3]
break_annots = mne.preprocessing.annotate_break(
raw=raw,
events=events_subset, # passing events will ignore existing annotations
min_break_duration=25 # pick a longer break duration this time
)
# replace existing annotations (otherwise it becomes difficult to see any
# effects in the plot!)
raw.set_annotations(break_annots)
raw.plot(events=events_subset)
"""
Explanation: You can see that 3 segments have been annotated as BAD_break:
the first one starting with the beginning of the recording and ending 2
seconds before the beginning of block 1 (due to t_stop_before_next=2),
the second one starting 5 seconds after block 1 has ended, and ending 2
seconds before the beginning of block 2 (t_start_after_previous=5,
t_stop_before_next=2),
and the last one starting 5 seconds after block 2 has ended
(t_start_after_previous=5) and continuing until the end of the
recording.
You can also see that only the block_1 and block_2 annotations
were considered in the detection of the break periods – the EOG annotations
were simply ignored. This is because, by default,
~mne.preprocessing.annotate_break ignores all annotations starting with
'bad'. You can control this behavior via the ignore parameter.
It is also possible to perform break period detection based on an array
of events: simply pass the array via the events parameter. Existing
annotations in the raw data will be ignored in this case:
End of explanation
"""
# Maximum acceptable peak-to-peak amplitude per channel type: any epoch in
# which a channel exceeds its threshold is rejected as artifact-contaminated.
reject_criteria = dict(mag=3000e-15,     # 3000 fT
                       grad=3000e-13,    # 3000 fT/cm
                       eeg=100e-6,       # 100 µV
                       eog=200e-6)       # 200 µV

# Minimum acceptable peak-to-peak amplitude: a channel flatter than this
# within an epoch suggests a dead or disconnected sensor.
flat_criteria = dict(mag=1e-15,          # 1 fT
                     grad=1e-13,         # 1 fT/cm
                     eeg=1e-6)           # 1 µV
"""
Explanation: Rejecting Epochs based on channel amplitude
Besides "bad" annotations, the :class:mne.Epochs class constructor has
another means of rejecting epochs, based on signal amplitude thresholds for
each channel type. In the overview tutorial
<tut-section-overview-epoching> we saw an example of this: setting maximum
acceptable peak-to-peak amplitudes for each channel type in an epoch, using
the reject parameter. There is also a related parameter, flat, that
can be used to set minimum acceptable peak-to-peak amplitudes for each
channel type in an epoch:
End of explanation
"""
raw.set_annotations(blink_annot) # restore the EOG annotations
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria,
reject_by_annotation=False, preload=True)
epochs.plot_drop_log()
"""
Explanation: The values that are appropriate are dataset- and hardware-dependent, so some
trial-and-error may be necessary to find the correct balance between data
quality and loss of power due to too many dropped epochs. Here, we've set the
rejection criteria to be fairly stringent, for illustration purposes.
Two additional parameters, reject_tmin and reject_tmax, are used to
set the temporal window in which to calculate peak-to-peak amplitude for the
purposes of epoch rejection. These default to the same tmin and tmax
of the entire epoch. As one example, if you wanted to only apply the
rejection thresholds to the portion of the epoch that occurs before the
event marker around which the epoch is created, you could set
reject_tmax=0. A summary of the causes of rejected epochs can be
generated with the :meth:~mne.Epochs.plot_drop_log method:
End of explanation
"""
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria, preload=True)
epochs.plot_drop_log()
"""
Explanation: Notice that we've passed reject_by_annotation=False above, in order to
isolate the effects of the rejection thresholds. If we re-run the epoching
with reject_by_annotation=True (the default) we see that the rejections
due to EEG and EOG channels have disappeared (suggesting that those channel
fluctuations were probably blink-related, and were subsumed by rejections
based on the "bad blink" label).
End of explanation
"""
print(epochs.drop_log)
"""
Explanation: More importantly, note that many more epochs are rejected (~20% instead of
~2.5%) when rejecting based on the blink labels, underscoring why it is
usually desirable to repair artifacts rather than exclude them.
The :meth:~mne.Epochs.plot_drop_log method is a visualization of an
:class:~mne.Epochs attribute, namely epochs.drop_log, which stores
empty lists for retained epochs and lists of strings for dropped epochs, with
the strings indicating the reason(s) why the epoch was dropped. For example:
End of explanation
"""
epochs.drop_bad()
"""
Explanation: Finally, it should be noted that "dropped" epochs are not necessarily deleted
from the :class:~mne.Epochs object right away. Above, we forced the
dropping to happen when we created the :class:~mne.Epochs object by using
the preload=True parameter. If we had not done that, the
:class:~mne.Epochs object would have been memory-mapped_ (not loaded into
RAM), in which case the criteria for dropping epochs are stored, and the
actual dropping happens when the :class:~mne.Epochs data are finally loaded
and used. There are several ways this can get triggered, such as:
explicitly loading the data into RAM with the :meth:~mne.Epochs.load_data
method
plotting the data (:meth:~mne.Epochs.plot,
:meth:~mne.Epochs.plot_image, etc)
using the :meth:~mne.Epochs.average method to create an
:class:~mne.Evoked object
You can also trigger dropping with the :meth:~mne.Epochs.drop_bad method;
if reject and/or flat criteria have already been provided to the
epochs constructor, :meth:~mne.Epochs.drop_bad can be used without
arguments to simply delete the epochs already marked for removal (if the
epochs have already been dropped, nothing further will happen):
End of explanation
"""
stronger_reject_criteria = dict(mag=2000e-15, # 2000 fT
grad=2000e-13, # 2000 fT/cm
eeg=100e-6, # 100 µV
eog=100e-6) # 100 µV
epochs.drop_bad(reject=stronger_reject_criteria)
print(epochs.drop_log)
"""
Explanation: Alternatively, if rejection thresholds were not originally given to the
:class:~mne.Epochs constructor, they can be passed to
:meth:~mne.Epochs.drop_bad later instead; this can also be a way of
imposing progressively more stringent rejection criteria:
End of explanation
"""
|
miqlar/PyFME | examples/examples-notebook/example_001.ipynb | mit | # -*- coding: utf-8 -*-
"""
Explanation: EXAMPLE 001
This is the first example of PyFME. The main purpose of this example is to check if the aircraft trimmed in a given state maintains the trimmed flight condition.
The aircraft used is a Cessna 310, ISA1976 integrated with Flat Earth (euler angles).
Example with trimmed aircraft: stationary, horizontal, symmetric, wings level flight.
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
"""
Explanation: Import python libreries needed.
End of explanation
"""
from pyfme.aircrafts import Cessna310
from pyfme.environment.environment import Environment
from pyfme.environment.atmosphere import ISA1976
from pyfme.environment.gravity import VerticalConstant
from pyfme.environment.wind import NoWind
from pyfme.models.systems import EulerFlatEarth
from pyfme.simulator import BatchSimulation
from pyfme.utils.trimmer import steady_state_flight_trimmer
"""
Explanation: Import PyFME classes
End of explanation
"""
aircraft = Cessna310()
atmosphere = ISA1976()
gravity = VerticalConstant()
wind = NoWind()
environment = Environment(atmosphere, gravity, wind)
"""
Explanation: Initialize variables
End of explanation
"""
TAS = 312.5 * 0.3048 # m/s
h0 = 8000 * 0.3048 # m
psi0 = 1.0 # rad
x0, y0 = 0, 0 # m
turn_rate = 0.0 # rad/s
gamma0 = 0.0 # rad
system = EulerFlatEarth(lat=0, lon=0, h=h0, psi=psi0, x_earth=x0, y_earth=y0)
not_trimmed_controls = {'delta_elevator': 0.05,
'hor_tail_incidence': 0.00,
'delta_aileron': 0.01 * np.sign(turn_rate),
'delta_rudder': 0.01 * np.sign(turn_rate),
'delta_t': 0.5}
controls2trim = ['delta_elevator', 'delta_aileron', 'delta_rudder', 'delta_t']
trimmed_ac, trimmed_sys, trimmed_env, results = steady_state_flight_trimmer(
aircraft, system, environment, TAS=TAS, controls_0=not_trimmed_controls,
controls2trim=controls2trim, gamma=gamma0, turn_rate=turn_rate, verbose=2)
print(results)
my_simulation = BatchSimulation(trimmed_ac, trimmed_sys, trimmed_env)
tfin = 150 # seconds
N = tfin * 100 + 1
time = np.linspace(0, tfin, N)
initial_controls = trimmed_ac.controls
controls = {}
for control_name, control_value in initial_controls.items():
controls[control_name] = np.ones_like(time) * control_value
my_simulation.set_controls(time, controls)
par_list = ['x_earth', 'y_earth', 'height',
'psi', 'theta', 'phi',
'u', 'v', 'w',
'v_north', 'v_east', 'v_down',
'p', 'q', 'r',
'alpha', 'beta', 'TAS',
'F_xb', 'F_yb', 'F_zb',
'M_xb', 'M_yb', 'M_zb']
my_simulation.set_par_dict(par_list)
my_simulation.run_simulation()
plt.style.use('ggplot')
for ii in range(len(par_list) // 3):
three_params = par_list[3*ii:3*ii+3]
fig, ax = plt.subplots(3, 1, sharex=True)
for jj, par in enumerate(three_params):
ax[jj].plot(time, my_simulation.par_dict[par])
ax[jj].set_ylabel(par)
ax[jj].set_xlabel('time (s)')
fig.tight_layout()
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'])
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'] * 0)
ax.set_xlabel('x_earth')
ax.set_ylabel('y_earth')
ax.set_zlabel('z_earth')
plt.show()
"""
Explanation: Initial conditions
End of explanation
"""
|
DTOcean/dtocean-core | notebooks/DTOcean Tidal Hydrodynamics + Database Example.ipynb | gpl-3.0 | %matplotlib inline
from IPython.display import display, HTML
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (14.0, 8.0)
import numpy as np
from dtocean_core import start_logging
from dtocean_core.core import Core
from dtocean_core.menu import DataMenu, ModuleMenu, ProjectMenu
from dtocean_core.pipeline import Tree
def html_list(x):
    """Render the elements of *x* as an HTML unordered list string."""
    items = "".join("<li>{}</li>".format(entry) for entry in x)
    return "<ul>" + items + "</ul>"
def html_dict(x):
    """Render mapping *x* as an HTML unordered list of "key: value" items.

    Values are wrapped in ``<b>`` tags.  Iteration order follows the
    mapping's own order.
    """
    message = "<ul>"
    # Bug fix: dict.iteritems() only exists on Python 2; .items() works on
    # both Python 2 and 3 with identical behavior here.
    for name, status in x.items():
        message += "<li>{}: <b>{}</b></li>".format(name, status)
    message += "</ul>"
    return message
# Bring up the logger
start_logging()
"""
Explanation: DTOcean Tidal Hydrodynamics + Database Example
Note, this example assumes the DTOcean database and the Hydrodynamics Module have been installed
End of explanation
"""
new_core = Core()
data_menu = DataMenu()
project_menu = ProjectMenu()
module_menu = ModuleMenu()
pipe_tree = Tree()
"""
Explanation: Create the core, menus and pipeline tree
The core object carrys all the system information and is operated on by the other classes
End of explanation
"""
project_title = "DTOcean"
new_project = project_menu.new_project(new_core, project_title)
"""
Explanation: Create a new project
End of explanation
"""
credentials = {"host": "localhost",
"dbname": "dtocean_examples",
"user": "dtocean_user",
"pwd": "Lipicana19"} # Fill in password
data_menu.select_database(new_project, credentials=credentials)
"""
Explanation: Connect a database
End of explanation
"""
options_branch = pipe_tree.get_branch(new_core, new_project, "System Type Selection")
variable_id = "device.system_type"
my_var = options_branch.get_input_variable(new_core, new_project, variable_id)
my_var.set_raw_interface(new_core, "Tidal Fixed")
my_var.read(new_core, new_project)
"""
Explanation: Set the device type
End of explanation
"""
project_menu.initiate_pipeline(new_core, new_project)
"""
Explanation: Initiate the pipeline
This step will be important when the database is incorporated into the system as it will effect the operation of the pipeline.
End of explanation
"""
project_menu.initiate_options(new_core, new_project)
options_branch = pipe_tree.get_branch(new_core, new_project, 'Site and System Options')
options_branch.read_auto(new_core, new_project)
input_status = options_branch.get_input_status(new_core, new_project)
message = html_dict(input_status)
HTML(message)
"""
Explanation: Retrieve the available site and technology options from the DB
End of explanation
"""
my_var = options_branch.get_output_variable(new_core, new_project, "device.available_names")
site_list = my_var.get_value(new_core, new_project)
msg = html_list(site_list)
HTML(msg)
"""
Explanation: Check available device names
End of explanation
"""
filter_branch = pipe_tree.get_branch(new_core, new_project, 'Database Filtering Interface')
new_var = filter_branch.get_input_variable(new_core, new_project,
"device.selected_name")
new_var.set_raw_interface(new_core, "Example Tidal Device")
new_var.read(new_core, new_project)
input_status = filter_branch.get_input_status(new_core, new_project)
message = html_dict(input_status)
HTML(message)
"""
Explanation: Select a device
End of explanation
"""
available_locations = options_branch.get_output_variable(new_core, new_project,
"site.available_names")
message = html_list(available_locations.get_value(new_core, new_project))
message = "<h3>Available Locations</h3>" + message
HTML(message)
"""
Explanation: Check the available site names
End of explanation
"""
new_var = filter_branch.get_input_variable(new_core, new_project,
"site.selected_name")
new_var.set_raw_interface(new_core, "Example Tidal Site")
new_var.read(new_core, new_project)
input_status = filter_branch.get_input_status(new_core, new_project)
message = html_dict(input_status)
HTML(message)
"""
Explanation: Add the site location
End of explanation
"""
project_menu.initiate_bathymetry(new_core, new_project)
"""
Explanation: Collect the bathymetric data
The lease area polygon may be editied following this step.
End of explanation
"""
project_menu.initiate_filter(new_core, new_project)
"""
Explanation: Filter the database
End of explanation
"""
names = module_menu.get_available(new_core, new_project)
message = html_list(names)
HTML(message)
"""
Explanation: Discover available modules
End of explanation
"""
module_name = 'Hydrodynamics'
module_menu.activate(new_core, new_project, module_name)
hydro_branch = pipe_tree.get_branch(new_core, new_project, 'Hydrodynamics')
"""
Explanation: Activate a module
Note that the order of activation is important and that we can't deactivate yet!
End of explanation
"""
input_status = hydro_branch.get_input_status(new_core, new_project)
message = html_dict(input_status)
HTML(message)
"""
Explanation: Check the status of the module inputs
End of explanation
"""
project_menu.initiate_dataflow(new_core, new_project)
"""
Explanation: Initiate the dataflow
This indicates that the filtering and module / theme selections are complete
End of explanation
"""
new_core.inspect_level(new_project, "modules initial")
new_core.reset_level(new_project, preserve_level=True)
"""
Explanation: Move the system to the post-filter state and ready the system
End of explanation
"""
hydro_branch.read_auto(new_core, new_project)
input_status = hydro_branch.get_input_status(new_core, new_project)
message = html_dict(input_status)
HTML(message)
"""
Explanation: Get data using DTOcean DB
Using the auto_connect method on a branch, we can pull available variables from the database
End of explanation
"""
new_var = hydro_branch.get_input_variable(new_core,
new_project,
'device.turbine_performance')
new_var.plot(new_core, new_project)
"""
Explanation: Auto plot a variable
End of explanation
"""
plots = new_var.get_available_plots(new_core, new_project)
msg = html_list(plots)
HTML(msg)
"""
Explanation: Look for other available plots
End of explanation
"""
new_var.plot(new_core, new_project, 'Tidal Power Performance')
"""
Explanation: Plot a specific plot
End of explanation
"""
can_execute = module_menu.is_executable(new_core, new_project, module_name)
display(can_execute)
input_status = hydro_branch.get_input_status(new_core, new_project)
message = html_dict(input_status)
HTML(message)
"""
Explanation: Check if the module can be executed
End of explanation
"""
power_bin_width = hydro_branch.get_input_variable(new_core, new_project, "options.power_bin_width")
power_bin_width.set_raw_interface(new_core, 0.04)
power_bin_width.read(new_core, new_project)
rated_power = hydro_branch.get_input_variable(new_core, new_project, "project.rated_power")
rated_power.set_raw_interface(new_core, 10.)
rated_power.read(new_core, new_project)
tidal_occurrence_nbins = hydro_branch.get_input_variable(new_core, new_project, "project.tidal_occurrence_nbins")
tidal_occurrence_nbins.set_raw_interface(new_core, 12)
tidal_occurrence_nbins.read(new_core, new_project)
user_array_option = hydro_branch.get_input_variable(new_core, new_project, "options.user_array_option")
user_array_option.set_raw_interface(new_core, "Rectangular")
user_array_option.read(new_core, new_project)
optimisation_threshold = hydro_branch.get_input_variable(new_core, new_project, "options.optimisation_threshold")
optimisation_threshold.set_raw_interface(new_core, 0.9)
optimisation_threshold.read(new_core, new_project)
"""
Explanation: Enter user options and project data
End of explanation
"""
module_menu.execute_current(new_core, new_project)
"""
Explanation: Execute the current module
The "current" module refers to the next module to be executed in the chain (pipeline) of modules. This command will only execute that module and another will be used for executing all of the modules at once.
Note, any data supplied by the module will be automatically copied into the active data state.
End of explanation
"""
n_devices = new_core.get_data_value(new_project, "project.number_of_devices")
meta = new_core.get_metadata("project.number_of_devices")
name = meta.title
message_one = "<p><b>{}:</b> {}</p>".format(name, n_devices)
farm_annual_energy = new_core.get_data_value(new_project, "project.annual_energy")
meta = new_core.get_metadata("project.annual_energy")
name = meta.title
value = farm_annual_energy
units = meta.units[0]
message_two = "<p><b>{}:</b> <i>{}</i> ({})</p>".format(name, value, units)
HTML(message_one + message_two)
"""
Explanation: Examine the results
Currently, there is no robustness built into the core, so the assumption is that the module executed successfully. This will have to be improved towards deployment of the final software.
Let's check the number of devices and annual output of the farm, using just information in the data object.
End of explanation
"""
mean_power_per_dev_value = new_core.get_data_value(new_project,
"project.mean_power_per_device")
meta = new_core.get_metadata("project.mean_power_per_device")
chart_values = np.array(mean_power_per_dev_value.values())
plt.bar(range(len(mean_power_per_dev_value)),
chart_values,
align='center')
plt.xticks(range(len(mean_power_per_dev_value)),
mean_power_per_dev_value.keys())
plt.title(meta.title)
plt.ylabel(meta.units[0])
plt.tight_layout()
# plt.savefig('annual_power_per_device.png')
plt.show()
"""
Explanation: Plotting some graphs
By having data objects with set formats it should be possible to create automated plot generation. However, some plots may be too complex and some special cases may need defined.
End of explanation
"""
# Fetch the computed device layout (a mapping of device id -> coordinates)
# and its metadata for the plot title.
layout_value = new_core.get_data_value(new_project, "project.layout")
layout_meta = new_core.get_metadata("project.layout")

x = []
y = []
# Bug fix: dict.itervalues() only exists on Python 2; .values() behaves the
# same on both Python 2 and 3.
for coords in layout_value.values():
    x.append(coords.x)
    y.append(coords.y)

fig = plt.figure()
# NOTE(review): `axisbg` was deprecated in matplotlib 2.0 in favor of
# `facecolor`; kept as-is to avoid breaking the matplotlib version this
# notebook targets — confirm before upgrading.
ax1 = fig.add_subplot(1,1,1, axisbg='lightskyblue')
# Plot each device position as a black '+' marker.
ax1.plot(x,y,'k+', mew=2, markersize=10)
plt.title(layout_meta.title)
plt.axis('equal')
plt.show()
"""
Explanation: Plotting the Layout
This may require such a special case. It is not clear if a new data type is required or just special plots associated with variable IDs.
End of explanation
"""
|
citxx/sis-python | crash-course/strings.ipynb | mit | s1 = "Строки можно задавать в двойных кавычках"
s2 = 'А можно в одинарных'
print(s1, type(s1))
print(s2, type(s2))
"""
Explanation: <h1>Содержание<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Спецсимволы" data-toc-modified-id="Спецсимволы-1">Спецсимволы</a></span></li><li><span><a href="#Операции-со-строками" data-toc-modified-id="Операции-со-строками-2">Операции со строками</a></span><ul class="toc-item"><li><span><a href="#Сложение" data-toc-modified-id="Сложение-2.1">Сложение</a></span></li><li><span><a href="#Повторение" data-toc-modified-id="Повторение-2.2">Повторение</a></span></li><li><span><a href="#Индексация" data-toc-modified-id="Индексация-2.3">Индексация</a></span></li><li><span><a href="#Длина-строки" data-toc-modified-id="Длина-строки-2.4">Длина строки</a></span></li><li><span><a href="#Проверка-наличия-подстроки" data-toc-modified-id="Проверка-наличия-подстроки-2.5">Проверка наличия подстроки</a></span></li></ul></li><li><span><a href="#Кодировка-символов" data-toc-modified-id="Кодировка-символов-3">Кодировка символов</a></span><ul class="toc-item"><li><span><a href="#Код-по-символу" data-toc-modified-id="Код-по-символу-3.1">Код по символу</a></span></li><li><span><a href="#Символ-по-коду" data-toc-modified-id="Символ-по-коду-3.2">Символ по коду</a></span></li><li><span><a href="#ASCII" data-toc-modified-id="ASCII-3.3">ASCII</a></span></li></ul></li></ul></div>
Строки
Для представления строк в Python используется тип str (аналог std::string в С++ или String в Pascal).
End of explanation
"""
s = "Эта строка\nсостоит из двух строк"
print(s)
s = "А в этой\tстроке\nиспользуются\tсимволы табуляции"
print(s)
"""
Explanation: Спецсимволы
Для задания в строке особых символов (например переводов строк или табуляций) в Python используются специальный последовательности, вроде \n для перевода строки или \t для символа табуляции:
End of explanation
"""
s1 = "Это \"строка\" с кавычками."
s2 = 'И "это" тоже.'
s3 = 'С одинарными Кавычками \'\' всё работает также.'
print(s1, s2, s3)
print("Если надо задать обратный слэш \\, то его надо просто удвоить: '\\\\'")
"""
Explanation: Такой же синтаксис используетя для задания кавычек в строке:
End of explanation
"""
greeting = "Привет"
exclamation = "!!!"
print(greeting + exclamation)
"""
Explanation: Операции со строками
Сложение
Строки можно складывать. В этом случае они просто припишутся друг к другу. По-умному это называется конкатенацией.
End of explanation
"""
print("I will write in Python with style!\n" * 10)
print(3 * "Really\n")
"""
Explanation: Повторение
Можно умножать на целое число, чтобы повторить строку нужное число раз.
End of explanation
"""
s = "Это моя строка"
print(s[0], s[1], s[2])
"""
Explanation: Индексация
Получить символ на заданной позиции можно также, как и в C++ или Pascal. Индекасация начинается с 0.
End of explanation
"""
s = "Вы не можете изменить символы этой строки"
s[0] = "Т"
"""
Explanation: Но нельзя поменять отдельный символ. Это сделано для того, чтобы более логично и эффективно реализовать некоторые возможности Python.
End of explanation
"""
s = "Строка"
print(s[-1], "=", s[5])
print(s[-2], "=", s[4])
print(s[-3], "=", s[3])
print(s[-4], "=", s[2])
print(s[-5], "=", s[1])
print(s[-6], "=", s[0])
"""
Explanation: Перевод: ОшибкаТипа: объект 'str' не поддерживает присваивание элементов
Можно указывать отрицательные индексы, тогда нумерация происходит с конца.
End of explanation
"""
s = "Для получения длины используется функция len"
print(len(s))
"""
Explanation: Длина строки
End of explanation
"""
vowels = "аеёиоуыэюя"
c = "ы"
if c in vowels:
print(c, "- гласная")
else:
print(c, "- согласная")
s = "Python - лучший из неторопливых языков :)"
print("Python" in s)
print("C++" in s)
"""
Explanation: Проверка наличия подстроки
Проверить наличие или отсутствие в строке подстроки или символа можно с помощью операций in и not in.
End of explanation
"""
# Код любого символа можно получить с помощью функции ord
print(ord("a"))
# Можно пользоваться тем, что коды чисел, маленьких латинских букв и больших латинских букв идут подряд.
print("Цифры:", ord("0"), ord("1"), ord("2"), ord("3"), "...", ord("8"), ord("9"))
print("Маленькие буквы:", ord("a"), ord("b"), ord("c"), ord("d"), "...", ord("y"), ord("z"))
print("Большие буквы:", ord("A"), ord("B"), ord("C"), ord("D"), "...", ord("Y"), ord("Z"))
# Например, так можно получить номер буквы в алфавите
c = "g"
print(ord(c) - ord('a'))
"""
Explanation: Кодировка символов
В памяти компьютера каждый символ хранится как число. Соответствие между символом и числом называется кодировкой.
Самая простая кодировка для латинских букв, цифр и часто используемых символов — ASCII. Она задаёт коды (числа) для 128 символов и используется в Python для представления этих символов.
Код по символу
End of explanation
"""
# Для получение символа по коду используется функция chr
print(chr(100))
"""
Explanation: Символ по коду
End of explanation
"""
# Этот код выводит всю таблицу ASCII
for code in range(128):
print('chr(' + str(code) + ') =', repr(chr(code)))
"""
Explanation: ASCII
End of explanation
"""
|
zomansud/coursera | ml-classification/week-3/module-5-decision-tree-assignment-1-blank.ipynb | mit | import graphlab
graphlab.canvas.set_target('ipynb')
"""
Explanation: Identifying safe loans with decision trees
The LendingClub is a peer-to-peer leading company that directly connects borrowers and potential lenders/investors. In this notebook, you will build a classification model to predict whether or not a loan provided by LendingClub is likely to default.
In this notebook you will use data from the LendingClub to predict whether a loan will be paid off in full or the loan will be charged off and possibly go into default. In this assignment you will:
Use SFrames to do some feature engineering.
Train a decision-tree on the LendingClub dataset.
Visualize the tree.
Predict whether a loan will default along with prediction probabilities (on a validation set).
Train a complex tree model and compare it to simple tree model.
Let's get started!
Fire up Graphlab Create
Make sure you have the latest version of GraphLab Create. If you don't find the decision tree module, then you would need to upgrade GraphLab Create using
pip install graphlab-create --upgrade
End of explanation
"""
loans = graphlab.SFrame('lending-club-data.gl/')
loans.head()
"""
Explanation: Load LendingClub dataset
We will be using a dataset from the LendingClub. A parsed and cleaned form of the dataset is availiable here. Make sure you download the dataset before running the following command.
End of explanation
"""
loans.column_names()
"""
Explanation: Exploring some features
Let's quickly explore what the dataset looks like. First, let's print out the column names to see what features we have in this dataset.
End of explanation
"""
loans['grade'].show()
"""
Explanation: Here, we see that we have some feature columns that have to do with grade of the loan, annual income, home ownership status, etc. Let's take a look at the distribution of loan grades in the dataset.
End of explanation
"""
loans['home_ownership'].show()
"""
Explanation: We can see that over half of the loan grades are assigned values B or C. Each loan is assigned one of these grades, along with a more finely discretized feature called sub_grade (feel free to explore that feature column as well!). These values depend on the loan application and credit report, and determine the interest rate of the loan. More information can be found here.
Now, let's look at a different feature.
End of explanation
"""
# safe_loans = 1 => safe
# safe_loans = -1 => risky
# Recode the target: bad_loans uses 1 = risky / 0 = safe; flip it into a
# +1 (safe) / -1 (risky) column so labels follow the lecture convention.
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
# Drop the original column so only the recoded target remains.
loans = loans.remove_column('bad_loans')
"""
Explanation: This feature describes whether the loanee is mortaging, renting, or owns a home. We can see that a small percentage of the loanees own a home.
Exploring the target column
The target column (label column) of the dataset that we are interested in is called bad_loans. In this column 1 means a risky (bad) loan 0 means a safe loan.
In order to make this more intuitive and consistent with the lectures, we reassign the target to be:
* +1 as a safe loan,
* -1 as a risky (bad) loan.
We put this in a new column called safe_loans.
End of explanation
"""
loans['safe_loans'].show(view = 'Categorical')
"""
Explanation: Now, let us explore the distribution of the column safe_loans. This gives us a sense of how many safe and risky loans are present in the dataset.
End of explanation
"""
features = ['grade', # grade of the loan
'sub_grade', # sub-grade of the loan
'short_emp', # one year or less of employment
'emp_length_num', # number of years of employment
'home_ownership', # home_ownership status: own, mortgage or rent
'dti', # debt to income ratio
'purpose', # the purpose of the loan
'term', # the term of the loan
'last_delinq_none', # has borrower had a delinquincy
'last_major_derog_none', # has borrower had 90 day or worse rating
'revol_util', # percent of available credit being used
'total_rec_late_fee', # total late fees received to day
]
target = 'safe_loans' # prediction target (y) (+1 means safe, -1 is risky)
# Extract the feature columns and target column
loans = loans[features + [target]]
"""
Explanation: You should have:
* Around 81% safe loans
* Around 19% risky loans
It looks like most of these loans are safe loans (thankfully). But this does make our problem of identifying risky loans challenging.
Features for the classification algorithm
In this assignment, we will be using a subset of features (categorical and numeric). The features we will be using are described in the code comments below. If you are a finance geek, the LendingClub website has a lot more details about these features.
End of explanation
"""
# Split the dataset by label so each class can be counted and sampled separately.
safe_loans_raw = loans[loans[target] == +1]
risky_loans_raw = loans[loans[target] == -1]
print "Number of safe loans  : %s" % len(safe_loans_raw)
print "Number of risky loans : %s" % len(risky_loans_raw)
"""
Explanation: What remains now is a subset of features and the target that we will use for the rest of this notebook.
Sample data to balance classes
As we explored above, our data is disproportionally full of safe loans. Let's create two datasets: one with just the safe loans (safe_loans_raw) and one with just the risky loans (risky_loans_raw).
End of explanation
"""
print "Percentage of safe loans : %.2f" % ((float(len(safe_loans_raw)) / len(loans)) * 100)
print "Percentage of risky loans : %.2f" % ((float(len(risky_loans_raw)) / len(loans)) * 100)
"""
Explanation: Now, write some code to compute below the percentage of safe and risky loans in the dataset and validate these numbers against what was given using .show earlier in the assignment:
End of explanation
"""
# Since there are fewer risky loans than safe loans, find the ratio of the sizes
# and use that percentage to undersample the safe loans.
percentage = len(risky_loans_raw)/float(len(safe_loans_raw))
# Keep every risky loan; down-sample the safe loans to roughly the same count.
risky_loans = risky_loans_raw
# seed=1 keeps the sample reproducible across runs.
safe_loans = safe_loans_raw.sample(percentage, seed=1)
# Append the risky_loans with the downsampled version of safe_loans
loans_data = risky_loans.append(safe_loans)
"""
Explanation: One way to combat class imbalance is to undersample the larger class until the class distribution is approximately half and half. Here, we will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We used seed=1 so everyone gets the same results.
End of explanation
"""
print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data))
print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data))
print "Total number of loans in our new dataset :", len(loans_data)
"""
Explanation: Now, let's verify that the resulting percentage of safe and risky loans are each nearly 50%.
End of explanation
"""
train_data, validation_data = loans_data.random_split(.8, seed=1)
"""
Explanation: Note: There are many approaches for dealing with imbalanced data, including some where we modify the learning algorithm. These approaches are beyond the scope of this course, but some of them are reviewed in this paper. For this assignment, we use the simplest possible approach, where we subsample the overly represented class to get a more balanced dataset. In general, and especially when the data is highly imbalanced, we recommend using more advanced methods.
Split data into training and validation sets
We split the data into training and validation sets using an 80/20 split and specifying seed=1 so everyone gets the same results.
Note: In previous assignments, we have called this a train-test split. However, the portion of data that we don't train on will be used to help select model parameters (this is known as model selection). Thus, this portion of data should be called a validation set. Recall that examining performance of various potential models (i.e. models with different parameters) should be on validation set, while evaluation of the final selected model should always be on test data. Typically, we would also save a portion of the data (a real test set) to test our final model on or use cross-validation on the training set to select our final model. But for the learning purposes of this assignment, we won't do that.
End of explanation
"""
decision_tree_model = graphlab.decision_tree_classifier.create(train_data, validation_set=None,
target = target, features = features)
"""
Explanation: Use decision tree to build a classifier
Now, let's use the built-in GraphLab Create decision tree learner to create a loan prediction model on the training data. (In the next assignment, you will implement your own decision tree learning algorithm.) Our feature columns and target column have already been decided above. Use validation_set=None to get the same results as everyone else.
End of explanation
"""
small_model = graphlab.decision_tree_classifier.create(train_data, validation_set=None,
target = target, features = features, max_depth = 2)
"""
Explanation: Visualizing a learned model
As noted in the documentation, typically the max depth of the tree is capped at 6. However, such a tree can be hard to visualize graphically. Here, we instead learn a smaller model with max depth of 2 to gain some intuition by visualizing the learned tree.
End of explanation
"""
small_model.show(view="Tree")
"""
Explanation: In the view that is provided by GraphLab Create, you can see each node, and each split at each node. This visualization is great for considering what happens when this model predicts the target of a new data point.
Note: To better understand this visual:
* The root node is represented using pink.
* Intermediate nodes are in green.
* Leaf nodes in blue and orange.
End of explanation
"""
validation_safe_loans = validation_data[validation_data[target] == 1]
validation_risky_loans = validation_data[validation_data[target] == -1]
sample_validation_data_risky = validation_risky_loans[0:2]
sample_validation_data_safe = validation_safe_loans[0:2]
sample_validation_data = sample_validation_data_safe.append(sample_validation_data_risky)
sample_validation_data
"""
Explanation: Making predictions
Let's consider two positive and two negative examples from the validation set and see what the model predicts. We will do the following:
* Predict whether or not a loan is safe.
* Predict the probability that a loan is safe.
End of explanation
"""
decision_tree_model.predict(sample_validation_data)
"""
Explanation: Explore label predictions
Now, we will use our model to predict whether or not a loan is likely to default. For each row in the sample_validation_data, use the decision_tree_model to predict whether or not the loan is classified as a safe loan.
Hint: Be sure to use the .predict() method.
End of explanation
"""
float((sample_validation_data['safe_loans'] == decision_tree_model.predict(sample_validation_data)).sum()) / len(sample_validation_data)
"""
Explanation: Quiz Question: What percentage of the predictions on sample_validation_data did decision_tree_model get correct?
End of explanation
"""
decision_tree_model.predict(sample_validation_data, output_type='probability')
"""
Explanation: Explore probability predictions
For each row in the sample_validation_data, what is the probability (according decision_tree_model) of a loan being classified as safe?
Hint: Set output_type='probability' to make probability predictions using decision_tree_model on sample_validation_data:
End of explanation
"""
small_model.predict(sample_validation_data, output_type='probability')
"""
Explanation: Quiz Question: Which loan has the highest probability of being classified as a safe loan?
Checkpoint: Can you verify that for all the predictions with probability >= 0.5, the model predicted the label +1?
Tricky predictions!
Now, we will explore something pretty interesting. For each row in the sample_validation_data, what is the probability (according to small_model) of a loan being classified as safe?
Hint: Set output_type='probability' to make probability predictions using small_model on sample_validation_data:
End of explanation
"""
sample_validation_data[1]
"""
Explanation: Quiz Question: Notice that the probability predictions are the exact same for the 2nd and 3rd loans. Why would this happen?
Visualize the prediction on a tree
Note that you should be able to look at the small tree, traverse it yourself, and visualize the prediction being made. Consider the following point in the sample_validation_data
End of explanation
"""
small_model.show(view="Tree")
"""
Explanation: Let's visualize the small tree here to do the traversing for this data point.
End of explanation
"""
small_model.predict(sample_validation_data[1])
"""
Explanation: Note: In the tree visualization above, the values at the leaf nodes are not class predictions but scores (a slightly advanced concept that is out of the scope of this course). You can read more about this here. If the score is $\geq$ 0, the class +1 is predicted. Otherwise, if the score < 0, we predict class -1.
Quiz Question: Based on the visualized tree, what prediction would you make for this data point?
Now, let's verify your prediction by examining the prediction made using GraphLab Create. Use the .predict function on small_model.
End of explanation
"""
print small_model.evaluate(train_data)['accuracy']
print decision_tree_model.evaluate(train_data)['accuracy']
"""
Explanation: Evaluating accuracy of the decision tree model
Recall that the accuracy is defined as follows:
$$
\mbox{accuracy} = \frac{\mbox{# correctly classified examples}}{\mbox{# total examples}}
$$
Let us start by evaluating the accuracy of the small_model and decision_tree_model on the training data
End of explanation
"""
print small_model.evaluate(validation_data)['accuracy']
print round(decision_tree_model.evaluate(validation_data)['accuracy'],2)
"""
Explanation: Checkpoint: You should see that the small_model performs worse than the decision_tree_model on the training data.
Now, let us evaluate the accuracy of the small_model and decision_tree_model on the entire validation_data, not just the subsample considered above.
End of explanation
"""
big_model = graphlab.decision_tree_classifier.create(train_data, validation_set=None,
target = target, features = features, max_depth = 10)
"""
Explanation: Quiz Question: What is the accuracy of decision_tree_model on the validation set, rounded to the nearest .01?
Evaluating accuracy of a complex decision tree model
Here, we will train a large decision tree with max_depth=10. This will allow the learned tree to become very deep, and result in a very complex model. Recall that in lecture, we prefer simpler models with similar predictive power. This will be an example of a more complicated model which has similar predictive power, i.e. something we don't want.
End of explanation
"""
print big_model.evaluate(train_data)['accuracy']
print big_model.evaluate(validation_data)['accuracy']
"""
Explanation: Now, let us evaluate big_model on the training set and validation set.
End of explanation
"""
predictions = decision_tree_model.predict(validation_data)
"""
Explanation: Checkpoint: We should see that big_model has even better performance on the training set than decision_tree_model did on the training set.
Quiz Question: How does the performance of big_model on the validation set compare to decision_tree_model on the validation set? Is this a sign of overfitting?
Quantifying the cost of mistakes
Every mistake the model makes costs money. In this section, we will try and quantify the cost of each mistake made by the model.
Assume the following:
False negatives: Loans that were actually safe but were predicted to be risky. This results in an opportunity cost of losing a loan that would have otherwise been accepted.
False positives: Loans that were actually risky but were predicted to be safe. These are much more expensive because it results in a risky loan being given.
Correct predictions: All correct predictions don't typically incur any cost.
Let's write code that can compute the cost of mistakes made by the model. Complete the following 4 steps:
1. First, let us compute the predictions made by the model.
1. Second, compute the number of false positives.
2. Third, compute the number of false negatives.
3. Finally, compute the cost of mistakes made by the model by adding up the costs of false negatives and false positives.
First, let us make predictions on validation_data using the decision_tree_model:
End of explanation
"""
false_positives = (predictions == +1) == (validation_data['safe_loans'] == -1)
print false_positives.sum()
print len(predictions)
fp = 0
for i in xrange(len(predictions)):
if predictions[i] == 1 and validation_data['safe_loans'][i] == -1:
fp += 1
print fp
"""
Explanation: False positives are predictions where the model predicts +1 but the true label is -1. Complete the following code block for the number of false positives:
End of explanation
"""
false_negatives = (predictions == -1) == (validation_data['safe_loans'] == +1)
print false_negatives.sum()
print len(predictions)
fn = 0
for i in xrange(len(predictions)):
if predictions[i] == -1 and validation_data['safe_loans'][i] == 1:
fn += 1
print fn
"""
Explanation: False negatives are predictions where the model predicts -1 but the true label is +1. Complete the following code block for the number of false negatives:
End of explanation
"""
# Total dollar cost of the model's mistakes on the validation set:
# $20,000 per false positive (a risky loan was funded) plus
# $10,000 per false negative (a good loan was rejected).
cost = fp * 20000 + fn * 10000
cost
"""
Explanation: Quiz Question: Let us assume that each mistake costs money:
* Assume a cost of \$10,000 per false negative.
* Assume a cost of \$20,000 per false positive.
What is the total cost of mistakes made by decision_tree_model on validation_data?
End of explanation
"""
|
knowledgeanyhow/notebooks | noaa/hdtadash/weather_dashboard.ipynb | mit | %matplotlib inline
import os
import struct
import glob
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
import folium
from IPython.display import HTML
from IPython.display import Javascript, display
"""
Explanation: NOAA Weather Analysis
Frequency of Daily High and Low Record Temperatures
Analysis
Goal
Given historical data for a weather station in the US, what is the frequency for new high or low temperature records?
If there is scientific evidence of extreme fluctuations in our weather patterns due to human impact to the environment, then we should be able to identify significant factual examples of increases in the frequency in extreme temperature changes within the weather station data.
There has been a great deal of discussion around climate change and global warming. Since NOAA has made their data public, let us explore the data ourselves and see what insights we can discover.
General Analytical Questions
For each of the possible 365 days of the year that a specific US weather station has gathered data, can we identify the frequency at which daily High and Low temperature records are broken.
Does the historical frequency of daily temperature records (High or Low) in the US provide statistical evidence of dramatic climate change?
For a given weather station, what is the longest duration of daily temperature record (High or Low) in the US?
Approach
This analysis is based on a <font color="green">15-March-2015</font> snapshot of the Global Historical Climatology Network (GHCN) dataset.
This analysis leverages Historical Daily Summary weather station information that was generated using data derived from reproducible research. This summary data captures information about a given day throughout history at a specific weather station in the US. This dataset contains 365 rows where each row depicts the aggregated low and high record temperatures for a specific day throughout the history of the weather station.
Each US weather station is associated with a single CSV file that contains historical daily summary data.
All temperatures reported in Fahrenheit.
Environment Setup
This notebook leverages several Jupyter Incubation Extensions (urth_components):
Declarative Widgets
Dynamic Dashboards
It also depends on a custom polymer widget:
urth-raw-html.html
Import Python Dependencies
Depending on the state of your IPython environment, you may need to pre-instal a few dependencies:
$ pip install seaborn folium
End of explanation
"""
%%html
<link rel="import" href="urth_components/paper-dropdown-menu/paper-dropdown-menu.html" is='urth-core-import' package='PolymerElements/paper-dropdown-menu'>
<link rel="import" href="urth_components/paper-menu/paper-menu.html" is='urth-core-import' package='PolymerElements/paper-menu'>
<link rel="import" href="urth_components/paper-item/paper-item.html" is='urth-core-import' package='PolymerElements/paper-item'>
<link rel="import" href="urth_components/paper-button/paper-button.html" is='urth-core-import' package='PolymerElements/paper-button'>
<link rel="import" href="urth_components/paper-card/paper-card.html" is='urth-core-import' package='PolymerElements/paper-card'>
<link rel="import" href="urth_components/paper-slider/paper-slider.html" is='urth-core-import' package='PolymerElements/paper-slider'>
<link rel="import" href="urth_components/google-map/google-map.html" is='urth-core-import' package='GoogleWebComponents/google-map'>
<link rel="import" href="urth_components/google-map/google-map-marker.html" is='urth-core-import' package='GoogleWebComponents/google-map'>
<link rel="import" href="urth_components/urth-viz-table/urth-viz-table.html" is='urth-core-import'>
<link rel="import" href="urth_components/urth-viz-chart/urth-viz-chart.html" is='urth-core-import'>
<!-- Add custom Polymer Widget for injecting raw HTML into a urth-core widget -->
<link rel="import" href="./urth-raw-html.html">
<!-- HACK: Use Property Watch patch for v0.1.0 of declarativewidgets; This can be removed for v0.1.1 -->
<link rel="import" href="./urth-core-watch.html">
"""
Explanation: Load urth components
End of explanation
"""
# Module-level holders for query results shared across dashboard cells.
# NOTE(review): not assigned anywhere in the visible code — presumably
# populated by handler cells later in the notebook; confirm before removing.
DATA_STATE_STATION_LIST = None
DATA_STATION_DETAIL_RESULTS = None
DATA_FREQUENCY_RESULTS = None
"""
Explanation: Declare Globals
End of explanation
"""
IMAGE_DIRECTORY = "plotit"
def image_cleanup(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
else:
for filePath in glob.glob(dirname+"/*.png"):
if os.path.isfile(filePath):
os.remove(filePath)
#image_cleanup(IMAGE_DIRECTORY)
"""
Explanation: Prepare Filesystem
Data Preparation Options
Use the NOAA data Munging project to generate CSV files for the latest NOAA data.
Use the sample March 16, 2015 snapshot provided in this repo and do one of the following:
Open a terminal session and run these commands:
cd /home/main/notebooks/noaa/hdtadash/data/
tar -xvf station_summaries.tar
Enable, execute and then disable the following bash cell
Plot Storage
Earlier versions of this notebook stored chart images to disk. We used a specific directory to store plot images (*.png files). However, this approach does not work if the notebook user would like to deploy as a local application.
End of explanation
"""
# Use this global variable to specify the path for station summary files.
NOAA_STATION_SUMMARY_PATH = "/home/main/notebooks/noaa/hdtadash/data/"
# Use this global variable to specify the path for the GHCND Station Directory
STATION_DETAIL_FILE = '/home/main/notebooks/noaa/hdtadash/data/ghcnd-stations.txt'
# Station detail structures for building station lists
# Column order for every station-detail DataFrame built in this notebook.
station_detail_colnames = ['StationID','State','Name',
                           'Latitude','Longitude','QueryTag']
# Blank prototype record; copied with dict() before filling in the parsed
# fields of each station.
station_detail_rec_template = {'StationID': "",
                               'State': "",
                               'Name': "",
                               'Latitude': "",
                               'Longitude': "",
                               'QueryTag': ""
                              }
# -----------------------------------
# Station Detail Processing
# -----------------------------------
def get_filename(pathname):
    '''Return the filename portion of *pathname*, without its extension.

    Uses os.path instead of splitting on '/', so it also behaves correctly
    on platforms with a different path separator.
    '''
    fname, _ = os.path.splitext(os.path.basename(pathname))
    return fname
def fetch_station_list():
    '''Return list of available stations given collection of summary files on disk.

    Scans NOAA_STATION_SUMMARY_PATH for files named <StationID>_sum.csv and
    returns the extracted station IDs.
    '''
    raw_files = os.path.join(NOAA_STATION_SUMMARY_PATH, '', '*_sum.csv')
    # The summary filename is "<StationID>_sum", so the station ID is the
    # first underscore-separated token. (The original looped with an unused
    # enumerate index; a comprehension says the same thing directly.)
    return [str(get_filename(fname).split('_')[0])
            for fname in glob.glob(raw_files)]

USA_STATION_LIST = fetch_station_list()
def gather_states(fname, stations):
    '''Return a list of unique State abbreviations. Weather station data exists for these states.

    Reads the fixed-width GHCND station directory *fname* and keeps only
    stations whose IDs appear in *stations*; states come back sorted.
    '''
    state_list = []
    # Iterate the file lazily; no readlines() slurp and no redundant
    # f.close() inside the with-block (both present in the original).
    with open(fname, 'r', encoding='utf-8') as f:
        for line in f:
            state_list += noaa_gather_station_detail(line, stations)
    frame = pd.DataFrame(state_list, columns=station_detail_colnames)
    # sort_values replaces the long-deprecated DataFrame.sort.
    return frame.sort_values('State').State.unique().tolist()
def noaa_gather_station_detail(line, slist):
    '''Build a list of station tuples for stations in the USA.

    Parses one fixed-width line of the GHCND station directory and returns
    a one-element list when the station is a USC/USW station present in
    *slist*, otherwise an empty list.
    '''
    matches = []
    if line[0:3] in ('USC', 'USW'):
        fields = struct.unpack('12s9s10s7s2s30s', line[0:70].encode())
        station_id = fields[0].decode().strip()
        if station_id in slist:
            record = dict(station_detail_rec_template)
            record['StationID'] = station_id
            record['State'] = fields[4].decode().strip()
            record['Name'] = fields[5].decode().strip()
            record['Latitude'] = fields[1].decode().strip()
            record['Longitude'] = fields[2].decode().strip()
            record['QueryTag'] = "{0} at {1} in {2}".format(
                station_id, record['Name'], record['State'])
            matches.append(record)
    return matches

USA_STATES_WITH_STATIONS = gather_states(STATION_DETAIL_FILE, USA_STATION_LIST)
def process_station_detail_for_state(fname, stations, statecode):
    '''Return dataframe of station detail for specified state.

    Streams the GHCND station directory *fname* and collects detail records
    for stations in *stations* located in *statecode*.
    '''
    records = []
    with open(fname, 'r', encoding='utf-8') as f:
        for line in f:
            records.extend(noaa_build_station_detail_for_state(line, stations, statecode))
    return pd.DataFrame(records, columns=station_detail_colnames)
def noaa_build_station_detail_for_state(line, slist, statecode):
    '''Build a list of station tuples for the specified state in the USA.

    Like noaa_gather_station_detail, but additionally filters on the
    two-letter *statecode*.
    '''
    matches = []
    if line[0:3] in ('USC', 'USW'):
        fields = struct.unpack('12s9s10s7s2s30s', line[0:70].encode())
        station_id = fields[0].decode().strip()
        state = fields[4].decode().strip()
        if station_id in slist and state == statecode:
            record = dict(station_detail_rec_template)
            record['StationID'] = station_id
            record['State'] = state
            record['Name'] = fields[5].decode().strip()
            record['Latitude'] = fields[1].decode().strip()
            record['Longitude'] = fields[2].decode().strip()
            record['QueryTag'] = "Station {0} in {1} at {2}".format(
                station_id, state, record['Name'])
            matches.append(record)
    return matches

# We can examine derived station detail data.
#process_station_detail_for_state(STATION_DETAIL_FILE,USA_STATION_LIST,"NE")
"""
Explanation: Data Munging
In this section of the notebook we will define the necessary data extraction, transformation and loading functions for the desired interactive dashboard.
End of explanation
"""
# -----------------------------------
# Station Computation Methods
# -----------------------------------
# Month number (1-12) -> three-letter English abbreviation, used when
# building the "dd-Mmm" day identifiers below.
month_abbrev = { 1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr',
                 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug',
                 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'
               }
def compute_years_of_station_data(df):
    '''Compute years of service for the station.

    Years elapsed between the earliest FirstYearOfRecord in *df* and today.
    '''
    earliest_year = df['FirstYearOfRecord'].min()
    return dt.date.today().year - earliest_year
def compute_tmax_record_quantity(df, freq):
    '''Compute number of days where maximum temperature records were greater than frequency factor.

    Returns the rows of *df* whose TMaxRecordCount exceeds int(freq).
    '''
    threshold = int(freq)
    # Boolean-mask selection is equivalent to the original
    # df.query('TMaxRecordCount > @threshold', engine='python').
    return df[df['TMaxRecordCount'] > threshold]
def compute_tmin_record_quantity(df, freq):
    '''Compute number of days where minimum temperature records were greater than frequency factor.

    Returns the rows of *df* whose TMinRecordCount exceeds int(freq).
    '''
    threshold = int(freq)
    # Boolean-mask selection is equivalent to the original df.query call.
    return df[df['TMinRecordCount'] > threshold]
def fetch_station_data(stationid):
    '''Return dataframe for station summary file.

    Loads <NOAA_STATION_SUMMARY_PATH>/<stationid>_sum.csv with the first
    column as the index and dates parsed, matching the defaults of the
    removed pandas.DataFrame.from_csv.
    '''
    fname = os.path.join(NOAA_STATION_SUMMARY_PATH, '', stationid + '_sum.csv')
    # DataFrame.from_csv was deprecated in pandas 0.21 and later removed;
    # read_csv with index_col=0, parse_dates=True is the documented
    # replacement.
    return pd.read_csv(fname, index_col=0, parse_dates=True)
def create_day_identifier(month, day):
    '''Return dd-mmm string.'''
    # e.g. create_day_identifier(7, 4) -> "4-Jul"
    return "{0}-{1}".format(day, month_abbrev[int(month)])
def create_date_list(mlist, dlist):
    '''Return list of formated date strings.

    Pairs the values of the month mapping *mlist* with the values of the
    day mapping *dlist* positionally.
    '''
    months = list(mlist.values())
    days = list(dlist.values())
    return [create_day_identifier(months[i], days[i])
            for i in range(len(months))]
def create_record_date_list(mlist, dlist, ylist):
    '''Return list of dates for max/min record events.

    Pairs the values of the month, day and year mappings positionally and
    builds a datetime.date for each triple.
    '''
    months = list(mlist.values())
    days = list(dlist.values())
    years = list(ylist.values())
    return [dt.date(years[i], months[i], days[i])
            for i in range(len(months))]
# Use the Polymer Channel API to establish two-way binding between elements and data.
from urth.widgets.widget_channels import channel
# Populate the option lists and initial values the dashboard widgets bind to.
channel("noaaquery").set("states", USA_STATES_WITH_STATIONS)
channel("noaaquery").set("recordTypeOptions", ["Low","High"])
channel("noaaquery").set("recordOccuranceOptions", list(range(4, 16)))
channel("noaaquery").set("stationList",USA_STATION_LIST)
channel("noaaquery").set("stationDetail",STATION_DETAIL_FILE)
channel("noaaquery").set("narrationToggleOptions", ["Yes","No"])
channel("noaaquery").set("cleanupToggleOptions", ["Yes","No"])
channel("noaaquery").set("cleanupPreference", "No")
channel("noaaquery").set("displayTypeOptions", ["Data","Map"])
def reset_settings():
    # Restore the per-query channel defaults: narration and map views on,
    # query flagged as new, and no station results pending.
    channel("noaaquery").set("isNarration", True)
    channel("noaaquery").set("isMap", True)
    channel("noaaquery").set("isNewQuery", True)
    channel("noaaquery").set("stationResultsReady", "")
# Initialize the channel state when the notebook first runs.
reset_settings()
"""
Explanation: Exploratory Analysis
In this section of the notebook we will define the necessary computational functions for the desired interactive dashboard.
End of explanation
"""
%%html
<a name="narrationdata"></a>
<template id="narrationContent" is="urth-core-bind" channel="noaaquery">
<template is="dom-if" if="{{isNarration}}">
<p>This application allows the user to explore historical NOAA data to observe the actual frequency at which weather stations in the USA have experienced new high and low temperature records.</p>
<blockquote>Are you able to identify a significant number of temperature changes within the weather station data?</blockquote>
<blockquote>Would you consider these results representative of extreme weather changes?</blockquote>
</template>
</template>
"""
Explanation: Visualization
In this section of the notebook we will define the widgets and supporting functions for the construction of an interactive dashboard. See Polymer Data Bindings for more details.
Narration Widget
Provide some introductory content for the user.
End of explanation
"""
%%html
<template id="weatherchannel_currentusamap" is="urth-core-bind" channel="noaaquery">
<div id="wc_curmap">
<center><embed src="http://i.imwx.com/images/maps/current/curwx_600x405.jpg" width="500" height="300"></center>
</div>
</template>
"""
Explanation: Weather Channel Widget
Display the current USA national weather map.
End of explanation
"""
def process_preferences(narrativepref,viewpref):
    '''Push the user's narration/display choices onto the channel.

    Bound widgets test truthiness, so ``True`` enables a view and the empty
    string hides it.
    '''
    show_narration = True if narrativepref == "Yes" else ""
    channel("noaaquery").set("isNarration", show_narration)
    show_map = True if viewpref == "Map" else ""
    channel("noaaquery").set("isMap", show_map)
    return
%%html
<a name="prefsettings"></a>
<template id="setPreferences" is="urth-core-bind" channel="noaaquery">
<urth-core-function id="applySettingFunc"
ref="process_preferences"
arg-narrativepref="{{narrationPreference}}"
arg-viewpref="{{displayPreference}}" auto>
</urth-core-function>
<paper-card heading="Preferences" elevation="1">
<div class="card-content">
<p class="widget">Select a narration preference to toggle informative content.
<paper-dropdown-menu label="Show Narration" selected-item-label="{{narrationPreference}}" noink>
<paper-menu class="dropdown-content" selected="[[narrationPreference]]" attr-for-selected="label">
<template is="dom-repeat" items="[[narrationToggleOptions]]">
<paper-item label="[[item]]">[[item]]</paper-item>
</template>
</paper-menu>
</paper-dropdown-menu></p>
<p class="widget">Would you like a geospacial view of a selected weather station?
<paper-dropdown-menu label="Select Display Type" selected-item-label="{{displayPreference}}" noink>
<paper-menu class="dropdown-content" selected="[[displayPreference]]" attr-for-selected="label">
<template is="dom-repeat" items="[[displayTypeOptions]]">
<paper-item label="[[item]]">[[item]]</paper-item>
</template>
</paper-menu>
</paper-dropdown-menu></p>
<p class="widget">Would you like to purge disk storage more frequently?
<paper-dropdown-menu label="Manage Storage" selected-item-label="{{cleanupPreference}}" noink>
<paper-menu class="dropdown-content" selected="[[cleanupPreference]]" attr-for-selected="label">
<template is="dom-repeat" items="[[cleanupToggleOptions]]">
<paper-item label="[[item]]">[[item]]</paper-item>
</template>
</paper-menu>
</paper-dropdown-menu></p>
</div>
</paper-card>
</template>
"""
Explanation: Preferences Widget
This composite widget allows the user to control several visualization switches:
Narration: This dropdown menu allows the user to hide/show narrative content within the dashboard.
Display Type: This dropdown menu allows the user to toggle between geospacial and raw data visualizations.
Storage Management: This dropdown menu allows the user to toggle the frequency of storage cleanup.
End of explanation
"""
def process_query(fname,stations,statecode,cleanuppref):
    # Runs when the user presses "Apply": optionally purge cached plot
    # images, reset the dashboard state, and publish the station list for
    # the selected state.
    global DATA_STATE_STATION_LIST
    if cleanuppref == "Yes":
        # Free disk space used by previously rendered figures.
        image_cleanup(IMAGE_DIRECTORY)
    reset_settings()
    DATA_STATE_STATION_LIST = process_station_detail_for_state(fname,stations,statecode)
    # Signal bound widgets (e.g. the map renderer) that fresh results exist.
    channel("noaaquery").set("stationResultsReady", True)
    return DATA_STATE_STATION_LIST
# We can examine stations per state data.
#process_query(STATION_DETAIL_FILE,USA_STATION_LIST,"NE","No")
%%html
<a name="loaddata"></a>
<template id="loadCard" is="urth-core-bind" channel="noaaquery">
<urth-core-function id="loadDataFunc"
ref="process_query"
arg-fname="{{stationDetail}}"
arg-stations="{{stationList}}"
arg-statecode="{{stateAbbrev}}"
arg-cleanuppref="{{cleanupPreference}}"
result="{{stationQueryResult}}"
is-ready="{{isloadready}}">
</urth-core-function>
<paper-card heading="Query Preferences" elevation="1">
<div class="card-content">
<div>
<p class="widget">Which region of weather stations in the USA do you wish to examine?</p>
<paper-dropdown-menu label="Select State" selected-item-label="{{stateAbbrev}}" noink>
<paper-menu class="dropdown-content" selected="{{stateAbbrev}}" attr-for-selected="label">
<template is="dom-repeat" items="[[states]]">
<paper-item label="[[item]]">[[item]]</paper-item>
</template>
</paper-menu>
</paper-dropdown-menu>
</div>
<div>
<p class="widget">Are you interested in daily minimum or maximum temperature records per station?</p>
<paper-dropdown-menu label="Select Record Type" selected-item-label="{{recType}}" noink>
<paper-menu class="dropdown-content" selected="[[recType]]" attr-for-selected="label">
<template is="dom-repeat" items="[[recordTypeOptions]]">
<paper-item label="[[item]]">[[item]]</paper-item>
</template>
</paper-menu>
</paper-dropdown-menu>
</div>
<div>
<p class="widget">Each weather station has observed more than one new minimum or maximum temperature record event. How many new record occurrences would you consider significant enough to raise concerns about extreme weather fluctuations?</p>
<paper-dropdown-menu label="Select Occurrence Factor" selected-item-label="{{occurrenceFactor}}" noink>
<paper-menu class="dropdown-content" selected="[[occurrenceFactor]]" attr-for-selected="label">
<template is="dom-repeat" items="[[recordOccuranceOptions]]">
<paper-item label="[[item]]">[[item]]</paper-item>
</template>
</paper-menu>
</paper-dropdown-menu>
</div>
</div>
<div class="card-actions">
<paper-button tabindex="0" disabled="{{!isloadready}}" onClick="loadDataFunc.invoke()">Apply</paper-button>
</div>
</paper-card>
</template>
"""
Explanation: Dashboard Control Widget
This composite widget allows the user to control several visualization switches:
State Selector: This dropdown menu allows the user to select a state for analysis. Only the data associated with the selected state will be loaded.
Record Type: This dropdown menu allows the user focus the analysis on either High or Low records.
Occurrence Factor: This dropdown menu allows the user to specify the minimum number of new record events for a given calendar day.
The widget uses a control method to manage interactive events.
End of explanation
"""
%%html
<template id="channelMonitorWidget" is="urth-core-bind" channel="noaaquery">
<h2 class="widget">Channel Monitor</h2>
<p class="widget"><b>Query Selections:</b></p>
<table border="1" align="center">
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
<tr>
<td>State</td>
<td>{{stateAbbrev}}</td>
</tr>
<tr>
<td>Record Type</td>
<td>{{recType}}</td>
</tr>
<tr>
<td>Occurance Factor</td>
<td>{{occurrenceFactor}}</td>
</tr>
<tr>
<td>Station ID</td>
<td>{{station.0}}</td>
</tr>
<tr>
<td>Narration</td>
<td>{{isNarration}}</td>
</tr>
<tr>
<td>Map View</td>
<td>{{isMap}}</td>
</tr>
</table>
<p class="widget">{{recType}} temperature record analysis using historical NOAA data from weather {{station.5}}.</p>
</template>
"""
Explanation: Channel Monitor Widget
This widget provides status information pertaining to properties of the dashboard.
End of explanation
"""
# Use Python to generate a Folium Map with Markers for each weather station in the selected state.
def display_map(m, height=500):
    '''Render a folium map as an inline HTML iframe.

    Args:
        m: folium map instance; its HTML is built in place via ``_build_map``.
        height: iframe height in pixels.

    Returns:
        An ``<iframe>`` snippet whose ``srcdoc`` attribute holds the map markup.
    '''
    m._build_map()
    # Escape double quotes so the markup survives inside srcdoc="..."; the
    # original .replace('"', '"') was a no-op (the &quot; entity was lost)
    # and any quote in the map HTML would terminate the attribute early.
    srcdoc = m.HTML.replace('"', '&quot;')
    embed = '<iframe srcdoc="{0}" style="width: 100%; height: {1}px; border: none"></iframe>'.format(srcdoc, height)
    return embed
def render_map(height=500):
    '''Return embeddable iframe HTML for a map of the selected state's stations.'''
    # DATA_STATE_STATION_LIST is populated by process_query() for the
    # currently selected state.
    df = DATA_STATE_STATION_LIST
    # Center the map on the mean of the station coordinates.
    centerpoint_latitude = np.mean(df.Latitude.astype(float))
    centerpoint_longitude = np.mean(df.Longitude.astype(float))
    map_obj = folium.Map(location=[centerpoint_latitude, centerpoint_longitude],zoom_start=6)
    for index, row in df.iterrows():
        # One clickable marker per station; the popup shows its query tag.
        map_obj.simple_marker([row.Latitude, row.Longitude], popup=row.QueryTag)
    # NOTE(review): the *height* parameter is accepted but not forwarded to
    # display_map (which defaults to 500) — confirm whether that is intended.
    return display_map(map_obj)
# We can examine the generated HTML for the dynamic map
#render_map()
"""
Explanation: Station Detail Widget
This composite widget allows the user view station details for the selected state. Tabluar and map viewing options are available.
End of explanation
"""
%%html
<template id="station_detail_combo_func" is="urth-core-bind" channel="noaaquery">
<urth-core-watch value="{{stationResultsReady}}">
<urth-core-function id="renderFoliumMapFunc"
ref="render_map"
result="{{foliumMap}}" auto>
</urth-core-function>
</urth-core-watch>
</template>
%%html
<template id="station_detail_combo_widget" is="urth-core-bind" channel="noaaquery">
<paper-card style="width: 100%;" heading="{{stateAbbrev}} Weather Stations" elevation="1">
<p>These are the weather stations monitoring local conditions. Select a station to explore historical record temperatures.</p>
<urth-viz-table datarows="{{ stationQueryResult.data }}" selection="{{station}}" columns="{{ stationQueryResult.columns }}" rows-visible=20>
</urth-viz-table>
</paper-card>
<template is="dom-if" if="{{isNewQuery}}">
<template is="dom-if" if="{{isMap}}">
<div>
<urth-raw-html html="{{foliumMap}}"/>
</div>
</template>
</template>
</template>
"""
Explanation: HACK: urth-core-watch seems to misbehave when combined with output elements. The workaround is to split the widget into two.
End of explanation
"""
def explore_station_data(station):
    # Invoked automatically when the user selects a station row: load the
    # station's summary file, publish its years of service to the channel,
    # and cache the detail frame for the downstream analysis functions.
    global DATA_STATION_DETAIL_RESULTS
    df_station_detail = fetch_station_data(station)
    channel("noaaquery").set("yearsOfService", compute_years_of_station_data(df_station_detail))
    DATA_STATION_DETAIL_RESULTS = df_station_detail
    #display(Javascript("stationRecordFreqFunc.invoke()"))
    return df_station_detail
%%html
<template id="station_summary_widget" is="urth-core-bind" channel="noaaquery">
<urth-core-function id="exploreStationDataFunc"
ref="explore_station_data"
arg-station="[[station.0]]"
result="{{stationSummaryResult}}" auto>
</urth-core-function>
<paper-card style="width: 100%;" heading="Station Summary" elevation="1">
<template is="dom-if" if="{{stationSummaryResult}}">
<p>{{recType}} temperature record analysis using historical NOAA data from weather {{station.5}}.</p>
<p>This weather station has been in service and collecting data for {{yearsOfService}} years.</p>
<urth-viz-table datarows="{{ stationSummaryResult.data }}" selection="{{dayAtStation}}" columns="{{ stationSummaryResult.columns }}" rows-visible=20>
</urth-viz-table>
</template>
</paper-card>
</template>
"""
Explanation: Station Summary Widget
This widget provides the user with a glimpse into the historic hi/low record data for the selected station.
End of explanation
"""
def plot_record_results(rectype,fname=None):
    '''Bar-plot the current record temperature for each qualifying day.

    Args:
        rectype: "High" plots TMax records; anything else plots TMin records.
        fname: optional file path; when given, any existing file there is
            replaced with the rendered figure.

    Returns:
        The matplotlib figure backing the seaborn factor plot.
    '''
    # DATA_FREQUENCY_RESULTS is populated by explore_record_temperature_frequency().
    df = DATA_FREQUENCY_RESULTS
    plt.figure(figsize = (9,9), dpi = 72)
    if rectype == "High":
        # Label each bar with the date the standing TMax record was set.
        dates = create_record_date_list(df.Month.to_dict(),
                                        df.Day.to_dict(),
                                        df.TMaxRecordYear.to_dict()
                                        )
        temperatureRecordsPerDate = {'RecordDate' : pd.Series(dates,index=df.index),
                                     'RecordHighTemp' : pd.Series(df.TMax.to_dict(),index=df.index)
                                    }
        df_new = pd.DataFrame(temperatureRecordsPerDate)
        sns_plot = sns.factorplot(x="RecordDate", y="RecordHighTemp", kind="bar", data=df_new, size=6, aspect=1.5)
        sns_plot.set_xticklabels(rotation=30)
    else:
        # Same shape of plot, but for the standing TMin records.
        dates = create_record_date_list(df.Month.to_dict(),
                                        df.Day.to_dict(),
                                        df.TMinRecordYear.to_dict()
                                        )
        temperatureRecordsPerDate = {'RecordDate' : pd.Series(dates,index=df.index),
                                     'RecordLowTemp' : pd.Series(df.TMin.to_dict(),index=df.index)
                                    }
        df_new = pd.DataFrame(temperatureRecordsPerDate)
        sns_plot = sns.factorplot(x="RecordDate", y="RecordLowTemp", kind="bar", data=df_new, size=6, aspect=1.5)
        sns_plot.set_xticklabels(rotation=30)
    if fname is not None:
        # Overwrite any stale image left over from a previous query.
        if os.path.isfile(fname):
            os.remove(fname)
        sns_plot.savefig(fname)
    return sns_plot.fig
def compute_record_durations(df,rectype):
    '''Return dataframe of max/min temperature record durations for each day.

    Args:
        df: frequency-filtered station summary (needs ``Month``, ``Day`` and
            the ``MaxDurTMaxRecord``/``MaxDurTMinRecord`` columns).
        rectype: "High" selects TMax durations; anything else selects TMin.

    Returns:
        DataFrame with a ``RecordDate`` label column and a ``RecordDuration``
        column (the longest-standing record, in years, for that calendar day).
    '''
    dates = create_date_list(df.Month.to_dict(),df.Day.to_dict())
    if rectype == "High":
        durations = pd.Series(df.MaxDurTMaxRecord.to_dict(),index=df.index)
    else:
        durations = pd.Series(df.MaxDurTMinRecord.to_dict(),index=df.index)
    # The value column is named for what it holds; the original
    # 'RecordLowTemp' key was a copy/paste slip that mislabeled the plot
    # legend (and the unused s_dates local has been dropped).
    temperatureDurationsPerDate = {'RecordDate' : pd.Series(dates,index=df.index),
                                   'RecordDuration' : durations
                                  }
    return pd.DataFrame(temperatureDurationsPerDate)
def plot_duration_results(rectype,fname=None):
    '''Bar-plot how long each qualifying day's standing record has survived.

    Args:
        rectype: "High" plots TMax record durations; anything else TMin.
        fname: optional file path; when given, any existing file there is
            replaced with the rendered figure.

    Returns:
        The matplotlib figure.
    '''
    df_durations = compute_record_durations(DATA_FREQUENCY_RESULTS,rectype)
    fig = plt.figure(figsize = (9,9), dpi = 72)
    plt.xlabel('Day')
    plt.ylabel('Record Duration in Years')
    if rectype == "High":
        plt.title('Maximum Duration for TMax Records')
    else:
        plt.title('Maximum Duration for TMin Records')
    ax = plt.gca()
    colors= ['r', 'b']
    df_durations.plot(kind='bar',color=colors, alpha=0.75, ax=ax)
    # Replace the numeric x ticks with the dd-mmm day labels.
    ax.xaxis.set_ticklabels( ['%s' % i for i in df_durations.RecordDate.values] )
    plt.grid(b=True, which='major', linewidth=1.0)
    plt.grid(b=True, which='minor')
    if fname is not None:
        # Overwrite any stale image left over from a previous query.
        if os.path.isfile(fname):
            os.remove(fname)
        plt.savefig(fname)
    return fig
def explore_record_temperature_frequency(rectype,recfreqfactor):
    '''Filter the loaded station data to days exceeding the occurrence factor.

    Publishes isAboveFreqFactor / numberRecordDays on the channel so bound
    widgets can react, and caches the result for the plotting functions.

    Args:
        rectype: "High" filters on TMax record counts; anything else on TMin.
        recfreqfactor: minimum number of record events for a day to qualify.

    Returns:
        The (possibly empty) filtered DataFrame.
    '''
    global DATA_FREQUENCY_RESULTS
    channel("noaaquery").set("isAboveFreqFactor", True)
    channel("noaaquery").set("numberRecordDays", 0)
    if rectype == "High":
        df_record_days = compute_tmax_record_quantity(DATA_STATION_DETAIL_RESULTS,recfreqfactor)
    else:
        df_record_days = compute_tmin_record_quantity(DATA_STATION_DETAIL_RESULTS,recfreqfactor)
    if not df_record_days.empty:
        channel("noaaquery").set("numberRecordDays", len(df_record_days))
        DATA_FREQUENCY_RESULTS = df_record_days
    else:
        # No qualifying days: clear the flag so the UI shows the fallback text.
        channel("noaaquery").set("isAboveFreqFactor", "")
    #display(Javascript("stationRecordFreqFunc.invoke()"))
    return df_record_days
%%html
<template id="station_synopsis_data_widget" is="urth-core-bind" channel="noaaquery">
<urth-core-watch value="{{station.0}}">
<urth-core-function id="stationRecordFreqFunc"
ref="explore_record_temperature_frequency"
arg-rectype="[[recType]]"
arg-recfreqfactor="[[occurrenceFactor]]"
result="{{stationFreqRecordsResult}}" auto>
</urth-core-function>
</urth-core-watch>
</template>
%%html
<template id="station_synopsis_chart_widget" is="urth-core-bind" channel="noaaquery">
<template is="dom-if" if="{{stationFreqRecordsResult}}">
<paper-card style="width: 100%;" heading="Temperature Record Analysis" elevation="1">
<p>This station has experienced {{numberRecordDays}} days of new {{recType}} records where a new record has been set more than {{occurrenceFactor}} times throughout the operation of the station.</p>
<urth-viz-table datarows="{{ stationFreqRecordsResult.data }}" selection="{{dayAtStation}}" columns="{{ stationFreqRecordsResult.columns }}" rows-visible=20>
</urth-viz-table>
</paper-card>
<template is="dom-if" if="{{isAboveFreqFactor}}">
<urth-core-function id="stationRecordsFunc"
ref="plot_record_results"
arg-rectype="[[recType]]"
result="{{stationRecordsPlot}}" auto>
</urth-core-function>
<urth-core-function id="stationDurationsFunc"
ref="plot_duration_results"
arg-rectype="[[recType]]"
result="{{stationDurationsPlot}}" auto>
</urth-core-function>
<paper-card heading="Station {{station.0}} Records Per Day" elevation="0">
<p>The current {{recType}} temperature record for each day that has experienced more than {{occurrenceFactor}} new record events since the station has come online.</p>
<img src="{{stationRecordsPlot}}"/><br/>
</paper-card>
<paper-card heading="Duration of Station {{station.0}} Records Per Day" elevation="0">
<p>For each day that has experienced more than {{occurrenceFactor}} {{recType}} temperature records, some days have had records stand for a large portion of the life of the station.</p>
<img src="{{stationDurationsPlot}}"/>
</paper-card>
</template>
<template is="dom-if" if="{{!isAboveFreqFactor}}">
<p>This weather station has not experienced any days with greater than {{occurrenceFactor}} new {{recType}} records.</p>
</template>
</template>
</template>
"""
Explanation: Temperature Record Analysis for Selected Station
This widget provides the user with insights for selected station.
End of explanation
"""
|
google/tf-quant-finance | tf_quant_finance/examples/jupyter_notebooks/Black_Scholes_Price_and_Implied_Vol.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
#@title Upgrade to TensorFlow 2.1+
!pip install --upgrade tensorflow
#@title Install TF Quant Finance
!pip install tf-quant-finance
#@title Imports
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import tf_quant_finance as tff
option_price = tff.black_scholes.option_price
implied_vol = tff.black_scholes.implied_vol
from IPython.core.pylabtools import figsize
figsize(21, 14) # better graph size for Colab
"""
Explanation: Black Scholes: Price and Implied Vol in TFF
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.google.com/url?q=https://github.com/google/tf-quant-finance/blob/master/tf_quant_finance/examples/jupyter_notebooks/Black_Scholes_Price_and_Implied_Vol.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/google/tf-quant-finance/blob/master/tf_quant_finance/examples/jupyter_notebooks/Black_Scholes_Price_and_Implied_Vol.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
End of explanation
"""
# Calculate discount factors (e^-rT)
rate = 0.05
expiries = np.array([0.5, 1.0, 2.0, 1.3])
discount_factors = np.exp(-rate * expiries)
# Current value of assets.
spots = np.array([0.9, 1.0, 1.1, 0.9])
# Forward value of assets at expiry.
forwards = spots / discount_factors
# Strike prices given by:
strikes = np.array([1.0, 2.0, 1.0, 0.5])
# Indicate whether options are call (True) or put (False)
is_call_options = np.array([True, True, False, False])
# The volatilities at which the options are to be priced.
volatilities = np.array([0.7, 1.1, 2.0, 0.5])
# Calculate the prices given the volatilities and term structure.
# Each array index prices an independent option (batch semantics).
prices = option_price(
    volatilities=volatilities,
    strikes=strikes,
    expiries=expiries,
    forwards=forwards,
    discount_factors=discount_factors,
    is_call_options=is_call_options)
prices
"""
Explanation: Black Scholes pricing and implied volatility usage
Here we see how to price vanilla options in the Black Scholes framework using the library.
Semantics of the interface
Let $S$ be the spot price of an asset, $r$ the risk-free rate, $T$ the time to expiry, and $\sigma$ the volatility. The price of a call $C$ under the Black-Scholes model exhibits the following relationship (suppressing unused notation):
$C(S, r) = e^{-rT} C(e^{rT}S, 0)$
Where $e^{-rT}$ is the discount factor, and $e^{rT}S_t$ the forward price of the asset to expiry. The tff's interface is framed in terms of forward prices and discount factors (rather than spot prices and risk free rates). This corresponds to the right hand side of the above relationship.
Parallelism
Note that the library allows pricing of options in parallel: each argument (such as the strikes) is an array and each index corresponds to an independent option to price. For example, this allows the simultaneous pricing of the same option with different expiry dates, or strike prices or both.
End of explanation
"""
# Initial positions for finding implied vol.
initial_volatilities = np.array([2.0, 0.5, 2.0, 0.5])
# Identifier whether the option is call (True) or put (False)
is_call_options = np.array([True, True, False, False])
# Find the implied vols beginning at initial_volatilities.
# The root finder should recover the `volatilities` used above to generate
# `prices`, one independent inversion per array index.
implied_vols = implied_vol(
    prices=prices,
    strikes=strikes,
    expiries=expiries,
    forwards=forwards,
    discount_factors=discount_factors,
    is_call_options=is_call_options,
    initial_volatilities=initial_volatilities,
    validate_args=True,
    tolerance=1e-9,
    max_iterations=200,
    name=None,
    dtype=None)
implied_vols
"""
Explanation: We now show how to invert the Black Scholes pricing model in order to recover the volatility which generated a given market price under a particular term structure. Again, the implied volatility interface operates on batches of options, with each index of the arrays corresponding to an independent problem to solve.
End of explanation
"""
#@title Example data on a grid.
def grid_data(strike_vec, vol_vec, dtype=np.float64):
    """Construct dummy data with known ground truth.

    For a grid of known strikes by volatilities, return the price.
    Assumes the forward prices and expiries are fixed at unity.

    Args:
        strike_vec: a vector of strike prices from which to form the grid.
        vol_vec: a vector of volatilities from which to form the grid.
        dtype: a numpy datatype for the element values of returned arrays.

    Returns:
        (forwards, strikes, expiries, true_volatilities, initials, prices),
        all shaped (len(strike_vec), len(vol_vec)).
    """
    # Broadcast the two vectors onto the grid. np.meshgrid replaces the
    # deprecated np.matrix outer products of the original version:
    #   strikes[i, j] == strike_vec[i], volatilities[i, j] == vol_vec[j].
    volatilities, strikes = np.meshgrid(
        np.asarray(vol_vec, dtype=dtype), np.asarray(strike_vec, dtype=dtype))
    expiries = np.ones_like(strikes, dtype=dtype)
    forwards = np.ones_like(strikes, dtype=dtype)
    initials = np.ones_like(strikes, dtype=dtype)
    prices = option_price(volatilities=volatilities,
                          strikes=strikes,
                          expiries=expiries,
                          forwards=forwards,
                          dtype=tf.float64)
    return (forwards, strikes, expiries, volatilities, initials, prices)
# Build a 1000 x 1000 grid of options to find the implied volatilities of.
nstrikes = 1000
nvolatilities = 1000
strike_vec = np.linspace(0.0001, 5.0, nstrikes)
vol_vec = np.linspace(0.0001, 5.0, nvolatilities)
max_iterations = 50
grid = grid_data(strike_vec, vol_vec)
forwards0, strikes0, expiries0, volatilities0, initials0, prices0 = grid
# NOTE(review): this overwrites the initials0 just unpacked from `grid`, and
# discounts0/signs0 are never used below — confirm this is intentional.
initials0 = discounts0 = signs0 = np.ones_like(prices0)
# Implied volatilities, starting the root finder at 1.
implied_vols_fix = implied_vol(
    prices=prices0,
    strikes=strikes0,
    expiries=expiries0,
    forwards=forwards0,
    initial_volatilities=initials0,
    validate_args=False,
    tolerance=1e-8,
    max_iterations=max_iterations)
# Implied vols starting the root finder at the Radiocic-Polya approximation
# (used automatically when no initial_volatilities are supplied).
implied_vols_polya = implied_vol(
    prices=prices0,
    strikes=strikes0,
    expiries=expiries0,
    forwards=forwards0,
    validate_args=False,
    tolerance=1e-8,
    max_iterations=max_iterations)
#@title Visualisation of accuracy
# Side-by-side heat maps of (estimated implied vol - true vol) for the two
# root-finder initialisation strategies.
plt.clf()
thinner = 100
fig, _axs = plt.subplots(nrows=1, ncols=2)
fig.subplots_adjust(hspace=0.3)
axs = _axs.flatten()
implied_vols = [implied_vols_fix, implied_vols_polya]
titles = ["Fixed initialisation implied vol minus true vol", "Radiocic-Polya initialised implied vol minus true vol"]
# Reduce each surface before aggregating. The original np.min(map(...)) was a
# Python 2 leftover: under Python 3, map() returns an iterator that numpy
# wraps as a 0-d object array, so no numeric bound is ever computed.
vmin = np.min([np.min(vols) for vols in implied_vols])
vmax = np.max([np.max(vols) for vols in implied_vols])
# NOTE(review): vmin/vmax are computed but imshow below clamps to [-1, 1] —
# confirm whether the data-driven bounds should be used instead.
images = []
for i in range(2):
    _title = axs[i].set_title(titles[i])
    _title.set_position([.5, 1.03])
    im = axs[i].imshow(implied_vols[i] - volatilities0, origin="lower", interpolation="none", cmap="seismic", vmin=-1.0, vmax=1.0)
    images.append(im)
    # Thin the tick labels so only every `thinner`-th grid value is shown.
    axs[i].set_xticks(np.arange(0, len(vol_vec), thinner))
    axs[i].set_yticks(np.arange(0, len(strike_vec), thinner))
    axs[i].set_xticklabels(np.round(vol_vec[0:len(vol_vec):thinner], 3))
    axs[i].set_yticklabels(np.round(strike_vec[0:len(strike_vec):thinner], 3))
    plt.colorbar(im, ax=axs[i], fraction=0.046, pad=0.00)
    axs[i].set_ylabel('Strike')
    axs[i].set_xlabel('True vol')
plt.show()
pass
"""
Explanation: Which should show that implied_vols is very close to the volatilities used to generate the market prices. Here we provided initial starting positions, however, by default tff will chose an adaptive initialisation position as discussed below.
Black Scholes implied volatility convergence region
We now look at some charts which provide a basic illustration of the convergence region of the implemented root finding method.
The library provides an implied volatility root finding method. If not provided
with an initial starting point, a starting point will be found using the Radiocic-Polya approximation [1] to the implied volatility. This section illustrates both call styles and the comparative advantage of using targeted initialisation.
In this example:
Forward prices are fixed at 1.
Strike prices are from uniform grid on (0, 5).
Expiries are fixed at 1.
Volatilities are from a uniform grid on (0, 5).
Fixed initial volatilities (where used) are 1.
Option prices were computed by tff.black_scholes.option_price on the other data.
Discount factors are 1.
[1] Dan Stefanica and Rados Radoicic. An explicit implied volatility formula. International Journal of Theoretical and Applied Finance. Vol. 20, no. 7, 2017.
End of explanation
"""
# Indices for selecting the middle of the grid.
vol_slice = np.arange(int(0.25*len(vol_vec)), int(0.75*len(vol_vec)))
strike_slice = np.arange(int(0.25*len(strike_vec)), int(0.75*len(strike_vec)))
error_fix = implied_vols_fix.numpy() - volatilities0
# NOTE(review): zip pairs strike_slice[i] with vol_slice[i], i.e. a diagonal
# through the central region rather than the full central sub-grid — confirm
# this is the intended sample.
error_fix_sub = [error_fix[i, j] for i, j in zip(strike_slice, vol_slice)]
# Calculate the median absolute error in the central portion of the grid
# for the fixed initialisation.
median_error_fix = np.median( np.abs(error_fix_sub) )
median_error_fix
error_polya = implied_vols_polya.numpy() - volatilities0
error_polya_sub = [error_polya[i, j] for i, j in zip(strike_slice, vol_slice)]
# Calculate the median absolute error in the central portion of the grid
# for the Radiocic-Polya approximation.
median_error_polya = np.median( np.abs(error_polya_sub) )
median_error_polya
# A ratio > 1 means the Radiocic-Polya start was the more accurate of the two.
median_error_fix / median_error_polya
"""
Explanation: Where the grey values represent nans in the grid. Note that the bottom left corner of each image lies outside the bounds where inversion should be possible. The pattern of nan values for different values of a fixed initialisation strategy will be different (rerun the colab to see).
Black Scholes implied volatility initialisation strategy accuracy comparison
We can also consider the median absolute error for fixed versus Radiocic-Polya initialisation of the root finder. We consider a clipped grid looking at performance away from the boundaries where extreme values or nans might occur.
End of explanation
"""
|
jamesjia94/BIDMach | tutorials/MLscalePart1.ipynb | bsd-3-clause | import BIDMat.{CMat,CSMat,DMat,Dict,IDict,Image,FMat,FND,GDMat,GMat,GIMat,GSDMat,GSMat,HMat,IMat,Mat,SMat,SBMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMat.Solvers._
import BIDMat.JPlotting._
import BIDMach.Learner
import BIDMach.models.{FM,GLM,KMeans,KMeansw,ICA,LDA,LDAgibbs,Model,NMF,RandomForest,SFA,SVD}
import BIDMach.datasources.{DataSource,MatSource,FileSource,SFileSource}
import BIDMach.mixins.{CosineSim,Perplexity,Top,L1Regularizer,L2Regularizer}
import BIDMach.updaters.{ADAGrad,Batch,BatchNorm,IncMult,IncNorm,Telescoping}
import BIDMach.causal.{IPTW}
Mat.checkMKL
Mat.checkCUDA
Mat.setInline
if (Mat.hasCUDA > 0) GPUmem
"""
Explanation: Machine Learning at Scale, Part I
KMeans clustering at scale
Training models with data that fits in memory is very limiting. But minibatch learners can easily work with data directly from disk.
We'll use the MNIST data set, which has 8 million images (about 17 GB). The dataset has been partitioned into groups of 100k images (using the unix split command) and saved in compressed lz4 files. This dataset is very large and doesn't get loaded by default by <code>getdata.sh</code>. You have to load it explicitly by calling <code>getmnist.sh</code> from the scripts directory. The script automatically splits the data into files that are small enough to be loaded into memory.
Let's load BIDMat/BIDMach
End of explanation
"""
// Root directory holding the pre-split MNIST8M partition files.
val mdir = "../data/MNIST8M/parts/"
"""
Explanation: And define the root directory for this dataset.
End of explanation
"""
// Build a KMeans learner over the partitioned files; "%02d" in the pattern
// expands to the two-digit part number (alls00, alls01, ...). Each "alls"
// file carries the 10 weighted label features plus the image features.
val (mm, opts) = KMeans.learner(mdir+"alls%02d.fmat.lz4")
"""
Explanation: Constrained Clustering.
For this tutorial, we are going to evaluate the quality of clustering by using it for classification. We use a labeled dataset, and compute clusters of training samples using k-Means. Then we match new test samples to the clusters and find the best match. The label assigned to the new sample is the majority vote of the cluster.
This method by itself doesnt work well. Clusters will often straddle label boundaries leading to poor labelings. Its better to force each cluster to have a single label. We do that by adding the labels in as very strong features before clustering. The label features cause samples with different labels to be very far apart. Far enough that k-Means will never assign them to the same cluster. The data we want looks like this:
<pre>
Instance 0 Instance 1 Instance 2 ...
has label "2" has label "7" has label "0" ...
/ 0 0 10000 ...
| 0 0 0 ...
| 10000 0 0 ...
| 0 0 0 ...
label / 0 0 0 ...
features \ 0 0 0 ...
(10) | 0 0 0 ...
| 0 10000 0 ...
| 0 0 0 ...
\ 0 0 0 ...
/ 128 19 5 ...
| 47 28 9 ...
image / 42 111 18 ...
features \ 37 128 17 ...
(784) | 18 176 14 ...
| .. .. ..
</pre>
We chose the label feature weights (here 10000) to force the distance between differently-labeled samples (2 * 10000^2) to be larger than the distance between two image samples (1000 * 256^2). This guarantees that points will not be assigned to a cluster containing a different label (assuming there is initially at least one cluster center with each label).
Even though these label features are present in cluster centroids after training, they dont affect matching at test time. Test images dont have the label features, and will match the closest cluster based only on image features. That cluster will have a unique label, which we then assign to the test point.
The files containind data in this form are named "alls00.fmat.lz4", "alls01.fmat.lz4" etc. Since they contain both data and labels, we dont need to load label files separately. We can create a learner using a pattern for accessing these files:
End of explanation
"""
opts.dim = 30000 // number of cluster centroids to learn
opts.nend = 10 // one past the last data file index used for training (files 00-09)
"""
Explanation: The string "%02d" is a C/Scala format string that expands into a two-digit ASCII number to help with the enumeration.
There are several new options that can tailor a files datasource, but we'll mostly use the defaults. One thing we will do is define the last file to use for training (number 70). This leaves us with some held-out files to use for testing.
End of explanation
"""
opts.batchSize = 20000 // minibatch size; tune for speed/GPU memory (no accuracy effect for KMeans)
opts.npasses = 10 // number of passes over the dataset
"""
Explanation: Note that the training data include image data and labels (0-9). K-Means is an unsupervised algorithm and if we used image data only KMeans will often build clusters containing different digit images. To produce cleaner clusters, and to facilitate classification later on, the <code>alls</code> data includes both labels in the first 10 rows, and image data in the remaining rows. The label features are scaled by a large constant factor. That means that images of different digits will be far apart in feature space. It effectively prevents different digits occuring in the same cluster.
Tuning Options
The following options are the important ones for tuning. For KMeans, batchSize has no effect on accuracy since the algorithm uses all the data instances to perform an update. So you're free to tune it for best speed. Generally larger is better, as long as you don't use too much GPU ram.
npasses is the number of passes over the dataset. Larger is typically better, but the model may overfit at some point.
End of explanation
"""
// Train the KMeans model (npasses passes over all training files).
mm.train
"""
Explanation: You invoke the learner the same way as before. You can change the options above after each run to optimize performance.
End of explanation
"""
// Copy the learned model back as a CPU float matrix: one row per centroid,
// columns = 10 label features followed by the 784 pixel features.
val modelmat = FMat(mm.modelmat)
"""
Explanation: Now lets extract the model as a Floating-point matrix. We included the category features for clustering to make sure that each cluster is a subset of images for one digit.
End of explanation
"""
// Tile the first nx*ny = 300 centroids into a single montage image.
val nx = 30
val ny = 10
val im = zeros(28,28) // scratch buffer for one 28x28 digit image
val allim = zeros(28*nx,28*ny) // the full montage
for (i<-0 until nx) {
for (j<-0 until ny) {
// Columns 10->794 hold the 784 pixels (skip the 10 label features).
val slice = modelmat(i+nx*j,10->794)
im(?) = slice(?)
allim((28*i)->(28*(i+1)), (28*j)->(28*(j+1))) = im
}
}
// Magnify 2x via a Kronecker product with a 2x2 block of ones, then display.
show(allim kron ones(2,2))
"""
Explanation: Next we build a 30 x 10 array of images to view the first 300 cluster centers as images.
End of explanation
"""
// Keep only clusters that actually absorbed samples, and record each
// cluster's strongest label so test matches can be mapped to digits.
val igood = find(sum(modelmat,2) > 100) // find non-empty clusters
val mmat = modelmat(igood,?)
val (dmy, catmap) = maxi2(mmat(?,0->10).t) // Lookup the label for each cluster
mm.model.modelmats(0) = mmat(?,10->mmat.ncols) // Remove the label features
mm.model.modelmats(1) = mm.modelmats(1)(igood,0)
catmap(0->100) // peek at the first 100 cluster -> label assignments
"""
Explanation: We'll predict using the closest cluster (or 1-NN if you like). Since we did constrained clustering, our data include the labels for each instance, but unlabeled test data doesnt have this. So we project the model matrix down to remove its first 10 features. Before doing this though we find the strongest label for each cluster so later on we can map from cluster id to label.
End of explanation
"""
// Build a predictor from the trained model; it reads the held-out "data"
// files and writes best-matching centroid ids to the "preds" files.
val (pp, popts) = KMeans.predictor(mm.model, mdir+"data%02d.fmat.lz4", mdir+"preds%02d.imat.lz4")
popts.nstart = 70 // start with file 70 as test data
popts.nend = 80 // finish at file 79
popts.ofcols = 100000 // Match number of samples per file to test file
popts.batchSize = 10000 // prediction minibatch size
"""
Explanation: Next we define a predictor from the just-computed model and the testdata, with the preds files to catch the predictions.
End of explanation
"""
// Run prediction over files 70-79; writes the preds files as a side effect.
pp.predict
"""
Explanation: Lets run the predictor
End of explanation
"""
// Accumulate a 10x10 confusion matrix over all test files by mapping each
// predicted centroid to its label and tallying it against the truth.
val totals = (popts.nstart until popts.nend).map(i => {
val preds = loadIMat(mdir + "preds%02d.imat.lz4" format i); // predicted centroids
val cats = loadIMat(mdir + "cat%02d.imat.lz4" format i); // reference labels
val cpreds = catmap(preds); // map centroid to label
accum(cats.t \ cpreds.t, 1.0, 10, 10) // form a confusion matrix
}).reduce(_+_)
totals // display the summed confusion counts
"""
Explanation: The <code>preds</code> files now contains the numbers of the best-matching cluster centers. We still need to look up the category label for each one, and compare with the reference data. We'll do this one file at a time, so that our evaluation can scale to arbitrary problem sizes.
End of explanation
"""
// Normalize the confusion counts to rates.
val conf = float(totals / sum(totals))
"""
Explanation: From the actual and predicted categories, we can compute a confusion matrix:
End of explanation
"""
// Render the confusion matrix: scale cells to brightness and blow each
// cell up to a 32x32 block via a Kronecker product.
show((conf * 250f) ⊗ ones(32,32))
"""
Explanation: Now lets create an image by multiplying each confusion matrix cell by a white square:
End of explanation
"""
// Per-digit accuracy: the diagonal of the normalized confusion matrix.
val dacc = getdiag(conf).t
"""
Explanation: Its useful to isolate the correct classification rate by digit, which is:
End of explanation
"""
// Overall accuracy: mean of the per-digit accuracies.
mean(dacc)
"""
Explanation: We can take the mean of the diagonal accuracies to get an overall accuracy for this model.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/miroc/cmip6/models/miroc6/land.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'miroc', 'miroc6', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: MIROC
Source ID: MIROC6
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: CMIP5:MIROC5
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-20 15:02:40
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
# Currently left at 0 (not published); flip to 1 once the document is complete.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmopshere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
DOC.set_value("Other: ice")
DOC.set_value("bare soil")
DOC.set_value("lake")
DOC.set_value("vegetated")
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Overall land-surface time step (INTEGER, cardinality 1.1).
# NOTE(review): the schema does not state a unit for this property; a value
# of 1 reads more like hours than seconds -- confirm the intended unit.
DOC.set_value(1)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("ISLSCP Initiative I (FAO, GISS, U. Arizona, NASA/GSFC)")
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("ISLSCP Initiative I (FAO, GISS, U. Arizona, NASA/GSFC)")
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("ISLSCP Initiative I (ERBE)")
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("N/A")
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependancies on snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(6)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
DOC.set_value("Explicit diffusion")
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(6)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Free-text STRING field describing the ice storage method.
# NOTE(review): "Thermo dynamics" is possibly a typo for "Thermodynamics";
# no schema constraint applies, but confirm before publishing.
DOC.set_value("Thermo dynamics")
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General describe how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
DOC.set_value("Explicit diffusion")
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
DOC.set_value("soil moisture freeze-thaw")
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(3)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
DOC.set_value("constant")
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prognostic")
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("diagnostic")
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prognostic")
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# NOTE(review): this property is Required (TRUE, cardinality 1.1) but no
# DOC.set_value(...) call has been made -- the TODO below is unresolved.
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
DOC.set_value("ground snow fraction")
DOC.set_value("vegetation snow fraction")
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# This property accepts multiple selections (Cardinality 1.N), hence the
# repeated DOC.set_value(...) calls below. The first call uses the
# "Other: [Please specify]" escape to record a choice ("snow refreezing")
# that is not in the controlled-vocabulary list above.
DOC.set_value("Other: snow refreezing")
DOC.set_value("snow interception")
DOC.set_value("snow melting")
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
DOC.set_value("prognostic")
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
DOC.set_value("aerosol deposition")
DOC.set_value("snow age")
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
DOC.set_value("vegetation types")
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
DOC.set_value("C3 grass")
DOC.set_value("C4 grass")
DOC.set_value("broadleaf tree")
DOC.set_value("needleleaf tree")
DOC.set_value("vegetated")
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
DOC.set_value("prescribed (varying from files)")
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("prescribed")
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
DOC.set_value("CO2")
DOC.set_value("light")
DOC.set_value("temperature")
DOC.set_value("water availability")
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(2)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
DOC.set_value("transpiration")
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# NOTE(review): Required BOOLEAN (cardinality 1.1) still unset, and the
# following permafrost_carbon properties (28.2-28.4) are also blank -- this
# whole section appears not to have been filled in yet.
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(2)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
DOC.set_value("present day")
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
DOC.set_value("direct (large rivers)")
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
DOC.set_value("water")
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
DOC.set_value("water")
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
DOC.set_value("diagnostic")
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
romeokienzler/uhack | projects/bosch/ETLPython.ipynb | apache-2.0 |
import ibmos2spark

# @hidden_cell
# SECURITY NOTE(review): live Object Storage credentials (including a plaintext
# password) are hard-coded here and have been committed with the notebook.
# They should be rotated immediately and loaded from an environment variable
# or a secrets store instead of being embedded in source.
credentials = {
    'auth_url': 'https://identity.open.softlayer.com',
    'project_id': '6aaf54352357483486ee2d4981f8ef15',
    'region': 'dallas',
    'user_id': 'b160340071b3407ca50c6b9a46b0bb25',
    'username': 'member_b092a5c6f5c11f819059a83dfbd5d922b8a2299b',
    'password': 'qwN4Y5EM*0KuZck['
}

# Name of the Bluemix Object Storage configuration; `sc` (SparkContext) is
# expected to be provided by the notebook runtime, not defined in this cell.
configuration_name = 'os_d3bd5b94a9334de59a55a7fed2bedeaa_configs'
bmos = ibmos2spark.bluemix(sc, credentials, configuration_name)
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# Please read the documentation of PySpark to learn more about the possibilities to load data files.
# PySpark documentation: https://spark.apache.org/docs/2.0.1/api/python/pyspark.sql.html#pyspark.sql.SparkSession
# The SparkSession object is already initalized for you.
# The following variable contains the path to your file on your Object Storage.
path_1 = bmos.url('dwlive', '4bertholdxor.py')
"""
Explanation: Romeo Kienzler's
Winning a Kaggle competition with Apache Spark and SparkML Machine Learning Pipelines
Initialize Credentials and Path To Object Store
End of explanation
"""
#please create your own cookiescookies_kaggle = """
www.kaggle.com FALSE / TRUE 1535524510 ai_user 6d0UU|2017-08-29T06:30.942Z
.kaggle.com TRUE / FALSE 1506326405 __utmt 1
.kaggle.com TRUE / FALSE 2137425820 intercom-id-koj6gxx6 d136e488-e012b2-917c-18e96cd11b53
.kaggle.com TRUE / FALSE 1569397856 __utma 158690720.1103090056.1503510.1506325806.1506325806.1
.kaggle.com TRUE / FALSE 1506327656 __utmb 158690720.3.9.1506356073
.kaggle.com TRUE / FALSE 1522093856 __utmz 158690720.1506325.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)
.kaggle.com TRUE / TRUE 1569397858 .ASPXAUTH C03B6AF3A11A5A7B1643B1A3045BEA90749D3BDD9E7975FE2B5D993E3807E8A7EBA8014ADDEB41B8097DFE9573858D6F38B191B03049421AC5DCF41136A5520326698DF20838BBA9A357B65934C4FDD63D73D
.kaggle.com TRUE / FALSE 1569397860 _ga GA1.2.110309005503988510
.kaggle.com TRUE / FALSE 1506412260 _gid GA1.2.171929611506325781
www.kaggle.com FALSE / TRUE 1506327661 ai_session 1TUXc|15325787951|1506325861052.695
.kaggle.com TRUE / FALSE 1506930662 intercom-session-koj6gxx6 WG9FRmZRSEVUQjhPdHpTR0xIay9peVBYbzhvazlKNGJ4QzVzMztTnJiVVNiZzArNVRXZHVrcGJ3T0dXYy0tZ2x0S3BRUm53RW4vc3VVMGQ5WlFDUT09--c72537f5db077850756ed76403e3c4e6cf4bb981
"""
with open("cookies_kaggle.txt", "w") as text_file:
text_file.write(cookies_kaggle)
! cat cookies_kaggle.txt
"""
Explanation: Setup Cookies by capturing from your browser so we can download/wget files directly from kaggle
End of explanation
"""
! wget --load-cookies cookies.txt https://www.kaggle.com/c/bosch-production-line-performance/download/train_numeric.csv.zip
!unzip train_numeric.csv.zip
"""
Explanation: Process train_numeric.csv.zip: get, unzip, read using DataFrames and save as parquet file for use later
End of explanation
"""
!ls -lahr train_numeric.csv
# Read the numeric training data (header row + inferred schema), coalesce to a
# single partition, and persist it as Parquet in the 'dwlive' container.
df_numeric = spark.read.option("header",True).option("inferSchema", True).csv("train_numeric.csv")
df_numeric = df_numeric.repartition(1)
df_numeric.write.parquet(bmos.url('dwlive', 'train_numeric.parquet'))
"""
Explanation: Inspect the unzipped CSV, then read it into a Spark DataFrame and save it as a Parquet file
End of explanation
"""
! wget --load-cookies cookies.txt https://www.kaggle.com/c/bosch-production-line-performance/download/train_categorical.csv.zip
!unzip train_categorical.csv.zip
df_categorical = spark.read.option("header",True).option("inferSchema", True).csv("train_categorical.csv")
df_categorical = df_categorical.repartition(1)
df_categorical.write.parquet(bmos.url('BoschKaggleCompetition', 'train_categorical.parquet'))
"""
Explanation: Process train_categorical.csv.zip: get, unzip, read using DataFrames and save as parquet file for use later
End of explanation
"""
# Please read the documentation of PySpark to learn more about the possibilities to load data files.
# PySpark documentation: https://spark.apache.org/docs/2.0.1/api/python/pyspark.sql.html#pyspark.sql.SparkSession
# The SparkSession object is already initalized for you.
# The following variable contains the path to your file on your Object Storage.
path_2 = bmos.url('dwlive', 'part-00000-abf604d6-bc0a-4ea3-8ffa-7838c5912fa2.snappy.parquet')
df_categorical = spark.read.parquet(path_2)
df_categorical.show()
df_categorical.printSchema()
!df -H
"""
Explanation: Test loading parquet File
Note: Replace URL path with your own
End of explanation
"""
|
QuantumDamage/AQIP | workspace/03-api.ipynb | apache-2.0 | %matplotlib inline
import requests
from pandas.io.json import json_normalize
import pandas as pd
"""
Explanation: Official documentation:
http://powietrze.gios.gov.pl/pjp/content/api#
End of explanation
"""
# Fetch the full list of GIOS air-quality stations and flatten the JSON
# payload into a pandas DataFrame.
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/station/findAll')
allStations = json_normalize(r.json())
# Show only the stations located in Gdansk.
print(allStations[allStations["city.name"] == u"Gdańsk"])
"""
Explanation: Getting all stations:
End of explanation
"""
# Station 733 = "AM5 Gdańsk Szadółki"; list the sensors it exposes.
stationId = 733
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/station/sensors/' + str(stationId))
sensors = json_normalize(r.json())
print(sensors)
"""
Explanation: Lets see what we have in "AM5 Gdańsk Szadółki" which has id: 733
End of explanation
"""
# Sensor 4727 reports the PM10 concentration for station 733.
sensorId = 4727
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/data/getData/' + str(sensorId))
concentration = json_normalize(r.json())
concentrationFrame = pd.DataFrame()
# The API returns a single "values" cell holding a list of {date, value}
# dicts; unpack that list into two proper columns.
concentrationFrame["dates"] = [d[u'date'] for d in concentration["values"].values.item()]
concentrationFrame["values"] = [d[u'value'] for d in concentration["values"].values.item()]
concentrationFrame.set_index(["dates"], inplace=True)
#concentrationFrame.sort_index(inplace=True)
# We cannot sort the index because it is not unique: the API uses 12-hour time
# notation without an AM/PM marker. As a workaround, simply reverse the rows
# (the API returns newest-first) until the API is fixed.
concentrationFrame = concentrationFrame.iloc[::-1]
print(concentrationFrame)
concentrationFrame.plot(figsize=(15,5), grid=True)
"""
Explanation: Lets now see data about PM10 concentration - sensorId = 4727
End of explanation
"""
r = requests.get('http://api.gios.gov.pl/pjp-api/rest/aqindex/getIndex/' + str(stationId))
r.json()
"""
Explanation: And overall air quality index for the same station
End of explanation
"""
|
LucaCanali/Miscellaneous | Spark_Physics/ATLAS_Higgs_opendata/H_ZZ_4l_analysis_basic_experiment_data.ipynb | apache-2.0 | # Run this if you need to install Apache Spark (PySpark)
# !pip install pyspark
# Install sparkhistogram
# Note: if you cannot install the package, create the computeHistogram
# function as detailed at the end of this notebook.
!pip install sparkhistogram
# Run this to download the dataset
# It is a small file (200 KB), this exercise is meant mostly to show the Spark API
# See further details at https://github.com/LucaCanali/Miscellaneous/tree/master/Spark_Physics
!wget https://sparkdltrigger.web.cern.ch/sparkdltrigger/ATLAS_Higgs_opendata/Data_4lep.parquet
# Start the Spark Session
# This uses local mode for simplicity
# the use of findspark is optional
# import findspark
# findspark.init("/home/luca/Spark/spark-3.3.0-bin-hadoop3")
from pyspark.sql import SparkSession
spark = (SparkSession.builder
.appName("H_ZZ_4Lep")
.master("local[*]")
.getOrCreate()
)
# Read data with the candidate events
df_events = spark.read.parquet("Data_4lep.parquet")
df_events.printSchema()
# Count the number of events before cuts (filter)
print(f"Number of events: {df_events.count()}")
"""
Explanation: Higgs Boson Analysis with ATLAS Open Data
This is an example analysis of the Higgs boson detection via the decay channel H → ZZ* → 4l
From the decay products measured at the ATLAS experiment and provided as open data, you will be able to produce a histogram, and from there you can infer the invariant mass of the Higgs boson.
Code: it is based on the original work at ATLAS outreach notebooks
Data: from the 13TeV ATLAS opendata
Physics: See ATLAS paper on the discovery of the Higgs boson (mostly Section 4 and 4.1)
See also: https://github.com/LucaCanali/Miscellaneous/tree/master/Spark_Physics
Author and contact: Luca.Canali@cern.ch
March, 2022
H → ZZ* → 4l analysis
End of explanation
"""
# Apply filters (cuts) to the candidate events.
# Only events with 4 leptons are present in the input data.
# Cut on lepton charge:
# paper: "selecting two pairs of isolated leptons, each of which is comprised of two leptons with the same flavour and opposite charge"
# The net charge of the four leptons must be zero (two opposite-charge pairs).
df_events = df_events.filter("lep_charge[0] + lep_charge[1] + lep_charge[2] + lep_charge[3] == 0")
# Cut on lepton type:
# paper: "selecting two pairs of isolated leptons, each of which is comprised of two leptons with the same flavour and opposite charge"
# The allowed sums 44/48/52 correspond to 4e / 2e2mu / 4mu assuming the
# PDG-style codes electron=11, muon=13 -- TODO confirm against the dataset docs.
df_events = df_events.filter("lep_type[0] + lep_type[1] + lep_type[2] + lep_type[3] in (44, 48, 52)")
print(f"Number of events after applying cuts: {df_events.count()}")
"""
Explanation: Apply basic cuts
More details on the cuts (filters applied to the event data) in the reference ATLAS paper on the discovery of the Higgs boson (mostly Section 4 and 4.1)
End of explanation
"""
# This computes the 4-vector sum for the 4-lepton system.
# Cartesian momentum components from detector coordinates:
#   Px = pT*cos(phi), Py = pT*sin(phi), Pz = pT*sinh(eta)
df_4lep = df_events.selectExpr(
    "lep_pt[0] * cos(lep_phi[0]) + lep_pt[1] * cos(lep_phi[1]) + lep_pt[2] * cos(lep_phi[2]) + lep_pt[3] * cos(lep_phi[3]) as Px",
    "lep_pt[0] * sin(lep_phi[0]) + lep_pt[1] * sin(lep_phi[1]) + lep_pt[2] * sin(lep_phi[2]) + lep_pt[3] * sin(lep_phi[3]) as Py",
    "lep_pt[0] * sinh(lep_eta[0]) + lep_pt[1] * sinh(lep_eta[1]) + lep_pt[2] * sinh(lep_eta[2]) + lep_pt[3] * sinh(lep_eta[3]) as Pz",
    "lep_E[0] + lep_E[1] + lep_E[2] + lep_E[3] as E"
)
df_4lep.show(5)
# Invariant mass m = sqrt(E^2 - |p|^2); the /1e3 rescales the result to GeV
# (input energies/momenta are presumably in MeV -- confirm with dataset docs).
df_4lep_invmass = df_4lep.selectExpr("sqrt(E * E - ( Px * Px + Py * Py + Pz * Pz))/1e3 as invmass_GeV")
df_4lep_invmass.show(5)
# This defines the DataFrame transformation to compute the histogram of invariant mass
# The result is a histogram with (energy) bin values and event counts foreach bin
# Requires sparkhistogram
# See https://github.com/LucaCanali/Miscellaneous/blob/master/Spark_Notes/Spark_DataFrame_Histograms.md
from sparkhistogram import computeHistogram
# histogram parameters
min_val = 80
max_val = 250
num_bins = (max_val - min_val) / 5.0
# use the helper function computeHistogram in the package sparkhistogram
histogram_data = computeHistogram(df_4lep_invmass, "invmass_GeV", min_val, max_val, num_bins)
# The action toPandas() here triggers the computation.
# Histogram data is fetched into the driver as a Pandas Dataframe.
%time histogram_data_pandas=histogram_data.toPandas()
import numpy as np
# Computes statistical error on the data (histogram)
histogram_data_stat_errors = np.sqrt(histogram_data_pandas)
# This plots the data histogram with error bars
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]})
f, ax = plt.subplots()
x = histogram_data_pandas["value"]
y = histogram_data_pandas["count"]
err = histogram_data_stat_errors["count"]
# scatter plot
ax.plot(x, y, marker='o', color='red', linewidth=0)
#ax.errorbar(x, y, err, fmt = 'ro')
# histogram with error bars
ax.bar(x, y, width = 5.0, yerr = err, capsize = 5, linewidth = 0.2, ecolor='blue', fill=False)
ax.set_xlim(min_val-2, max_val)
ax.set_xlabel('$m_{4lep}$ (GeV)')
ax.set_ylabel('Number of Events / bucket_size = 5 GeV')
ax.set_title("Distribution of the 4-Lepton Invariant Mass")
# Label for the Z ang Higgs spectrum peaks
txt_opts = {'horizontalalignment': 'left',
'verticalalignment': 'center',
'transform': ax.transAxes}
plt.text(0.10, 0.86, "Z boson, mass = 91 GeV", **txt_opts)
plt.text(0.27, 0.55, "Higgs boson, mass = 125 GeV", **txt_opts)
# Add energy and luminosity
plt.text(0.60, 0.92, "ATLAS open data, for education", **txt_opts)
plt.text(0.60, 0.87, '$\sqrt{s}$=13 TeV,$\int$L dt = 10 fb$^{-1}$', **txt_opts)
plt.show()
spark.stop()
"""
Explanation: Compute the invariant mass
This computes the 4-vectors sum for the 4-lepton system
using formulas from special relativity.
See also http://edu.itp.phys.ethz.ch/hs10/ppp1/2010_11_02.pdf
and https://en.wikipedia.org/wiki/Invariant_mass
End of explanation
"""
def computeHistogram(df: "DataFrame", value_col: str, min: float, max: float, bins: int) -> "DataFrame":
    """This is a dataframe function to compute the count/frequency histogram of a column.

    Parameters
    ----------
    df: the dataframe with the data to compute
    value_col: column name on which to compute the histogram
    min: minimum value in the histogram
    max: maximum value in the histogram
    bins: number of histogram buckets to compute

    Output DataFrame
    ----------------
    bucket: the bucket number, range from 1 to bins (included)
    value: midpoint value of the given bucket
    count: number of values in the bucket
    """
    # NOTE: `min`/`max` shadow the Python builtins; kept for interface
    # compatibility with existing keyword callers.
    step = (max - min) / bins
    # BUGFIX(review): derive the SparkSession from the input DataFrame instead
    # of relying on a `spark` global defined elsewhere in the notebook.
    spark = df.sparkSession
    # Used to fill in missing buckets, i.e. buckets with no corresponding values.
    df_buckets = spark.sql(f"select id+1 as bucket from range({bins})")
    histdf = (df
              .selectExpr(f"width_bucket({value_col}, {min}, {max}, {bins}) as bucket")
              .groupBy("bucket")
              .count()
              .join(df_buckets, "bucket", "right_outer")  # add missing buckets and drop out-of-range ones
              .selectExpr("bucket",
                          f"{min} + (bucket - 1/2) * {step} as value",  # use the center value of each bucket
                          "nvl(count, 0) as count")  # buckets with no values get a count of 0
              .orderBy("bucket")
              )
    return histdf
"""
Explanation: Note on sparkhistogram
Use this to define the computeHistogram function if you cannot pip install sparkhistogram
End of explanation
"""
|
buckleylab/Buckley_Lab_SIP_project_protocols | sequence_analysis_walkthrough/PIPITS_Fungal_ITS_Pipeline.ipynb | mit | import os
# Provide the directory for your index and read files
ITS = '/home/roli/FORESTs_BHAVYA/WoodsLake/raw_seq/ITS/'
# Provide
datasets = [['ITS',ITS,'ITS.metadata.pipits.Woods.tsv']]
# Ensure your reads files are named accordingly (or modify to suit your needs)
readFile1 = 'read1.fq.gz'
readFile2 = 'read2.fq.gz'
indexFile1 = 'index_read1.fq.gz'
indexFile2 = 'index_read2.fq.gz'
# Example of metadata file
#Index1 Index2 Name
#AATTCAA CATCCGG RG1
#CGCGCAG TCATGGT RG2
#AAGGTCT AGAACCG RG3
#ACTGGAC TGGAATA RG4
## Again, for our pipeline Index1 typically is the reverse complement of the reverse barcode, while Index2 is the forward barcode.
"""
Explanation: PIPITS Fungal ITS-dedicated Pipeline
The default pair-merge algorithm in vsearch discards 90% of the data. This was observed in other datasets as well and is believed to be overly conservative. PIPITS offers support for using PEAR as a dedicated alternative
Dependencies
|| PIPITS ||
Follow instructions provided at:
https://github.com/hsgweon/pipits
Note: all dependencies which require 'sudo' will already be met (i.e. don't bother running those commands... they won't work anyways)
|| deML ||
Follow instructions provided at:
https://github.com/grenaud/deML
|| phyloseq ||
conda install -c r-igraph
Rscript -e "source('http://bioconductor.org/biocLite.R');biocLite('phyloseq')"
|| FUNGuild ||
download FUNGUild script:
https://raw.githubusercontent.com/UMNFuN/FUNGuild/master/Guilds_v1.1.py
|| PEAR ||
download at: https://sco.h-its.org/exelixis/web/software/pear/
Citations
Gweon, H. S., Oliver, A., Taylor, J., Booth, T., Gibbs, M., Read, D. S., et al. (2015). PIPITS: an automated pipeline for analyses of fungal internal transcribed spacer sequences from the Illumina sequencing platform. Methods in ecology and evolution, 6(8), 973-980.
Renaud, G., Stenzel, U., Maricic, T., Wiebe, V., & Kelso, J. (2014). deML: robust demultiplexing of Illumina sequences using a likelihood-based approach. Bioinformatics, 31(5), 770-772.
McMurdie and Holmes (2013) phyloseq: An R Package for Reproducible Interactive Analysis and Graphics of Microbiome Census Data. PLoS ONE. 8(4):e61217
Nguyen NH, Song Z, Bates ST, Branco S, Tedersoo L, Menke J, Schilling JS, Kennedy PG. 2016. FUNGuild: An open annotation tool for parsing fungal community datasets by ecological guild. Fungal Ecology 20:241–248.
Zhang J, Kobert K, Flouri T, Stamatakis A. 2013. PEAR: a fast and accurate Illumina Paired-End reAd mergeR. Bioinformatics, 30(5): 614-620.
Last Modified by R. Wilhelm on January 2nd, 2018
Step 1: User Input
End of explanation
"""
# Ignore all the 'conflict' errors. The reads are paired so the conflicts are bogus (i.e. it gives a warning everytime an barcode appears in multiple samples, but no pairs are duplicated)
for dataset in datasets:
name = dataset[0]
directory = dataset[1]
metadata = directory+dataset[2]
index1 = directory+indexFile1
index2 = directory+indexFile2
read1 = directory+readFile1
read2 = directory+readFile2
# Make output directory
%mkdir $directory/pipits_input/
# Run deML ## Note: you may get error involving 'ulimit'. If so, exit your notebook. Enter 'ulimit -n 9999' at the command line, then restart a new notebook.
!deML -i $metadata -f $read1 -r $read2 -if1 $index1 -if2 $index2 -o $directory/pipits_input/$name
# Remove unnecessary 'failed' reads and index files
%rm $directory/pipits_input/*.fail.* $directory/pipits_input/unknown*
"""
Explanation: Step 2: Demultiplex Raw Reads
End of explanation
"""
import glob, re
for dataset in datasets:
    name = dataset[0]
    directory = dataset[1]
    # Strip the dataset-name prefix that was prepended during demultiplexing
    # (PIPITS expects bare sample names).
    for file in glob.glob(directory+"pipits_input/"+name+"_*"):
        new_name = re.sub(name+"_","",file)
        os.rename(file, new_name)
    # Rename .fq.gz files to .fastq.gz (PIPITS is picky about the extension).
    # NOTE(review): the '.' characters in the pattern ".fq.gz" act as regex
    # wildcards; re.escape() or str.replace() would be safer against odd names.
    for file in glob.glob(directory+"pipits_input/*.fq.gz"):
        new_name = re.sub(".fq.gz",".fastq.gz",file)
        os.rename(file, new_name)
    # Remove unbinned (unassigned) reads left over from demultiplexing.
    %rm $directory/pipits_input/unknown*
    # Run PIPITS list prep: build the read-pair list file for this dataset.
    input_dir = directory+"pipits_input/"
    output_dir = directory+name+".readpairslist.txt"
    !pipits_getreadpairslist -i $input_dir -o $output_dir -f
"""
Explanation: Step 3: Make Sample Mapping File (aka. 'readpairlist')
End of explanation
"""
for dataset in datasets:
name = dataset[0]
directory = dataset[1]
input_dir = directory+"pipits_input/"
output_dir = directory+"pipits_prep/"
readpairfile = directory+name+".readpairslist.txt"
!pipits_prep -i $input_dir -o $output_dir -l $readpairfile
"""
Explanation: Step 4: Pre-process Data with PIPITS (merge and QC)
End of explanation
"""
ITS_Region = "ITS1"
for dataset in datasets:
name = dataset[0]
directory = dataset[1]
input_file = directory+"pipits_prep/prepped.fasta"
output_dir = directory+"pipits_funits/"
!pipits_funits -i $input_file -o $output_dir -x $ITS_Region
"""
Explanation: Step 4: Extract Variable Region (User Input Required)
End of explanation
"""
for dataset in datasets:
name = dataset[0]
directory = dataset[1]
input_file = directory+"pipits_funits/ITS.fasta"
output_dir = directory+"PIPITS_final/"
!pipits_process -i $input_file -o $output_dir --Xmx 20G
"""
Explanation: Step 5: Cluster and Assign Taxonomy
End of explanation
"""
for dataset in datasets:
name = dataset[0]
directory = dataset[1]
# Prepare PIPITS output for FUNGuild
!pipits_funguild.py -i $directory/PIPITS_final/otu_table.txt -o $directory/PIPITS_final/otu_table_funguild.txt
# Run FUNGuild
!python /home/db/FUNGuild/Guilds_v1.1.py -otu $directory/PIPITS_final/otu_table_funguild.txt -db fungi -m -u
"""
Explanation: Step 6: Push OTU Table through FUNGuild
End of explanation
"""
## Setup R-Magic for Jupyter Notebooks
import rpy2
import pandas as pd
%load_ext rpy2.ipython
%R library(phyloseq)
for dataset in datasets:
name = dataset[0]
directory = dataset[1]
metadata = dataset[2]
# Input Biom
biom = directory+"/PIPITS_final/otu_table.biom"
%R -i biom
%R x <- import_biom(biom)
# Fix taxonomy table
%R colnames(tax_table(x)) <- c("Domain","Phylum","Class","Order","Family","Genus","Species")
%R tax_table(x) = gsub("k__| p__| c__| o__| f__| g__| s__","",tax_table(x))
# Merge Mapping into Phyloseq
sample_file = pd.read_table(directory+metadata, keep_default_na=False)
%R -i sample_file
%R rownames(sample_file) <- sample_file$X.SampleID
%R sample_file$X.SampleID <- NULL
%R sample_file <- sample_data(sample_file)
%R p <- merge_phyloseq(x, sample_file)
# Save Phyloseq Object as '.rds'
output = directory+"/PIPITS_final/p_"+name+".pipits.final.rds"
%%R -i output
%%R saveRDS(p, file = output)
# Confirm Output
%R print(p)
"""
Explanation: Step 7: Import into R
End of explanation
"""
for dataset in datasets:
name = dataset[0]
directory = dataset[1]
%rm -r $directory/pipits_prep/
%rm -r $directory/pipits_funits/
%rm -r $directory/pipits_input/
del_me = directory+name+".readpairslist.txt"
%rm $del_me
"""
Explanation: Step 7: Clean-up Intermediate Files and Final Outputs
End of explanation
"""
|
cleuton/datascience | datavisualization/data_visualization_python_2_english.ipynb | apache-2.0 | import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D # Objects for 3D charts
%matplotlib inline
df = pd.read_csv('../datasets/evasao.csv') # School dropout data I collected
df.head()
"""
Explanation: Data visualization with Python
2 - Data with more than 2 dimensions
Cleuton Sampaio, DataLearningHub
In this lesson, we'll look at how to provide visualizations with more than two dimensions of data.
Three-dimensional dispersion
In cases where we have three measurable and, mainly, plotable characteristics (within the same scale - or we can adjust the scale), it is interesting to see a scatter plot so that we can visually assess the distribution of the samples. This is what we will see with the Matplotlib Toolkits library, especially MPlot3D, which has the Axes3D object for generating three-dimensional graphics.
End of explanation
"""
df2 = df[['periodo','repetiu','desempenho']][df.abandonou == 1]
df2.head()
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = Axes3D(fig) # Para Matplotlib 0.99
ax.scatter(xs=df2['periodo'],ys=df2['repetiu'],zs=df2['desempenho'], c='r',s=8)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
ax.set_zlabel('desempenho')
plt.show()
"""
Explanation: Some explanations. To start, let's look at the columns in this dataset:
- "periodo": Period the student is in;
- "bolsa": Percentage of scholarship that the student receives;
- "repetiu": Number of subjects in which the student failed;
- "ematraso": If the student is in arrears;
- "disciplinas": Disciplines that the student is currently taking;
- "desempenho": Academic average so far;
- "abandonou": Whether the student abandoned the course after the measurement or not.
In order to plot a chart, we need to reduce the number of dimensions, that is, the characteristics. I will do this in the most "naive" way possible, selecting three characteristics that most influenced the final result, that is, the student's abandonment (Churn).
End of explanation
"""
import numpy as np
np.random.seed(42)
X = np.linspace(1.5,3.0,num=100)
Y = np.array([x**4 + (np.random.rand()*6.5) for x in X])
Z = np.array([(X[i]*Y[i]) + (np.random.rand()*3.2) for i in range(0,100)])
"""
Explanation: I simply used Axes3D to obtain a three-dimensional chart object. The "scatter" method takes on three dimensions (xs, ys and zs), each assigned to one of the columns of the new dataframe. The "c" parameter is the color and the "s" is the size of each point. I informed the labels of each axis and that's it! We have a 3D graph showing the spatial distribution of dropouts, with respect to the three variables.
We can assess the data trend much better if we look at 3D visualizations. Let's look at a synthetic example. Let's generate some 3D values:
End of explanation
"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X, Y, c='b', s=20)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.show()
"""
Explanation: First we'll see how this would look in 2D view:
End of explanation
"""
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(X, Y, Z, c='r',s=8)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
"""
Explanation: Ok ... Nothing much ... A positive non-linear correlation, right? But now, let's see this with the Z matrix included:
End of explanation
"""
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
features = pd.DataFrame({'X':X, 'Z':Z})
labels = pd.DataFrame({'Y':Y})
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.33, random_state=42)
dtr3d = DecisionTreeRegressor(max_depth=4, random_state=42)
dtr3d.fit(X_train,y_train)
print('R2',dtr3d.score(X_train,y_train))
yhat3d = dtr3d.predict(X_test)
fig = plt.figure()
ax = ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, Z, c='r',s=8)
ax.scatter(X_test['X'], yhat3d, X_test['Z'], c='k', marker='*',s=100)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
"""
Explanation: And it gets more interesting when we overlay a prediction on the actual data. Let's use a Decision Tree Regressor to create a predictive model for this data:
End of explanation
"""
print(df.groupby("bolsa").count())
"""
Explanation: We plot the predictions using a star-type marker. It was very interesting, right?
More than 3 dimensions
Sometimes we want to demonstrate information with more than 3 dimensions, but how do we do that? Let's assume that we also want to include the scholarship percentage as a variable in our example of school dropout. How would we do it?
One possible approach would be to manipulate the markers to represent the scholarship. We can use colors, for example. Let's see, first, we need to know which scholarship percentage levels exist in the dataset:
End of explanation
"""
from decimal import Decimal
# Map each scholarship percentage to a matplotlib color code, so the
# scholarship can act as a fourth visual dimension in the scatter plot.
bolsas = {0.00: 'b',0.05: 'r', 0.10: 'g', 0.15: 'm', 0.20: 'y', 0.25: 'k'}
# The raw data is "dirty": 0.15 is stored as 0.1500000002, so round each value
# through Decimal before using it as a dictionary key.
df['cor'] = [bolsas[float(round(Decimal(codigo),2))] for codigo in df['bolsa']]
df.head()
"""
Explanation: We can create a color table, indexed by the percentage of scholarship:
End of explanation
"""
# 3D scatter of the dropout data; the marker color encodes the scholarship
# percentage (a fourth dimension) via the `cor` column.
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = Axes3D(fig) # For Matplotlib 0.99
ax.scatter(xs=df['periodo'],ys=df['repetiu'],zs=df['desempenho'], c=df['cor'],s=50)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
ax.set_zlabel('desempenho')
plt.show()
"""
Explanation: This "trick" deserves an explanation. I created a dictionary indexed by the scholarship value, from which we get the corresponding color code. I just need to include a column in the dataframe with that value, in order to use it in the chart. There is only one problem: the original dataset is "dirty" (something that happens frequently) and the percentage 0.15 is stored as 0.1500000002. I remove this by converting the value from "float" to "Decimal", rounding, and converting back to float.
When plotting, let's look for the color in the dictionary:
End of explanation
"""
# 2D view: encode the third variable ('desempenho') as the marker size.
fig, ax = plt.subplots()
ax.scatter(df['periodo'],df['repetiu'], c='r',s=df['desempenho']*30)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
plt.show()
"""
Explanation: Ok! There we have the color of the ball giving the fourth dimension: The percentage of scholarship
We see that there is already a concentration of students with a 25% scholarship (black) with few repetitions, but low performance, in all periods.
Just as we manipulated the color, we can manipulate the marker size, creating something like a "heat map". We will flatten this view to 2D, encoding "performance" as the marker size.
End of explanation
"""
df_dengue = pd.read_csv('./dengue2018.csv',decimal=',', sep=';')
df_dengue.head()
"""
Explanation: This shows us a curious fact. We have students with good performance (big balls) in all periods, without repeating any discipline, who left. What would have made them do this? Maybe it's financial conditions, or dissatisfaction with the course. A fact to be investigated, which was only revealed thanks to this visualization.
Georeferencing
We often have datasets with geographic information and we need to plot the data on a map. I will show you here how to do this with an example of the dataset of the 2018 Dengue cases in Rio de Janeiro. Source: Data Rio: http://www.data.rio/datasets/fb9ede8d588f45b48b985e62c817f062_0
I created a georeferenced dataset, which is in the folder for this demo. It is in CSV format, separated by semicolons, with a decimal separator in Portuguese (comma) and field separator as ";"
End of explanation
"""
fig, ax = plt.subplots()
ax.scatter(df_dengue['longitude'],df_dengue['latitude'], c='r',s=15)
plt.show()
"""
Explanation: The columns are: "bairro": neighborhood, "quantidade": quantity of cases. The rest you know.
A simple scatter plot gives a good idea of the problem.
End of explanation
"""
fig, ax = plt.subplots()
ax.scatter(df_dengue['longitude'],df_dengue['latitude'], c='r',s=5+df_dengue['quantidade'])
plt.show()
"""
Explanation: We can place the point size proportional to the number of cases, increasing the size of the information:
End of explanation
"""
def calcular_cor(valor):
    """Map a dengue case count to a heat-map colour.

    <=10 -> yellow, <=30 -> amber, <=50 -> orange, anything larger -> red.
    """
    escala = ((10, '#ffff00'), (30, '#ffbf00'), (50, '#ff8000'))
    for limite, tom in escala:
        if valor <= limite:
            return tom
    return 'r'
df_dengue['cor'] = [calcular_cor(codigo) for codigo in df_dengue['quantidade']]
df_dengue.head()
"""
Explanation: We can manipulate color and intensity to create a "heat map" of Dengue:
End of explanation
"""
# Sort ascending by count so the largest (red) markers are drawn last, on top.
dfs = df_dengue.sort_values(['quantidade'])
dfs.head()
fig, ax = plt.subplots()
ax.scatter(dfs['longitude'],dfs['latitude'], c=dfs['cor'],s=10+dfs['quantidade'])
plt.show()
"""
Explanation: And we will classify the largest quantities to be last:
End of explanation
"""
!pip install requests
"""
Explanation: Ok! A heat map of Dengue in 2018. But something is missing right? Where's the map of Rio de Janeiro?
A lot of people use geopandas and download map files. I prefer to use Google Maps. It has an API called Static Maps that allows you to download maps. First, I will install requests:
End of explanation
"""
import requests
# Centre of Rio de Janeiro (geographic centre) and Static Maps parameters.
latitude = -22.9137528
longitude = -43.526409
zoom = 10
size = 800   # image width and height in pixels
scale = 1
apikey = "**HERE TYPE YOUR API KEY**"
# Build the Google Static Maps request URL.
gmapas = "https://maps.googleapis.com/maps/api/staticmap?center=" + str(latitude) + "," + str(longitude) + \
"&zoom=" + str(zoom) + \
"&scale=" + str(scale) + \
"&size=" + str(size) + "x" + str(size) + "&key=" + apikey
# Stream the image to disk in 1 KiB chunks.
# NOTE(review): on a failed request this only prints the response object and
# still writes whatever body came back -- consider response.raise_for_status().
with open('mapa.jpg', 'wb') as handle:
    response = requests.get(gmapas, stream=True)
    if not response.ok:
        print(response)
    for block in response.iter_content(1024):
        if not block:
            break
        handle.write(block)
"""
Explanation: Now, a bit more "smart" part. I have the coordinates of the center of Rio de Janeiro (geographical center, not the city center). I will create a request to the API Static Map to download a map. You see, you have to register an API Key to use this API. I purposely omitted mine. Here you have instructions for this: https://developers.google.com/maps/documentation/maps-static/get-api-key
End of explanation
"""
import math
_C = { 'x': 128, 'y': 128 };
_J = 256 / 360;
_L = 256 / (2 * math.pi);
def tb(a):
    """Convert an angle from radians to degrees."""
    return (a * 180) / math.pi
def sb(a):
    """Convert an angle from degrees to radians."""
    return (a * math.pi) / 180
def bounds(a, b, c):
    """Clamp ``a`` to the interval [b, c]; a bound of None is ignored.

    Note the argument order: ``b`` is the lower bound, ``c`` the upper.
    """
    # Use identity comparison with None (PEP 8) instead of `!= None`.
    if b is not None:
        a = max(a, b)
    if c is not None:
        a = min(a, c)
    return a
def latlonToPt(ll):
    """Project a [lat, lon] pair onto Web-Mercator pixel coordinates (zoom 0)."""
    lat, lon = ll[0], ll[1]
    # Clamp sin(lat) away from +/-1 so the log below stays finite near the poles.
    s = bounds(math.sin(sb(lat)), -(1 - 1E-15), 1 - 1E-15)
    merc = 0.5 * math.log((1 + s) / (1 - s))
    return {'x': _C['x'] + lon * _J, 'y': _C['y'] + merc * -_L}
def ptToLatlon(pt):
    """Inverse of latlonToPt: pixel point {'x','y'} back to a [lat, lon] pair."""
    lat_rad = 2 * math.atan(math.exp((pt['y'] - _C['y']) / -_L)) - math.pi / 2
    lon = (pt['x'] - _C['x']) / _J
    return [tb(lat_rad), lon]
def calculateBbox(ll, zoom, sizeX, sizeY, scale):
    """Return {'ne': [lat, lon], 'sw': [lat, lon]} bounding a Static Map image.

    NOTE(review): the ``scale`` argument is accepted but never used here --
    confirm whether retina (scale=2) maps need it folded into pixelSize.
    """
    centre = latlonToPt(ll)
    pixelSize = math.pow(2, -(zoom + 1))
    halfW = sizeX * pixelSize
    halfH = sizeY * pixelSize
    ne = ptToLatlon({'x': centre['x'] + halfW, 'y': centre['y'] - halfH})
    sw = ptToLatlon({'x': centre['x'] - halfW, 'y': centre['y'] + halfH})
    return {'ne': ne, 'sw': sw}
limites = calculateBbox([latitude,longitude],zoom, size, size, scale)
print(limites)
"""
Explanation: Well, the map was saved, now I need to know the coordinates of the limits. The Google API only allows you to enter the center (latitude and longitude) and the dimensions of the image in pixels. But, to adjust the map to the coordinates in latitudes and longitudes, you need to know the coordinates of the image rectangle. There are several examples of how to calculate this and I use a Javascript example that I converted to Python some time ago. This calculation is based on the script from: https://jsfiddle.net/1wy1mm7L/6/
End of explanation
"""
import matplotlib.image as mpimg
fig, ax = plt.subplots(figsize=(10, 10))
rio_mapa=mpimg.imread('./mapa.jpg')
# extent order is [left, right, bottom, top]: longitudes first, then latitudes.
plt.imshow(rio_mapa, extent=[limites['sw'][1],limites['ne'][1],limites['sw'][0],limites['ne'][0]], alpha=1.0)
ax.scatter(dfs['longitude'],dfs['latitude'], c=dfs['cor'],s=10+dfs['quantidade'])
plt.ylabel("Latitude", fontsize=14)
plt.xlabel("Longitude", fontsize=14)
plt.show()
"""
Explanation: The function "calculateBbox" returns a dictionary containing the Northeast and Southwest points, with the latitude and longitude of each one.
To use this in matplotlib, I need to use the imshow method, except that I need to inform the scale, that is, what is the range of latitudes (vertical) and longitudes (horizontal) that the map represents. Thus, the plotting of points will be correct.
I will use the mpimg library to read the image file I just downloaded.
But the function imshow uses the coordinates in the attribute extent in the order: LEFT, RIGHT, DOWN, TOP. We have to organize the passing of the parameters to it.
End of explanation
"""
|
jamesorr/mocsy | notebooks/mocsy_errors.ipynb | mit | %%bash
pwd
mkdir code
cd code
git clone https://github.com/jamesorr/mocsy.git
cd mocsy
make
pwd
"""
Explanation: Examples of propagating uncertainties in mocsy
<hr>
James Orr - 11 November 2018<br>
<img align="left" width="60%" src="http://www.lsce.ipsl.fr/Css/img/banniere_LSCE_75.png" ><br><br>
LSCE/IPSL, CEA-CNRS-UVSQ, Gif-sur-Yvette, France
<hr>
Table of contents:
Download, build, and import mocsy
Simple use of mocsy's vars routine
Simple use of mocsy's errors routine
1. Setup
1.1 Download and build mocsy (Do this only once)
Put code into ./code/mocsy subdirectory and build mocsy.so for import into python
The build process might take a couple of minutes, depending on your computer
End of explanation
"""
## Preliminaries
import numpy as np
import pandas as pd
import os, sys
"""
Explanation: 1.2 Import standard python libraries
End of explanation
"""
# Comment out 1st line below, and Uncomment 2nd line below (if you have run the cell above under section 1.1)
mocsy_dir = "/homel/orr/Software/fortran/mocsy"
#mocsy_dir = "./code/mocsy"
sys.path.append (mocsy_dir)
import mocsy
"""
Explanation: 1.3 Import mocsy (after specifying location of its shared object file)
The first uncommented line below is the subdirectory where the make command was executed (where mocsy.so is located)
End of explanation
"""
print mocsy.mvars.vars.__doc__
"""
Explanation: 2. Compute derived variables using mocsy's vars routine
For starters, begin with a simple example of how to call (in python) mocsy's most used routine, vars
Some basic documentation
End of explanation
"""
# Options (string codes interpreted by mocsy -- see its documentation):
optCON = 'mol/kg'   # concentration units for inputs/outputs
optT = 'Tinsitu'    # temperature given as in-situ temperature
optP = 'm'          # depth given in metres
#optB = 'l10'
optB = 'u74'        # total-boron formulation (presumably Uppstrom 1974 -- see mocsy docs)
optK1K2 = 'l'       # choice of K1/K2 constants
optKf = 'dg'        # choice of Kf formulation
# Standard 6 input variables
temp = 18.0         # degrees C
sal = 35.0          # salinity
alk = 2300.e-6      # total alkalinity [mol/kg]
dic = 2000.e-6      # dissolved inorganic carbon [mol/kg]
#phos = 2.0e-6
#sil = 60.0e-6
phos = 0.0e-6       # phosphate [mol/kg]
sil = 0.0e-6        # silicate [mol/kg]
# Other standard input (depth, atm pressure, latitude)
depth = 0.
Patm = 1.0
lat = 0.
"""
Explanation: Specify input variables and options
End of explanation
"""
ph, pco2, fco2, co2, hco3, co3, OmegaA, OmegaC, BetaD, rhoSW, p, tempis = mocsy.mvars.vars(
temp, sal, alk, dic, sil, phos, Patm, depth, lat,
optCON, optT, optP, optb=optB, optk1k2=optK1K2, optkf=optKf )
"""
Explanation: Call the vars routine
End of explanation
"""
print ph, pco2, fco2, co2, hco3, co3, OmegaA, OmegaC, BetaD, rhoSW, p, tempis
"""
Explanation: Print output
End of explanation
"""
print mocsy.merrors.__doc__
"""
Explanation: 3. Example use of errors routine
End of explanation
"""
# 1-sigma uncertainties on the input variables (same units as the inputs).
#etemp, esal = 0.01, 0.01
etemp, esal = 0.0, 0.0
ealk, edic = 2e-6, 2e-6
esil = 5e-6
ephos = 0.1e-6
"""
Explanation: Define errors
End of explanation
"""
[eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC] = \
mocsy.merrors(
temp, sal, alk, dic, sil, phos, Patm, depth, lat,
etemp, esal, ealk, edic, esil, ephos,
optCON, optT, optP, optb=optB, optk1k2=optK1K2, optkf=optKf, optgas='Pinsitu', ebt=0.01)
print eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC
[eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC] = \
mocsy.merrors(
temp, sal, alk, dic, sil, phos, Patm, depth, lat,
etemp, esal, ealk, edic, esil, ephos,
optCON, optT, optP, optb=optB, optk1k2=optK1K2, optkf=optKf, optgas='Pinsitu')
print eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC
"""
Explanation: Basic call to errors routine
End of explanation
"""
[eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC] = \
mocsy.merrors(
temp, sal, alk, dic, sil, phos, Patm, depth, lat,
etemp, esal, ealk, edic, esil, ephos,
optCON, optT, optP, optb=optB, optk1k2=optK1K2, optkf=optKf,
r=0.0, epk=np.array([0.002,0.0075,0.015,0.01,0.01,0.02,0.02]), ebt=0.02)
print eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC
"""
Explanation: Call errors specifying most all of the arguments, including the optional ones for just the errors routine (r, epk, and ebt).
In the cell below, the results would be identical if those 3 options were not present, because the values listed are the defaults in the errors routine:
* r = 0.0 (no correlation between uncertainties in input pair (ealk and edic).
* epk = a 7-member vector for the uncertainties in the equil. constants (K0, K1, K2, Kb, Kw, Ka, Kc) in pK units
* ebt = the uncertainty in total boron, a relative fractional error [i.e., ebt = 0.02 is a 2% error, the default]
3.1 Errors, specifying standard r, epK, eBt
End of explanation
"""
[eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC] = \
mocsy.merrors(
temp, sal, alk, dic, sil, phos, Patm, depth, lat,
etemp, esal, ealk, edic, esil, ephos,
optCON, optT, optP, optb=optB, optk1k2=optK1K2, optkf=optKf)
print eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC
"""
Explanation: 3.2 Errors, assuming defaults for r, epK, eBt
End of explanation
"""
[eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC] = \
mocsy.merrors(
temp, sal, alk, dic, sil, phos, Patm, depth, lat,
etemp, esal, ealk, edic, esil, ephos,
optCON, optT, optP, optb=optB, optk1k2=optK1K2, optkf=optKf,
epk=np.array([0.000,0.000,0.00,0.00,0.00,0.00,0.00]), ebt=0.00)
print eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC
"""
Explanation: 3.3 Errors, specifying 0 for r, epK, eBt
End of explanation
"""
[eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC] = \
mocsy.merrors(
temp, sal, alk, dic, sil, phos, Patm, depth, lat,
etemp, esal, ealk, edic, esil, ephos,
optCON, optT, optP, optb=optB, optk1k2=optK1K2, optkf=optKf, r=1.0)
print eH, epCO2, efCO2, eCO2, eHCO3, eCO3, eOmegaA, eOmegaC
"""
Explanation: 3.3 Errors, specifying r=1.0
End of explanation
"""
|
mclaughlin6464/pearce | notebooks/Compute Shape Noise.ipynb | mit | from matplotlib import pyplot as plt
%matplotlib inline
#import seaborn as sns
#sns.set()
import matplotlib.colors as colors
import numpy as np
#from nbodykit.source.catalog.halos import HaloCatalog
#from nbodykit.source.catalog.file import HDFCatalog
#from nbodykit.cosmology import Cosmology
#from nbodykit.algorithms import FFTPower
import h5py
import yaml
from scipy.optimize import minimize_scalar
from pearce.mocks.kittens import DarkSky
from pearce.mocks.customHODModels import *
def make_LHC(ordered_params, N, seed = None):
    """Build an N-point Latin hypercube sample over the given parameter ranges.

    ordered_params maps parameter name -> (low, high).  Returns an
    (N, n_params) array with exactly one sample per linspace row in every
    dimension.

    NOTE(review): uses dict.itervalues() (Python 2 only) and calls time() --
    `time` must be imported in a part of the notebook not shown here; confirm.
    """
    if seed is None:
        seed = int(time())
    np.random.seed(seed)
    points = []
    # by linspacing each parameter and shuffling, I ensure there is only one point in each row, in each dimension.
    for plow, phigh in ordered_params.itervalues():
        point = np.linspace(plow, phigh, num=N)
        np.random.shuffle(point) # makes the cube random.
        points.append(point)
    return np.stack(points).T
def add_logMmin(hod_params, cat):
    """Set logMmin in-place so the HOD matches the target number density.

    Minimises (analytic nd - target nd)^2 over logMmin with a bounded scalar
    optimiser and writes the result back into hod_params.

    NOTE(review): reads module-level globals `min_ptcl`, `nd` and
    `logMmin_bounds`; only min_ptcl is defined in the visible part of this
    notebook -- confirm the other two exist before running.
    """
    hod_params['logMmin'] = 13.0 #initial guess
    #cat.populate(hod_params) #may be overkill, but will ensure params are written everywhere
    def func(logMmin, hod_params):
        # squared error between the analytic number density and the target nd
        hod_params.update({'logMmin':logMmin})
        return (cat.calc_analytic_nd(hod_params, min_ptcl = min_ptcl) - nd)**2
    res = minimize_scalar(func, bounds = logMmin_bounds, args = (hod_params,), options = {'maxiter':100}, method = 'Bounded')
    # assuming this doesn't fail
    #print 'logMmin', res.x
    hod_params['logMmin'] = res.x
# Read the target number density and minimum-particle cut from the YAML config.
config_fname = 'xi_cosmo_trainer.yaml'
with open(config_fname, 'r') as ymlfile:
    cfg = yaml.load(ymlfile)  # NOTE(review): yaml.load without a Loader is unsafe on untrusted files; prefer yaml.safe_load
n_g = float(cfg['HOD']['fixed_nd'] )
min_ptcl = int(cfg['HOD']['min_ptcl'])
# Wavenumber range for the power-spectrum measurement.
kmax= 50
kmin= 0.5e-3
# Measure the 1D galaxy power spectrum.
# NOTE(review): `mesh` is not defined in the visible part of this notebook --
# it must be built in a cell not shown here; confirm.
r = FFTPower(mesh, mode='1d', dk=0.005, kmin=kmin)
k = r.power['k']
p_g = r.power['power'].real
k.shape
plt.loglog(k, p_g)
np.save('./p_g.npy', np.c_[k, p_g])
"""
Explanation: I am trying to estimate a shape noise contribution for my delta sigma estimate. Sukhdeep gave me some code and some instructions for how to compute it, but I'm having a little difficulty translating them. I'm gonna do the tinkering here.
The first thing he said was that I didn't need his code to compute the Diagonal contribution:
If you only need to add shape noise to your existing covariance, you can simply use the formula, Shape Noise (variance)=sigma_e^2 / #of pairs in a bin. This term is diagonal in covariance and for log bins should scale as 1/rp.
Number of pairs in a bin ~ N_lens * N_source * Area_bin / Area_survey
End of explanation
"""
rp_bins = np.logspace(-1.0, 1.6, 19) # TODO h's?
rp_points = (rp_bins[1:]+rp_bins[:-1])/2.0
"""
Explanation: ?? cat.calc_sigma_crit_inv
End of explanation
"""
# Shape-noise and galaxy shot-noise amplitudes that enter the covariance.
sigma_crit = 4.7e3 # TODO, need to assume a source distribution
sigma_e= 0.36# 0.36
sigma_gamma=sigma_e/1.7 # where does the 1.7 come from here?  (presumably a responsivity-like factor -- TODO confirm)
n_s= 8 # TODO need to assume a source distribution
shape_noise=sigma_crit**2*sigma_gamma**2/n_s#*cosmo.H_z(z=0.27)/cosmo.c
g_shot_noise=1./n_g
g_shot_noise, shape_noise
# TODO update with sim volume + Pi length
area=10000
# NOTE(review): `cosmo` is not defined in the visible part of this notebook.
area_comoving=area*(np.pi/180)**2*cosmo.comoving_distance(z=.27)**2
L_W=500
vol=area_comoving*L_W
vol=vol.value
# NOTE(review): L_W and vol are immediately recomputed below, discarding the
# survey-based volume above; only the simulation-box volume is actually used.
L_W = 500 # ? i don't know the meaning of this number
vol = ((cat.Lbox/cat.h)**2)*L_W
taper_kw=dict({'large_k_lower':10,'large_k_upper':kmax,'low_k_lower':kmin,'low_k_upper':kmin*1.2})
rmin=.05
rmax=100
from hankel_transform import hankel_transform
HT=hankel_transform(rmin=rmin,rmax=rmax,kmax=kmax,j_nu=[2],n_zeros=int(2e5),kmin=kmin)
# Projected covariance: (galaxy P(k) + shot noise) against a flat shape-noise spectrum.
r,cov_ggkk =HT.projected_covariance(k_pk = k,pk1= p_g+ g_shot_noise,pk2=np.zeros_like(p_g)+shape_noise,j_nu=2,taper=True,**taper_kw)
#plt.imshow(cov_ggkk)
# Re-bin the covariance onto the rp bins used for the measurement.
rp_re,cov_ggkk_re=HT.bin_cov(r=r,cov=cov_ggkk,r_bins=rp_bins)
#plt.imshow(cov_ggkk_re)
#corr=HT.corr_matrix(cov=cov_ggkk_re)
#plt.imshow(corr)
print rp_re
np.save('shape_noise_covmat.npy', cov_ggkk_re)
"""
Explanation: Below numbers are the defaults, also what's in Ben Wibking's paper. They're probably ok so long as I found out where they originate i.e. what n_z they correspond to.
End of explanation
"""
|
pdh21/XID_plus | docs/notebooks/examples/XID+_example_pyvo_prior.ipynb | mit | fields = ['AKARI-NEP',
'AKARI-SEP',
'Bootes',
'CDFS-SWIRE',
'COSMOS',
'EGS',
'ELAIS-N1',
'ELAIS-N2',
'ELAIS-S1',
'GAMA-09',
'GAMA-12',
'GAMA-15',
'HDF-N',
'Herschel-Stripe-82',
'Lockman-SWIRE',
'NGP',
'SA13',
'SGP',
'SPIRE-NEP',
'SSDF',
'XMM-13hr',
'XMM-LSS',
'xFLS']
# Index 6 selects 'ELAIS-N1'; change the index to run a different HELP field.
field_use = fields[6]
print(field_use)
"""
Explanation: First we select the field that the sources we are considering are in. If the sources span multiple fields that each field will need to be run individually as the FIR maps from seperate fields cannot be easily combined.
End of explanation
"""
# Target positions (degrees) for the XID+ run; supply your own values here.
ras = [242,243]#enter your ra here as a list or numpy array
decs = [55,55] #enter your dec here as a list or numpy array
object_coords = SkyCoord(ra=ras*u.degree,dec=decs*u.degree)
ids = [] #add your ids here as a list or numpy array
# Fall back to sequential ids 0..N-1 when none are supplied.
if len(ids)==0:
    ids = np.arange(0,len(ras),1)
"""
Explanation: Here you provide the coordinate of the objects you are planning to run XID+ on and their ID's if any
If no ids are provided then they will be numbered 1-N)
End of explanation
"""
# Set up a connection to the HELP VO (TAP) server at Sussex and, for every
# input position, fetch the HELP sources within `search_radius` of it.
from time import sleep  # used to poll the asynchronous TAP job below

search_radius = 60/3600 #distance away from object that the VO query will look for galaxies in degrees
#for SPIRE and PACS we recommend 60" and for MIPS we recommend 30"
service = vo.dal.TAPService("https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap")
for n, coords in enumerate(object_coords):
    ra = coords.ra
    dec = coords.dec
    # ADQL for the SPIRE/PACS prior: optical/NIR-detected sources with a 24um
    # flux cut.  (The original query had a misspelled column name
    # `falg_optnir_det` and a second WHERE keyword, both of which made it
    # invalid ADQL.)
    query_spire_pacs = """
    SELECT ra, dec, help_id, flag_optnir_det, f_mips_24
    FROM herschelhelp.main
    WHERE herschelhelp.main.field = '{}'
    AND herschelhelp.main.flag_optnir_det >= 5
    AND herschelhelp.main.f_mips_24 > 20
    AND CONTAINS(POINT('ICRS', ra, dec), CIRCLE('ICRS', {}, {}, {})) = 1
    """.format(field_use, ra, dec, search_radius)
    # ADQL for the MIPS prior: no flux cut, but carry the IRAC fluxes.
    query_mips = """
    SELECT ra, dec, help_id, flag_optnir_det, f_irac_i1, f_irac_i2, f_irac_i3, f_irac_i4
    FROM herschelhelp.main
    WHERE herschelhelp.main.field = '{}'
    AND herschelhelp.main.flag_optnir_det >= 5
    AND CONTAINS(POINT('ICRS', ra, dec), CIRCLE('ICRS', {}, {}, {})) = 1
    """.format(field_use, ra, dec, search_radius)
    # This notebook fits SPIRE maps, so submit the SPIRE/PACS query
    # (swap in query_mips when building a MIPS prior).  The original passed an
    # undefined variable `query` here.
    try:
        job = service.submit_job(query_spire_pacs)
        job.run()
        while job.phase == "EXECUTING":
            print("Job running")
            sleep(5)
        print('Job finsihed')
        if n == 0:
            prior_help = job.fetch_result().to_table()
            print('table created with {} rows'.format(len(prior_help)))
        else:
            result = job.fetch_result().to_table()
            # `vstack` is used elsewhere in this notebook, so it is in scope.
            prior_help = vstack([result, prior_help], join_type='outer')
            print('table editied, added {} rows'.format(len(result)))
        # (removed `done_fields.append(field)`: neither name was defined)
    except Exception:
        print('VO call failed')
        job.delete()
print(len(prior_help))
prior_help[:5]
"""
Explanation: Run the pyvo query to create a table of all help sources within the desired radius of your objects
End of explanation
"""
# Append the user-supplied objects to the HELP prior table.  They get a
# sentinel detection flag of -99 and an unknown (NaN) 24um flux.
cra = Column(ras,name='ra')
cdec = Column(decs,name='dec')
cids = Column(ids,name='help_id')
cdet = Column(np.zeros(len(ras))-99,name='flag_optnir_det')
cmips = Column(np.zeros(len(ras))*np.nan,name='f_mips_24')
prior_new = Table()
prior_new.add_columns([cra,cdec,cids,cdet,cmips])
prior_cat = vstack([prior_help,prior_new])
len(prior_cat)
prior_cat[:5]
"""
Explanation: Run the below cell if you are running XID+ on SPIRE or PACS maps
End of explanation
"""
#provides limits on teh flat prior used in XID based on the galaxies IRAC fluxes
MIPS_lower=np.full(len(prior_help),0.0)
MIPS_upper=np.full(len(prior_help),1E5)
for i in range(len(prior_cat)):
if np.isnan(prior_cat['f_irac_i4'][i])==False:
MIPS_lower[i]=prior_cat['f_irac_i4'][i]/500.0
MIPS_upper[i]=prior_cat['f_irac_i4'][i]*500.0
elif np.isnan(prior_cat['f_irac_i3'][i])==False:
MIPS_lower[i]=prior_cat['f_irac_i3'][i]/500.0
MIPS_upper[i]=prior_cat['f_irac_i3'][i]*500.0
elif np.isnan(prior_cat['f_irac_i2'][i])==False:
MIPS_lower[i]=prior_cat['f_irac_i2'][i]/500.0
MIPS_upper[i]=prior_cat['f_irac_i2'][i]*500.0
elif np.isnan(prior_cat['f_irac_i1'][i])==False:
MIPS_lower[i]=prior_cat['f_irac_i1'][i]/500.0
MIPS_upper[i]=prior_cat['f_irac_i1'][i]*500.0
mips_lower_col = Column(MIPS_lower,name='MIPS_lower')
mips_upper_col = Column(MIPS_upper,name='MIPS_upper')
prior_help.add_columns([mips_lower_col,mips_upper_col])
# Append the user-supplied objects to the MIPS prior.  IRAC fluxes default to
# NaN (unknown), which leaves the flat-prior limits at their [0, 1e5] default.
i1_f = np.zeros(len(ras))*np.nan
i2_f = np.zeros(len(ras))*np.nan
i3_f = np.zeros(len(ras))*np.nan
i4_f = np.zeros(len(ras))*np.nan
cra = Column(ras, name='ra')
cdec = Column(decs, name='dec')
cids = Column(ids, name='help_id')
cdet = Column(np.zeros(len(ras)) - 99, name='flag_optnir_det')
ci1 = Column(i1_f, name='f_irac_i1')
ci2 = Column(i2_f, name='f_irac_i2')
ci3 = Column(i3_f, name='f_irac_i3')
ci4 = Column(i4_f, name='f_irac_i4')
# Build the table of new sources first, then derive their prior limits from it
# (the original iterated over an undefined table called `lofar_prior`).
prior_new = Table()
prior_new.add_columns([cra, cdec, cids, cdet, ci1, ci2, ci3, ci4])
MIPS_lower = np.full(len(prior_new), 0.0)
MIPS_upper = np.full(len(prior_new), 1E5)
for i in range(len(prior_new)):
    # prefer the reddest band available: i4, then i3, i2, i1
    for band in ('f_irac_i4', 'f_irac_i3', 'f_irac_i2', 'f_irac_i1'):
        flux = prior_new[band][i]
        if not np.isnan(flux):
            MIPS_lower[i] = flux / 500.0
            MIPS_upper[i] = flux * 500.0
            break
mips_lower_col = Column(MIPS_lower, name='MIPS_lower')
mips_upper_col = Column(MIPS_upper, name='MIPS_upper')
prior_new.add_columns([mips_lower_col, mips_upper_col])
prior_cat = vstack([prior_help, prior_new])
len(prior_cat)
"""
Explanation: Run the below cells if you are running XID+ on MIPS maps
End of explanation
"""
#Read in the herschel images
imfolder='../../../../../HELP/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'
pswfits=imfolder+'ELAIS-N1_SPIRE250_v1.0.fits'#SPIRE 250 map
pmwfits=imfolder+'ELAIS-N1_SPIRE350_v1.0.fits'#SPIRE 350 map
plwfits=imfolder+'ELAIS-N1_SPIRE500_v1.0.fits'#SPIRE 500 map

def _load_spire_map(fname):
    """Read one HELP SPIRE map.

    Returns (primary header, image header, image [mJy], error map [mJy],
    WCS, pixel size [arcsec]).  Maps are stored in Jy, hence the 1e3 factor.
    """
    hdulist = fits.open(fname)
    phdu = hdulist[0].header
    ihdu = hdulist['image'].header
    im = hdulist['image'].data * 1.0E3   # convert to mJy
    nim = hdulist['error'].data * 1.0E3  # convert to mJy
    w = wcs.WCS(hdulist['image'].header)
    pixsize = 3600.0 * w.wcs.cd[1, 1]    # pixel size (in arcseconds)
    hdulist.close()
    return phdu, ihdu, im, nim, w, pixsize

#-----250-------------
im250phdu, im250hdu, im250, nim250, w_250, pixsize250 = _load_spire_map(pswfits)
#-----350-------------
im350phdu, im350hdu, im350, nim350, w_350, pixsize350 = _load_spire_map(pmwfits)
#-----500-------------
im500phdu, im500hdu, im500, nim500, w_500, pixsize500 = _load_spire_map(plwfits)
"""
Explanation: Now that we have created the prior we can run XID+
Load in the FIR maps
here we load in the SPIRE maps but you can substitue this with PACS and MIPS yourself
End of explanation
"""
moc=pymoc.util.catalog.catalog_to_moc(object_coords,search_radius,15)
"""
Explanation: Create a moc around each of your objects that will be used to cut doen the SPIRE image
End of explanation
"""
# Build one XID+ prior per SPIRE band: map + catalogue + background prior.
#---prior250--------
prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertainty map, wcs info and primary header
prior250.prior_cat(prior_cat['ra'],prior_cat['dec'],'prior_cat',ID=prior_cat['help_id'])#Set input catalogue
prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma)
#---prior350--------
prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc)
prior350.prior_cat(prior_cat['ra'],prior_cat['dec'],'prior_cat',ID=prior_cat['help_id'])
prior350.prior_bkg(-5.0,5)
#---prior500--------
prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc)
prior500.prior_cat(prior_cat['ra'],prior_cat['dec'],'prior_cat',ID=prior_cat['help_id'])
prior500.prior_bkg(-5.0,5)
#pixsize array (size of pixels in arcseconds)
pixsize=np.array([pixsize250,pixsize350,pixsize500])
#point response function for the three bands (FWHM in arcseconds)
prfsize=np.array([18.15,25.15,36.3])
#use Gaussian2DKernel to create prf (requires stddev rather than fwhm hence pfwhm/2.355)
from astropy.convolution import Gaussian2DKernel
##---------fit using Gaussian beam-----------------------
prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101)
prf250.normalize(mode='peak')
prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101)
prf350.normalize(mode='peak')
prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101)
prf500.normalize(mode='peak')
pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map
pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map
pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map
prior250.set_prf(prf250.array,pind250,pind250)#requires PRF as 2d grid, and x and y bins for grid (in pixel scale)
prior350.set_prf(prf350.array,pind350,pind350)
prior500.set_prf(prf500.array,pind500,pind500)
print('fitting '+ str(prior250.nsrc)+' sources \n')
print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels')
# Precompute pointing matrices and flux upper limits for each band.
prior250.get_pointing_matrix()
prior350.get_pointing_matrix()
prior500.get_pointing_matrix()
prior250.upper_lim_map()
prior350.upper_lim_map()
prior500.upper_lim_map()
"""
Explanation: finish initalising the prior
End of explanation
"""
# Fit all three SPIRE bands with the Stan sampler and persist priors + posterior.
from xidplus.stan_fit import SPIRE
fit=SPIRE.all_bands(prior250,prior350,prior500,iter=1000)
posterior=xidplus.posterior_stan(fit,[prior250,prior350,prior500])
xidplus.save([prior250,prior350,prior500],posterior,'YOUR_FILE_NAME_HERE')
"""
Explanation: run XID+ and save the output
End of explanation
"""
|
jseabold/statsmodels | examples/notebooks/theta-model.ipynb | bsd-3-clause | import numpy as np
import pandas as pd
import pandas_datareader as pdr
import matplotlib.pyplot as plt
import seaborn as sns
plt.rc("figure",figsize=(16,8))
plt.rc("font",size=15)
plt.rc("lines",linewidth=3)
sns.set_style("darkgrid")
"""
Explanation: The Theta Model
The Theta model of Assimakopoulos & Nikolopoulos (2000) is a simple method for forecasting the involves fitting two $\theta$-lines, forecasting the lines using a Simple Exponential Smoother, and then combining the forecasts from the two lines to produce the final forecast. The model is implemented in steps:
Test for seasonality
Deseasonalize if seasonality detected
Estimate $\alpha$ by fitting a SES model to the data and $b_0$ by OLS.
Forecast the series
Reseasonalize if the data was deseasonalized.
The seasonality test examines the ACF at the seasonal lag $m$. If this lag is significantly different from zero then the data is deseasonalized using statsmodels.tsa.seasonal_decompose with either a multiplicative method (the default) or an additive one.
The parameters of the model are $b_0$ and $\alpha$ where $b_0$ is estimated from the OLS regression
$$
X_t = a_0 + b_0 (t-1) + \epsilon_t
$$
and $\alpha$ is the SES smoothing parameter in
$$
\tilde{X}_t = (1-\alpha) X_t + \alpha \tilde{X}_{t-1}
$$
The forecasts are then
$$
\hat{X}{T+h|T} = \frac{\theta-1}{\theta} \hat{b}_0
\left[h - 1 + \frac{1}{\hat{\alpha}}
- \frac{(1-\hat{\alpha})^T}{\hat{\alpha}} \right]
+ \tilde{X}{T+h|T}
$$
Ultimately $\theta$ only plays a role in determining how much the trend is damped. If $\theta$ is very large, then the forecast of the model is identical to that from an Integrated Moving Average with a drift,
$$
X_t = X_{t-1} + b_0 + (\alpha-1)\epsilon_{t-1} + \epsilon_t.
$$
Finally, the forecasts are reseasonalized if needed.
This module is based on:
Assimakopoulos, V., & Nikolopoulos, K. (2000). The theta model: a decomposition
approach to forecasting. International journal of forecasting, 16(4), 521-530.
Hyndman, R. J., & Billah, B. (2003). Unmasking the Theta method.
International Journal of Forecasting, 19(2), 287-290.
Fioruci, J. A., Pellegrini, T. R., Louzada, F., & Petropoulos, F.
(2015). The optimized theta method. arXiv preprint arXiv:1503.03529.
Imports
We start with the standard set of imports and some tweaks to the default matplotlib style.
End of explanation
"""
# Download monthly US housing starts (FRED series HOUST) and set an explicit
# frequency on the index so the forecasting models can use it.
reader = pdr.fred.FredReader(["HOUST"], start="1980-01-01", end="2020-04-01")
data = reader.read()
housing = data.HOUST
housing.index.freq = housing.index.inferred_freq
ax = housing.plot()
"""
Explanation: Load some Data
We will first look at housing starts using US data. This series is clearly seasonal but does not have a clear trend during the same.
End of explanation
"""
from statsmodels.tsa.forecasting.theta import ThetaModel

# Fit the Theta model with default options; the summary reports whether the
# series was deseasonalized, the estimated drift b0 and the SES smoothing
# parameter alpha.
tm = ThetaModel(housing)
res = tm.fit()
print(res.summary())
"""
Explanation: We first specify the model without any options and then fit it. The summary shows that the data was deseasonalized using the multiplicative method. The drift is modest and negative, and the smoothing parameter is fairly low.
End of explanation
"""
# Hedgehog plot: refit the Theta model on expanding samples ending every
# second year and forecast 24 months ahead from each endpoint.
forecasts = {"housing": housing}
for year in range(1995, 2020, 2):
    training_sample = housing[:str(year)]
    fitted = ThetaModel(training_sample).fit()
    forecasts[str(year)] = fitted.forecast(24)
forecasts = pd.DataFrame(forecasts)

ax = forecasts["1995":].plot(legend=False)
# The observed series is the first line drawn; make it thick, faint and black
# so the forecast "spines" stand out against it.
handles = ax.get_children()
handles[0].set_linewidth(4)
handles[0].set_alpha(0.3)
handles[0].set_color("#000000")
ax.set_title("Housing Starts")
plt.tight_layout(pad=1.0)
"""
Explanation: The model is first and foremost a forecasting method. Forecasts are produced using the forecast method from fitted model. Below we produce a hedgehog plot by forecasting 2-years ahead every 2 years.
Note: the default $\theta$ is 2.
End of explanation
"""
# Model the log of housing starts; force additive deseasonalization
# (appropriate on the log scale) and estimate the parameters by MLE through
# the equivalent IMA representation.
tm = ThetaModel(np.log(housing), method="additive")
res = tm.fit(use_mle=True)
print(res.summary())
"""
Explanation: We could alternatively fit the log of the data. Here it makes more sense to force the deseasonalization to use the additive method, if needed. We also fit the model parameters using MLE. This method fits the IMA
$$ X_t = X_{t-1} + \gamma\epsilon_{t-1} + \epsilon_t $$
where $\hat{\alpha}$ = $\min(\hat{\gamma}+1, 0.9998)$ using statsmodels.tsa.SARIMAX. The parameters are similar although the drift is closer to zero.
End of explanation
"""
# Decompose the 12-step-ahead forecast into its trend, SES and seasonal parts.
res.forecast_components(12)
"""
Explanation: The forecast only depends on the forecast trend component,
$$
\hat{b}_0
\left[h - 1 + \frac{1}{\hat{\alpha}}
- \frac{(1-\hat{\alpha})^T}{\hat{\alpha}} \right],
$$
the forecast from the SES (which does not change with the horizon), and the seasonal. These three components are available using the forecast_components. This allows forecasts to be constructed using multiple choices of $\theta$ using the weight expression above.
End of explanation
"""
# Download quarterly personal consumption expenditure (FRED series NA000349Q).
reader = pdr.fred.FredReader(["NA000349Q"], start="1980-01-01", end="2020-04-01")
pce = reader.read()
pce.columns = ["PCE"]  # friendlier column name than the FRED series code
_ = pce.plot()
"""
Explanation: Personal Consumption Expenditure
We next look at personal consumption expenditure. This series has a clear seasonal component and a drift.
End of explanation
"""
# PCE is strictly positive, so model its natural log.
mod = ThetaModel(np.log(pce))
res = mod.fit()
print(res.summary())
"""
Explanation: Since this series is always positive, we model the $\ln$.
End of explanation
"""
# Compare 12-quarter-ahead forecasts for several values of theta; larger
# theta means less damping of the drift, theta=inf means no damping at all.
theta_forecasts = pd.DataFrame(
    {
        "ln PCE": np.log(pce.PCE),
        "theta=1.2": res.forecast(12, theta=1.2),
        "theta=2": res.forecast(12),
        "theta=3": res.forecast(12, theta=3),
        "No damping": res.forecast(12, theta=np.inf),
    }
)
_ = theta_forecasts.tail(36).plot()
plt.title("Forecasts of ln PCE")
plt.tight_layout(pad=1.0)
"""
Explanation: Next we explore differences in the forecast as $\theta$ changes. When $\theta$ is close to 1, the drift is nearly absent. As $\theta$ increases, the drift becomes more obvious.
End of explanation
"""
# Plot 24-step-ahead predictions with prediction intervals (assumes the IMA
# representation is correct).
ax = res.plot_predict(24, theta=2)
"""
Explanation: Finally, plot_predict can be used to visualize the predictions and prediction intervals which are constructed assuming the IMA is true.
End of explanation
"""
ln_pce = np.log(pce.PCE)
# Hedgehog plot: refit on expanding samples ending every third year and
# forecast 12 quarters (3 years) ahead from each endpoint.
forecasts = {"ln PCE": ln_pce}
for year in range(1995,2020,3):
    sub = ln_pce[:str(year)]
    res = ThetaModel(sub).fit()
    fcast = res.forecast(12)
    forecasts[str(year)] = fcast
forecasts = pd.DataFrame(forecasts)
ax = forecasts["1995":].plot(legend=False)
children = ax.get_children()
# Emphasize the observed series (first line drawn) relative to the forecasts.
children[0].set_linewidth(4)
children[0].set_alpha(0.3)
children[0].set_color("#000000")
ax.set_title("ln PCE")
plt.tight_layout(pad=1.0)
"""
Explanation: We conclude by producing a hedgehog plot using 3-year non-overlapping samples.
End of explanation
"""
|
bwinkel/cygrid | notebooks/04_sightline_gridding.ipynb | gpl-3.0 | %load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
"""
Explanation: Sightline gridding
We demonstrate the gridding of selected sightlines with cygrid. This can be particularly useful if you have some high-resolution data such as QSO absorption spectra and want to get accurate foreground values from a dataset with lower angular resolution.
We start by adjusting the notebook settings.
End of explanation
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import healpy as hp
from astropy.io import fits
from astropy.utils.misc import NumpyRNGContext
import cygrid
"""
Explanation: We attempt to limit our dependencies as much as possible, but astropy and healpy needs to be available on your machine if you want to re-run the calculations. We can highly recommend anaconda as a scientific python platform.
End of explanation
"""
# HEALPix resolution parameter; the map has NPIX = 12 * NSIDE**2 pixels.
NSIDE = 128
NPIX = hp.nside2npix(NSIDE)
"""
Explanation: Create dummy data
The properties of the map are given by the ordering and the nside of the map. For more details, check the paper by Gorski et al. (2005).
End of explanation
"""
# data and weights
with NumpyRNGContext(0):
    # make sure to have "predictable" random numbers
    input_data = np.random.randn(NPIX)
# coordinates
theta, phi = hp.pix2ang(NSIDE, np.arange(NPIX))
# healpy returns co-latitude theta and longitude phi in radians; convert to
# longitude/latitude in degrees.
lons = np.rad2deg(phi)
lats = 90. - np.rad2deg(theta)
"""
Explanation: The data are just random draws from the standard normal distribution. For the weights, we choose uniform weighting. The coordinates can be easily calculated with healpy.
End of explanation
"""
# NOTE(review): assumes hp.nside2resol returns the resolution in degrees
# (x3600 -> arcseconds) -- confirm units against the installed healpy version.
print('pixel size: {:.1f}"'.format(3600 * hp.nside2resol(NSIDE)))
"""
Explanation: The pixel size for this NPIX is:
End of explanation
"""
hp.mollview(input_data, xsize=300)
"""
Explanation: A quick look confirms that our data looks just as expected.
End of explanation
"""
with NumpyRNGContext(0):
    # reproducibly pick 5 random HEALPix pixels as the target sightlines
    target_hpx_indices = np.random.randint(0, NPIX, 5)
theta, phi = hp.pix2ang(NSIDE,target_hpx_indices)
target_lons = np.rad2deg(phi)
target_lats = 90. - np.rad2deg(theta)
print('{:>8s} {:>8s}'.format('glon', 'glat'))
for glon, glat in zip(target_lons, target_lats):
    print('{:8.4f} {:8.4f}'.format(glon, glat))
"""
Explanation: Gridding
We are now interested in the values of this map at a couple of given positions. It wouldn't make sense to use cygrid at all, if we were just interested in the values of the map at the given positions. Even when the positions are not exactly aligned with the HEALPix pixel centers, employing some interpolation routine would do a good job.
But let's assume that we would want to compare the values with another data set, whose angular resolution is much worse. Then it is reasonable to down-sample (i.e., lower the angular resolution by smoothing with a Gaussian kernel) our HEALPix map before extracting the sight-line values. With cygrid's sight-line gridder, this is done only for the vicinity of the requested positions, which can save a lot of computing time (only for large NSIDE, because healpy's smoothing function is very fast for small and moderate NSIDE due to the use of FFTs). cygrid would be at true advantage for most other projections, though.
In order to compare the results with healpy's smoothing routine (see below), we will use HEALPix pixel center coordinates without loss of generality.
End of explanation
"""
# Sight-line gridder: grids input data onto the listed target positions only.
gridder = cygrid.SlGrid(target_lons, target_lats)
"""
Explanation: We initiate the gridder by specifying the target sightlines.
End of explanation
"""
kernelsize_fwhm = 1. # 1 degree
# see https://en.wikipedia.org/wiki/Full_width_at_half_maximum
kernelsize_sigma = kernelsize_fwhm / np.sqrt(8 * np.log(2))
# Compute the kernel out to 4 sigma; beyond that its weight is negligible.
sphere_radius = 4. * kernelsize_sigma
gridder.set_kernel(
    'gauss1d',
    (kernelsize_sigma,),
    sphere_radius,
    # maximum acceptable HEALPix resolution for the internal lookup
    kernelsize_sigma / 2.
    )
"""
Explanation: The gridding kernel is of key importance for the entire gridding process. cygrid allows you to specify the shape of the kernel (e.g. elliptical Gaussian or tapered sinc) and its size.
In our example, we use a symmetrical Gaussian (i.e. the major and minor axis of the kernel are identical). In that case, we need to furthermore specify kernelsize_sigma, the sphere_radius up to which the kernel will be computed, and the maximum acceptable healpix resolution for which we recommend kernelsize_sigma/2.
We refer to section 3.5 of the paper ('a minimal example') for a short discussion of the kernel parameters.
End of explanation
"""
# Convolve the input samples onto the target sightlines with the kernel.
gridder.grid(lons, lats, input_data)
"""
Explanation: After the kernel has been set, we perform the actual gridding by calling grid() with the coordinates and the data.
End of explanation
"""
# Retrieve the gridded values, one per target sightline.
sightlines = gridder.get_datacube()
"""
Explanation: To get the gridded data, we simply call get_datacube().
End of explanation
"""
# Cross-check: smooth the full map with healpy's spherical-harmonic
# convolution and extract the same pixels; values should agree closely
# with cygrid's sight-line result.
smoothed_map = hp.sphtfunc.smoothing(
    input_data,
    fwhm=np.radians(kernelsize_fwhm),
    )
smoothed_data = smoothed_map[target_hpx_indices]
print('{:>8s} {:>8s} {:>10s} {:>10s}'.format(
    'glon', 'glat', 'cygrid', 'healpy')
    )
for t in zip(
        target_lons, target_lats,
        sightlines, smoothed_data,
        ):
    print('{:8.4f} {:8.4f} {:10.6f} {:10.6f}'.format(*t))
"""
Explanation: Finally, we get a list of our gridded sightlines within the chosen aperture.
We can compare this with the healpy smoothing operation:
End of explanation
"""
|
geography-munich/sciprog | material/sub/jrjohansson/Lecture-7-Revision-Control-Software.ipynb | apache-2.0 | from IPython.display import Image
"""
Explanation: Revision control software
J.R. Johansson (jrjohansson at gmail.com)
The latest version of this IPython notebook lecture is available at http://github.com/jrjohansson/scientific-python-lectures.
The other notebooks in this lecture series are indexed at http://jrjohansson.github.io.
End of explanation
"""
# create a new git repository called gitdemo:
!git init gitdemo
"""
Explanation: In any software development, one of the most important tools are revision control software (RCS).
They are used in virtually all software development and in all environments, by everyone and everywhere (no kidding!)
RCS can used on almost any digital content, so it is not only restricted to software development, and is also very useful for manuscript files, figures, data and notebooks!
There are two main purposes of RCS systems:
Keep track of changes in the source code.
Allow reverting back to an older revision if something goes wrong.
Work on several "branches" of the software concurrently.
Tags revisions to keep track of which version of the software that was used for what (for example, "release-1.0", "paper-A-final", ...)
Make it possible for serveral people to collaboratively work on the same code base simultaneously.
Allow many authors to make changes to the code.
Clearly communicating and visualizing changes in the code base to everyone involved.
Basic principles and terminology for RCS systems
In an RCS, the source code or digital content is stored in a repository.
The repository does not only contain the latest version of all files, but the complete history of all changes to the files since they were added to the repository.
A user can checkout the repository, and obtain a local working copy of the files. All changes are made to the files in the local working directory, where files can be added, removed and updated.
When a task has been completed, the changes to the local files are commited (saved to the repository).
If someone else has been making changes to the same files, a conflict can occur. In many cases conflicts can be resolved automatically by the system, but in some cases we might manually have to merge different changes together.
It is often useful to create a new branch in a repository, or a fork or clone of an entire repository, when we are doing larger experimental development. The main branch in a repository is often called master or trunk. When work on a branch or fork is completed, it can be merged into the master branch/repository.
With distributed RCSs such as GIT or Mercurial, we can pull and push changesets between different repositories. For example, between a local copy of the repository and a central online repository (for example on a community repository hosting site like github.com).
Some good RCS software
GIT (git) : http://git-scm.com/
Mercurial (hg) : http://mercurial.selenic.com/
In the rest of this lecture we will look at git, although hg is just as good and work in almost exactly the same way.
Installing git
On Linux:
$ sudo apt-get install git
On Mac (with macports):
$ sudo port install git
The first time you start to use git, you'll need to configure your author information:
$ git config --global user.name 'Robert Johansson'
$ git config --global user.email robert@riken.jp
Creating and cloning a repository
To create a brand new empty repository, we can use the command git init repository-name:
End of explanation
"""
!git clone https://github.com/qutip/qutip
"""
Explanation: If we want to fork or clone an existing repository, we can use the command git clone repository:
End of explanation
"""
!git clone gitdemo gitdemo2
"""
Explanation: Git clone can take a URL to a public repository, like above, or a path to a local directory:
End of explanation
"""
!git status
"""
Explanation: We can also clone private repositories over secure protocols such as SSH:
$ git clone ssh://myserver.com/myrepository
Status
Using the command git status we get a summary of the current status of the working directory. It shows if we have modified, added or removed files.
End of explanation
"""
%%file README
A file with information about the gitdemo repository.
!git status
"""
Explanation: In this case, only the current ipython notebook has been added. It is listed as an untracked file, and is therefore not in the repository yet.
Adding files and committing changes
To add a new file to the repository, we first create the file and then use the git add filename command:
End of explanation
"""
!git add README
!git status
"""
Explanation: After having added the file README, the command git status list it as an untracked file.
End of explanation
"""
!git commit -m "Added a README file" README
!git add Lecture-7-Revision-Control-Software.ipynb
!git commit -m "added notebook file" Lecture-7-Revision-Control-Software.ipynb
!git status
"""
Explanation: Now that it has been added, it is listed as a new file that has not yet been commited to the repository.
End of explanation
"""
%%file README
A file with information about the gitdemo repository.
A new line.
!git status
"""
Explanation: After committing the change to the repository from the local working directory, git status again reports that working directory is clean.
Commiting changes
When files that is tracked by GIT are changed, they are listed as modified by git status:
End of explanation
"""
!git commit -m "added one more line in README" README
!git status
"""
Explanation: Again, we can commit such changes to the repository using the git commit -m "message" command.
End of explanation
"""
%%file tmpfile
A short-lived file.
"""
Explanation: Removing files
To remove file that has been added to the repository, use git rm filename, which works similar to git add filename:
End of explanation
"""
!git add tmpfile
!git commit -m "adding file tmpfile" tmpfile
"""
Explanation: Add it:
End of explanation
"""
!git rm tmpfile
!git commit -m "remove file tmpfile" tmpfile
"""
Explanation: Remove it again:
End of explanation
"""
!git log
"""
Explanation: Commit logs
The messages that are added to the commit command are supposed to give a short (often one-line) description of the changes/additions/deletions in the commit. If the -m "message" is omitted when invoking the git commit message an editor will be opened for you to type a commit message (for example useful when a longer commit message is requried).
We can look at the revision log by using the command git log:
End of explanation
"""
%%file README
A file with information about the gitdemo repository.
README files usually contains installation instructions, and information about how to get started using the software (for example).
!git diff README
"""
Explanation: In the commit log, each revision is shown with a timestampe, a unique has tag that, and author information and the commit message.
Diffs
All commits results in a changeset, which has a "diff" describing the changes to the file associated with it. We can use git diff so see what has changed in a file:
End of explanation
"""
Image(filename='images/github-diff.png')
"""
Explanation: That looks quite cryptic but is a standard form for describing changes in files. We can use other tools, like graphical user interfaces or web based systems to get a more easily understandable diff.
In github (a web-based GIT repository hosting service) it can look like this:
End of explanation
"""
!git checkout -- README
!git status
"""
Explanation: Discard changes in the working directory
To discard a change (revert to the latest version in the repository) we can use the checkout command like this:
End of explanation
"""
!git log
!git checkout 1f26ad648a791e266fbb951ef5c49b8d990e6461
"""
Explanation: Checking out old revisions
If we want to get the code for a specific revision, we can use "git checkout" and giving it the hash code for the revision we are interested as argument:
End of explanation
"""
!cat README
"""
Explanation: Now the content of all the files like in the revision with the hash code listed above (first revision)
End of explanation
"""
!git checkout master
!cat README
!git status
"""
Explanation: We can move back to "the latest" (master) with the command:
End of explanation
"""
!git log
!git tag -a demotag1 -m "Code used for this and that purpuse"
!git tag -l
!git show demotag1
"""
Explanation: Tagging and branching
Tags
Tags are named revisions. They are useful for marking particular revisions for later reference. For example, we can tag our code with the tag "paper-1-final" when simulations for "paper-1" are finished and the paper submitted. Then we can always retrieve exactly the code used for that paper even if we continue to work on and develop the code for future projects and papers.
End of explanation
"""
!git branch expr1
"""
Explanation: To retrieve the code in the state corresponding to a particular tag, we can use the git checkout tagname command:
$ git checkout demotag1
Branches
With branches we can create diverging code bases in the same repository. They are for example useful for experimental development that requires a lot of code changes that could break the functionality in the master branch. Once the development of a branch has reached a stable state it can always be merged back into the trunk. Branching-development-merging is a good development strategy when several people are involved in working on the same code base. But even in single author repositories it can often be useful to always keep the master branch in a working state, and always branch/fork before implementing a new feature, and later merge it back into the main trunk.
In GIT, we can create a new branch like this:
End of explanation
"""
!git branch
"""
Explanation: We can list the existing branches like this:
End of explanation
"""
!git checkout expr1
"""
Explanation: And we can switch between branches using checkout:
End of explanation
"""
%%file README
A file with information about the gitdemo repository.
README files usually contains installation instructions, and information about how to get started using the software (for example).
Experimental addition.
!git commit -m "added a line in expr1 branch" README
!git branch
!git checkout master
!git branch
"""
Explanation: Make a change in the new branch.
End of explanation
"""
!git checkout master
!git merge expr1
!git branch
"""
Explanation: We can merge an existing branch and all its changesets into another branch (for example the master branch) like this:
First change to the target branch:
End of explanation
"""
!git branch -d expr1
!git branch
!cat README
"""
Explanation: We can delete the branch expr1 now that it has been merged into the master:
End of explanation
"""
!git remote
!git remote show origin
"""
Explanation: pulling and pushing changesets between repositories
If the respository has been cloned from another repository, for example on github.com, it automatically remembers the address of the parant repository (called origin):
End of explanation
"""
!git pull origin
"""
Explanation: pull
We can retrieve updates from the origin repository by "pulling" changesets from "origin" to our repository:
End of explanation
"""
!git status
!git add Lecture-7-Revision-Control-Software.ipynb
!git commit -m "added lecture notebook about RCS" Lecture-7-Revision-Control-Software.ipynb
!git push
"""
Explanation: We can register addresses to many different repositories, and pull in different changesets from different sources, but the default source is the origin from where the repository was first cloned (and the work origin could have been omitted from the line above).
push
After making changes to our local repository, we can push changes to a remote repository using git push. Again, the default target repository is origin, so we can do:
End of explanation
"""
Image(filename='images/github-project-page.png')
"""
Explanation: Hosted repositories
Github.com is a git repository hosting site that is very popular with both open source projects (for which it is free) and private repositories (for which a subscription might be needed).
With a hosted repository it easy to collaborate with colleagues on the same code base, and you get a graphical user interface where you can browse the code and look at commit logs, track issues etc.
Some good hosted repositories are
Github : http://www.github.com
Bitbucket: http://www.bitbucket.org
End of explanation
"""
Image(filename='images/gitk.png')
"""
Explanation: Graphical user interfaces
There are also a number of graphical users interfaces for GIT. The available options vary a little bit from platform to platform:
http://git-scm.com/downloads/guis
End of explanation
"""
|
geilerloui/deep-learning | autoencoder/Convolutional_Autoencoder.ipynb | mit | %matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
"""
Explanation: Convolutional Autoencoder
Sticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data.
End of explanation
"""
learning_rate = 0.001
# Input and target placeholders (28x28 grayscale images, NHWC layout)
inputs_ = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name="inputs")
targets_ = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name="targets")

### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu) # (3,3) kernel; stride 1 + 'same' keeps H x W
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # pool size and strides of 2 halve H and W
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x8 -- the compressed representation

### Decoder: nearest-neighbor upsampling followed by a convolution, to avoid
### the checkerboard artifacts of transposed convolutions.
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x16

# No activation here: the raw logits feed the sigmoid cross-entropy below.
logits = tf.layers.conv2d(conv6, 1, (3, 3), padding='same', activation=None)
#Now 28x28x1

# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name="decoded")

# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)

# Get cost and define the optimizer; use the learning_rate defined above
# instead of a duplicated hard-coded constant.
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
"""
Explanation: Network Architecture
The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.
<img src='assets/convolutional_autoencoder.png' width=500px>
Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data.
What's going on with the decoder
Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see transposed convolution layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 patch in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, tf.nn.conv2d_transpose.
However, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.
Exercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by 2. Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor.
End of explanation
"""
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        # Reshape flat 784-vectors into 28x28x1 images for the conv layers.
        imgs = batch[0].reshape((-1, 28, 28, 1))
        # Autoencoder objective: inputs and targets are the same images.
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
                                                         targets_: imgs})
        # Note: this logs once per batch, not once per epoch.
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))

# Show original test images (top row) above their reconstructions (bottom).
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})

for images, row in zip([in_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

fig.tight_layout(pad=0.1)
sess.close()
"""
Explanation: Training
As before, here we'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays.
End of explanation
"""
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')

### Encoder -- deeper (32-32-16) than the plain autoencoder because
### denoising is a harder problem.
conv1 = tf.layers.conv2d(inputs_, 32, (3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2, 2), (2, 2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2, 2), (2, 2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3, 3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2, 2), (2, 2), padding='same')
# Now 4x4x16

### Decoder: nearest-neighbor upsampling + convolution, mirroring the encoder.
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7, 7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3, 3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14, 14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x32

# No activation: the raw logits feed the sigmoid cross-entropy below.
logits = tf.layers.conv2d(conv6, 1, (3, 3), padding='same', activation=None)
#Now 28x28x1

# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name='decoded')

# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)

# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 100
batch_size = 200
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        # Get images from the batch
        imgs = batch[0].reshape((-1, 28, 28, 1))
        # Add random Gaussian noise to the input images
        noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
        # Clip the images to be between 0 and 1
        noisy_imgs = np.clip(noisy_imgs, 0., 1.)
        # Denoising objective: noisy images as inputs, clean images as targets
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
                                                         targets_: imgs})
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))
"""
Explanation: Denoising
As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practice. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.
Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.
Exercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers.
End of explanation
"""
# Show noisy test images (top row) above the network's denoised outputs
# (bottom row); axes are hidden since these are just image tiles.
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)

reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})

for images, row in zip([noisy_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

fig.tight_layout(pad=0.1)
"""
Explanation: Checking out the performance
Here I'm adding noise to the test images and passing them through the autoencoder. It does a surprisingly great job of removing the noise, even though it's sometimes difficult to tell what the original number is.
End of explanation
"""
|
ibm-cds-labs/pixiedust | notebook/Intro to PixieDust.ipynb | apache-2.0 | #!pip install --user --upgrade pixiedust
"""
Explanation: Hello PixieDust!
This sample notebook provides you with an introduction to many features included in PixieDust. You can find more information about PixieDust at https://pixiedust.github.io/pixiedust/. To ensure you are running the latest version of PixieDust uncomment and run the following cell. Do not run this cell if you installed PixieDust locally from source and want to continue to run PixieDust from source.
End of explanation
"""
import pixiedust
"""
Explanation: Import PixieDust
Run the following cell to import the PixieDust library. You may need to restart your kernel after importing. Follow the instructions, if any, after running the cell. Note: You must import PixieDust every time you restart your kernel.
End of explanation
"""
pixiedust.enableJobMonitor();
"""
Explanation: Enable the Spark Progress Monitor
PixieDust includes a Spark Progress Monitor bar that lets you track the status of your Spark jobs. You can find more info at https://pixiedust.github.io/pixiedust/sparkmonitor.html. Run the following cell to enable the Spark Progress Monitor:
End of explanation
"""
pixiedust.installPackage("graphframes:graphframes:0.1.0-spark1.6")
print("done")
"""
Explanation: Example use of the PackageManager
You can use the PackageManager component of Pixiedust to install and uninstall maven packages into your notebook kernel without editing configuration files. This component is essential when you run notebooks from a hosted cloud environment and do not have access to the configuration files. You can find more info at https://pixiedust.github.io/pixiedust/packagemanager.html. Run the following cell to install the GraphFrame package. You may need to restart your kernel after installing new packages. Follow the instructions, if any, after running the cell.
End of explanation
"""
pixiedust.printAllPackages()
"""
Explanation: Run the following cell to print out all installed packages:
End of explanation
"""
sqlContext=SQLContext(sc)
d1 = sqlContext.createDataFrame(
[(2010, 'Camping Equipment', 3),
(2010, 'Golf Equipment', 1),
(2010, 'Mountaineering Equipment', 1),
(2010, 'Outdoor Protection', 2),
(2010, 'Personal Accessories', 2),
(2011, 'Camping Equipment', 4),
(2011, 'Golf Equipment', 5),
(2011, 'Mountaineering Equipment',2),
(2011, 'Outdoor Protection', 4),
(2011, 'Personal Accessories', 2),
(2012, 'Camping Equipment', 5),
(2012, 'Golf Equipment', 5),
(2012, 'Mountaineering Equipment', 3),
(2012, 'Outdoor Protection', 5),
(2012, 'Personal Accessories', 3),
(2013, 'Camping Equipment', 8),
(2013, 'Golf Equipment', 5),
(2013, 'Mountaineering Equipment', 3),
(2013, 'Outdoor Protection', 8),
(2013, 'Personal Accessories', 4)],
["year","zone","unique_customers"])
display(d1)
"""
Explanation: Example use of the display() API
PixieDust lets you visualize your data in just a few clicks using the display() API. You can find more info at https://pixiedust.github.io/pixiedust/displayapi.html. The following cell creates a DataFrame and uses the display() API to create a bar chart:
End of explanation
"""
python_var = "Hello From Python"
python_num = 10
"""
Explanation: Example use of the Scala bridge
Data scientists working with Spark may occasionaly need to call out to one of the hundreds of libraries available on spark-packages.org which are written in Scala or Java. PixieDust provides a solution to this problem by letting users directly write and run scala code in its own cell. It also lets variables be shared between Python and Scala and vice-versa. You can find more info at https://pixiedust.github.io/pixiedust/scalabridge.html.
Start by creating a python variable that we'll use in scala:
End of explanation
"""
%%scala
println(python_var)
println(python_num+10)
val __scala_var = "Hello From Scala"
"""
Explanation: Create scala code that use the python_var and create a new variable that we'll use in Python:
End of explanation
"""
print(__scala_var)
"""
Explanation: Use the __scala_var from python:
End of explanation
"""
pixiedust.sampleData()
"""
Explanation: Sample Data
PixieDust includes a number of sample data sets. You can use these sample data sets to start playing with the display() API and other PixieDust features. You can find more info at https://pixiedust.github.io/pixiedust/loaddata.html. Run the following cell to view the available data sets:
End of explanation
"""
pixiedust.installPackage("com.databricks:spark-csv_2.10:1.5.0")
pixiedust.installPackage("org.apache.commons:commons-csv:0")
"""
Explanation: Example use of sample data
To use sample data locally run the following cell to install required packages. You may need to restart your kernel after running this cell.
End of explanation
"""
d2 = pixiedust.sampleData(1)
"""
Explanation: Run the following cell to get the first data set from the list. This will return a DataFrame and assign it to the variable d2:
End of explanation
"""
display(d2)
"""
Explanation: Pass the sample data set (d2) into the display() API:
End of explanation
"""
d3 = pixiedust.sampleData("https://openobjectstore.mybluemix.net/misc/milliondollarhomes.csv")
"""
Explanation: You can also download data from a CSV file into a DataFrame which you can use with the display() API:
End of explanation
"""
% pixiedustLog -l debug
"""
Explanation: PixieDust Log
PixieDust comes complete with logging to help you troubleshoot issues. You can find more info at https://pixiedust.github.io/pixiedust/logging.html. To access the log run the following cell:
End of explanation
"""
%%scala
val __scala_version = util.Properties.versionNumberString
import platform
print('PYTHON VERSON = ' + platform.python_version())
print('SPARK VERSON = ' + sc.version)
print('SCALA VERSON = ' + __scala_version)
"""
Explanation: Environment Info.
The following cells will print out information related to your notebook environment.
End of explanation
"""
|
jldinh/multicell | examples/05 - Gierer-Meinhardt.ipynb | mit | %matplotlib notebook
"""
Explanation: In this example, we will use Multicell to simulate the self-organization of a geometrical Turing pattern (Turing 1952; Note about other proposals and ways to produce spatial patterns), based on equations developed by Gierer and Meinhardt (Gierer and Meinhardt 1972). These equations describe a simple molecular mechanism that involves two chemical species, an activator and a repressor. The activator activates itself, as well as the repressor. The repressor represses the activator. Both species are diffusible, the activator within a short-range, and the repressor within a longer range.
Despite its simplicity, this mechanism has been successfully used to explain the formation of many different molecular spatial patterns in tissue structures (Meinhardt and Gierer 1974; Meinhardt book 1982; Kondo 2010;). In this section, we will implement the Gierer-Meinhardt equations (Gierer and Meinhardt 1972).
Preparation
End of explanation
"""
import multicell
import numpy as np
"""
Explanation: Imports
End of explanation
"""
sim = multicell.simulation_builder.generate_cell_grid_sim(20, 20, 1, 1e-3)
"""
Explanation: Problem definition
Simulation and tissue structure
End of explanation
"""
sim.register_cell_variable("a")
sim.register_cell_variable("h")
"""
Explanation: Biological species
We register two species: an activator a and an inhibitor h.
End of explanation
"""
def c_a2(c_a, **kwargs):
    """Computed variable: element-wise square of the activator concentration.

    `c_a` may be a scalar or a NumPy array of per-cell concentrations.
    Extra keyword arguments supplied by the simulation are ignored.
    """
    return c_a * c_a
sim.register_computed_variable("c_a2", c_a2)
"""
Explanation: Computed variables
The concentrations of a will be computed automatically for all cells. However, we will be going to use their squares multiple times per time step. To avoid raising the vector c_a to the square multiple times, we define a computed variable c_a2 that will be computed once per time step. The equation of c_a2 is defined using a Python function, which is then registered using the method register_computed_variable of the Simulation object.
End of explanation
"""
sim.set_constants({"mu_a": 1e-1, "mu_h": 2e-1, "rho_a": 1., "rho_h": 1., "q": 1., "H": 0.35, "A": 0., "D_h": 5., "D_a": 0.025})
"""
Explanation: Constants
End of explanation
"""
def da_dt(simulation, a, c_a, c_a2, c_h, D_a, mu_a, rho_a, A, q, adjacency_matrix, **kwargs):
    """Rate of change of the activator quantity `a` (Gierer-Meinhardt).

    Sum of four terms: short-range diffusion, autocatalytic production
    (inhibited by `h` and saturating in `a`), linear degradation, and the
    constant basal synthesis `A`.
    """
    diffusion_term = simulation.diffusion(D_a, c_a, adjacency_matrix)
    production_term = rho_a * c_a2 / c_h / (1 + q**2 * c_a2)
    degradation_term = mu_a * a
    return diffusion_term + production_term - degradation_term + A
"""
Explanation: Differential equations
The formula of $\dfrac{da}{dt}$ is comprised of 4 additive terms: a diffusion term, an a and h-dependent synthesis term, a degradation term and a basal synthesis term.
End of explanation
"""
def dh_dt(simulation, h, c_a2, c_h, D_h, mu_h, rho_h, H, adjacency_matrix, **kwargs):
    """Rate of change of the inhibitor quantity `h` (Gierer-Meinhardt).

    Long-range diffusion plus activator-driven production, minus linear
    degradation, plus the constant basal synthesis `H`.
    """
    diffusion_term = simulation.diffusion(D_h, c_h, adjacency_matrix)
    production_term = rho_h * c_a2
    degradation_term = mu_h * h
    return diffusion_term + production_term - degradation_term + H
sim.set_ODE("a", da_dt)
sim.set_ODE("h", dh_dt)
"""
Explanation: The formula of $\dfrac{dh}{dt}$ is similarly built, except for the fact that the variable synthesis term is only a-dependent.
End of explanation
"""
sim.initialize_cell_variables()
"""
Explanation: Initial conditions
End of explanation
"""
a0 = np.full(sim.n_cells, 0.789)
h0 = np.full(sim.n_cells, 4.863)
sim.set_cell_variable("a", a0)
sim.set_cell_variable("h", h0)
"""
Explanation: We initialize initial quantities of matter to values that would be close to their steady state if there was no diffusion (i.e. if cells were independent). As cell volumes are all slightly different (the grid is noisy), concentrations will also all be slightly different and there is no need to randomize the initial quantities of matter.
End of explanation
"""
sim.set_duration(3200)
sim.set_time_steps(10, "linear")
"""
Explanation: Duration
End of explanation
"""
sim.register_renderer(multicell.rendering.MatplotlibRenderer, "c_a", {"max_cmap": 1.3, "view": (90, -90), "axes": False})
"""
Explanation: Rendering
End of explanation
"""
sim.renderer.display("c_a")
"""
Explanation: Visualization of the initial state
End of explanation
"""
sim.simulate()
sim.set_duration(1e7)
sim.set_time_steps(1)
sim.simulate()
"""
Explanation: Simulation
End of explanation
"""
|
atlury/deep-opencl | DL0110EN/4.3.3mist1layerassignmnt.ipynb | lgpl-3.0 | import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import torch.nn.functional as F
import matplotlib.pylab as plt
import numpy as np
"""
Explanation: <div class="alert alert-block alert-info" style="margin-top: 20px">
<a href="http://cocl.us/pytorch_link_top"><img src = "http://cocl.us/Pytorch_top" width = 950, align = "center"></a>
<img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 200, align = "center">
<h1 align=center><font size = 5>Practice: Use the Sequential Constructor to Test the Test Sigmoid, Tanh, and Relu Activations Functions on the MNIST Dataset</font></h1>
# Table of Contents
In this lab, you will test Sigmoid, Tanh, and ReLU activation functions on the MNIST dataset.
<div class="alert alert-block alert-info" style="margin-top: 20px">
<li><a href="#ref1">Neural Network Module and Training Function</a></li>
<li><a href="#ref2">Prepare Data </a></li>
<li><a href="#ref3">Define Criterion function</a></li>
<li><a href="#ref4">Practice:Test Sigmoid, Tanh, and Relu</a></li>
<li><a href="#ref4">Analyze Results</a></li>
<br>
<p></p>
Estimated Time Needed: <strong>25 min</strong>
</div>
<hr>
Import the following libraries:
End of explanation
"""
def train(model, criterion, train_loader, validation_loader, optimizer, epochs=100):
    """Train `model` and record per-batch loss and per-epoch validation accuracy.

    Parameters
    ----------
    model : nn.Module
        Network taking flattened 28*28 images and returning class logits.
    criterion : callable
        Loss function applied to (logits, targets).
    train_loader, validation_loader : torch.utils.data.DataLoader
        Loaders yielding (image batch, label batch) pairs.
    optimizer : torch.optim.Optimizer
        Optimizer updating `model`'s parameters.
    epochs : int
        Number of full passes over the training data.

    Returns
    -------
    dict
        'training_loss': loss value for every mini-batch update;
        'validation_accuracy': accuracy (in percent) after each epoch.
    """
    useful_stuff = {'training_loss': [], 'validation_accuracy': []}
    for epoch in range(epochs):
        for x, y in train_loader:
            # Clear gradients accumulated from the previous step.
            optimizer.zero_grad()
            # Forward pass: flatten each image into a 784-vector of pixels.
            z = model(x.view(-1, 28 * 28))
            loss = criterion(z, y)
            # Backpropagate and update the parameters.
            loss.backward()
            optimizer.step()
            useful_stuff['training_loss'].append(loss.data.item())
        correct = 0
        for x, y in validation_loader:
            # Predicted class is the index of the largest logit.
            yhat = model(x.view(-1, 28 * 28))
            _, label = torch.max(yhat, 1)
            correct += (label == y).sum().item()
        # Use the loader's own dataset size rather than the module-level
        # `validation_dataset` global the original silently depended on.
        accuracy = 100 * (correct / len(validation_loader.dataset))
        useful_stuff['validation_accuracy'].append(accuracy)
    return useful_stuff
"""
Explanation: <a id="ref1"></a>
<h2 align=center>Neural Network Module and Training Function </h2>
Define a function to train the model. In this case, the function returns a Python dictionary to store the training loss and accuracy on the validation data.
End of explanation
"""
train_dataset=dsets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())
"""
Explanation: <a id="ref2"></a>
<h2 align=center>Prepare Data </h2>
Load the training dataset by setting the parameter <code>train</code> to <code>True</code> and convert it to a tensor by placing a transform object in the argument <code>transform</code>:
End of explanation
"""
validation_dataset=dsets.MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())
"""
Explanation: Load the testing dataset by setting the parameter <code>train</code> to <code>False</code> and convert it to a tensor by placing a transform object in the argument <code>transform</code>:
End of explanation
"""
criterion=nn.CrossEntropyLoss()
"""
Explanation: Create the criterion function:
End of explanation
"""
train_loader=torch.utils.data.DataLoader(dataset=train_dataset,batch_size=2000,shuffle=True)
validation_loader=torch.utils.data.DataLoader(dataset=validation_dataset,batch_size=5000,shuffle=False)
"""
Explanation: Create the training-data loader and the validation-data loader objects:
End of explanation
"""
criterion=nn.CrossEntropyLoss()
"""
Explanation: <a id="ref3"></a>
<h2 align=center>Criterion Function</h2>
Create the criterion function:
End of explanation
"""
input_dim=28*28
hidden_dim=100
output_dim=10
"""
Explanation: <a id="ref4"></a>
<h2 align=center>Test Sigmoid, Tanh, and Relu and Train the Model</h2>
Use the following parameters to construct the model:
End of explanation
"""
learning_rate=0.01
optimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)
training_results=train(model,criterion, train_loader,validation_loader, optimizer, epochs=30)
"""
Explanation: Use nn.Sequential to build a one hidden layer neural <code>model</code> network with a sigmoid activation to classify the 10 digits from the MNIST dataset.
End of explanation
"""
optimizer=torch.optim.SGD(model_Tanh.parameters(),lr=learning_rate)
training_results_tanch=train(model_Tanh,criterion, train_loader,validation_loader, optimizer, epochs=30)
"""
Explanation: Double-click here for the solution.
<!--
model=nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.Sigmoid(),
nn.Linear(hidden_dim, output_dim),
)
-->
Train the network by using the Tanh activations function:
Use nn.Sequential to build a one hidden layer neural <code>model_Tanh</code> network with a Tanh activation to classify the 10 digits from the MNIST dataset.
End of explanation
"""
optimizer=torch.optim.SGD(model_Tanh.parameters(),lr=learning_rate)
training_results_tanch=train(model_Tanh,criterion, train_loader,validation_loader, optimizer, epochs=30)
"""
Explanation: Double-click here for the solution.
<!--
model_Tanh=nn.Sequential(
torch.nn.Linear(input_dim, hidden_dim),
nn.Tanh(),nn.Linear(hidden_dim, output_dim),
)
-->
End of explanation
"""
optimizer=torch.optim.SGD(modelRelu.parameters(),lr=learning_rate)
# Bug fix: store the ReLU run under its own name. The original assigned to
# training_results_tanch, overwriting the Tanh results and leaving
# training_results_relu (used by the comparison plots) undefined.
training_results_relu=train(modelRelu,criterion, train_loader,validation_loader, optimizer, epochs=30)
"""
Explanation: Use nn.Sequential to build a one hidden layer neural <code>modelRelu</code> network with a ReLU activation to classify the 10 digits from the MNIST dataset.
End of explanation
"""
# Compare per-iteration training loss across the three activation functions.
plt.plot(training_results_tanch['training_loss'],label='tanh')
# Fixed legend label: 'sim' -> 'sigmoid', consistent with the validation plot.
plt.plot(training_results['training_loss'],label='sigmoid')
plt.plot(training_results_relu['training_loss'],label='relu')
plt.ylabel('loss')
plt.xlabel('iterations')
plt.title('training loss iterations')
plt.legend()
"""
Explanation: <!--
modelRelu=nn.Sequential(
torch.nn.Linear(input_dim, hidden_dim),
nn.Tanh(),nn.Linear(hidden_dim, output_dim),
)
-->
Double-click here for the solution.
<!--
modelRelu=torch.nn.Sequential(
torch.nn.Linear(input_dim, hidden_dim),
nn.ReLU(),nn.Linear(hidden_dim, output_dim),)
-->
<a id="ref5"></a>
<h2 align=center>Analyze Results</h2>
Compare the training loss for each activation:
End of explanation
"""
plt.plot(training_results_tanch['validation_accuracy'],label='tanh')
plt.plot(training_results['validation_accuracy'],label='sigmoid')
plt.plot(training_results_relu['validation_accuracy'],label='relu')
plt.ylabel('validation accuracy')
plt.xlabel('epochs ')
plt.legend()
"""
Explanation: Compare the validation loss for each model:
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/launching_into_ml/solutions/improve_data_quality.ipynb | apache-2.0 | # Use the chown command to change the ownership of the repository to user
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
"""
Explanation: Improving Data Quality
Learning Objectives
Resolve missing values
Convert the Date feature column to a datetime format
Rename a feature column, remove a value from a feature column
Create one-hot encoding features
Understand temporal feature conversions
Introduction
Recall that machine learning models can only consume numeric data, and that numeric data should be "1"s or "0"s. Data is said to be "messy" or "untidy" if it is missing attribute values, contains noise or outliers, has duplicates, wrong data, upper/lower case column names, and is essentially not ready for ingestion by a machine learning algorithm.
This notebook presents and solves some of the most common issues of "untidy" data. Note that different problems will require different methods, and they are beyond the scope of this notebook.
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
End of explanation
"""
# Importing necessary tensorflow library and printing the TF version.
import tensorflow as tf
print("TensorFlow version: ",tf.version.VERSION)
import os
# Here we'll import Pandas and Numpy data processing libraries
import pandas as pd
import numpy as np
from datetime import datetime
# Use matplotlib for visualizing the model
import matplotlib.pyplot as plt
# Use seaborn for data visualization
import seaborn as sns
%matplotlib inline
"""
Explanation: Import Libraries
End of explanation
"""
# Creating directory to store dataset
if not os.path.isdir("../data/transport"):
os.makedirs("../data/transport")
# Download the raw .csv data by copying the data from a cloud storage bucket.
!gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/untidy_vehicle_data_toy.csv ../data/transport
# ls shows the working directory's contents.
# Using the -l parameter will lists files with assigned permissions
!ls -l ../data/transport
"""
Explanation: Load the Dataset
The dataset is based on California's Vehicle Fuel Type Count by Zip Code report. The dataset has been modified to make the data "untidy" and is thus a synthetic representation that can be used for learning purposes.
End of explanation
"""
# Reading "untidy_vehicle_data_toy.csv" file using the read_csv() function included in the pandas library.
df_transport = pd.read_csv('../data/transport/untidy_vehicle_data_toy.csv')
# Output the first five rows.
df_transport.head()
"""
Explanation: Read Dataset into a Pandas DataFrame
Next, let's read in the dataset just copied from the cloud storage bucket and create a Pandas DataFrame. We also add a Pandas .head() function to show you the top 5 rows of data in the DataFrame. Head() and Tail() are "best-practice" functions used to investigate datasets.
End of explanation
"""
# The .info() function will display the concise summary of an dataframe.
df_transport.info()
"""
Explanation: DataFrame Column Data Types
DataFrames may have heterogeneous or "mixed" data types, that is, some columns are numbers, some are strings, and some are dates etc. Because CSV files do not contain information on what data types are contained in each column, Pandas infers the data types when loading the data, e.g. if a column contains only numbers, Pandas will set that column’s data type to numeric: integer or float.
Run the next cell to see information on the DataFrame.
End of explanation
"""
# Let's print out the first and last five rows of each column.
print(df_transport,5)
"""
Explanation: From what the .info() function shows us, we have six string objects and one float object. We can definitely see more of the "string" object values now!
End of explanation
"""
# We can use .describe() to see some summary statistics for the numeric fields in our dataframe.
df_transport.describe()
"""
Explanation: Summary Statistics
At this point, we have only one column which contains a numerical value (e.g. Vehicles). For features which contain numerical values, we are often interested in various statistical measures relating to those values. Note that because we only have one numeric feature, we see only one summary statistic - for now.
End of explanation
"""
# The .groupby() function is used for spliting the data into groups based on some criteria.
grouped_data = df_transport.groupby(['Zip Code','Model Year','Fuel','Make','Light_Duty','Vehicles'])
# Get the first entry for each month.
df_transport.groupby('Fuel').first()
"""
Explanation: Let's investigate a bit more of our data by using the .groupby() function.
End of explanation
"""
df_transport.isnull().sum()
"""
Explanation: Checking for Missing Values
Missing values adversely impact data quality, as they can lead the machine learning model to make inaccurate inferences about the data. Missing values can be the result of numerous factors, e.g. "bits" lost during streaming transmission, data entry, or perhaps a user forgot to fill in a field. Note that Pandas recognizes both empty cells and “NaN” types as missing values.
Let's show the null values for all features in the DataFrame.
End of explanation
"""
print (df_transport['Date'])
print (df_transport['Date'].isnull())
print (df_transport['Make'])
print (df_transport['Make'].isnull())
print (df_transport['Model Year'])
print (df_transport['Model Year'].isnull())
"""
Explanation: To see a sampling of which values are missing, enter the feature column name. You'll notice that "False" and "True" correspond to the presence or absence of a value by index number.
End of explanation
"""
# In Python shape() is used in pandas to give the number of rows/columns.
# The number of rows is given by .shape[0]. The number of columns is given by .shape[1].
# Thus, shape() consists of an array having two arguments -- rows and columns
print ("Rows : " ,df_transport.shape[0])
print ("Columns : " ,df_transport.shape[1])
print ("\nFeatures : \n" ,df_transport.columns.tolist())
print ("\nUnique values : \n",df_transport.nunique())
print ("\nMissing values : ", df_transport.isnull().sum().values.sum())
"""
Explanation: What can we deduce about the data at this point?
Let's summarize our data by row, column, features, unique, and missing values.
End of explanation
"""
# Output the last five rows in the dataset.
df_transport.tail()
"""
Explanation: Let's see the data again -- this time the last five rows in the dataset.
End of explanation
"""
# The isnull() method is used to check and manage NULL values in a data frame.
# TODO 1a
df_transport.isnull().sum()
"""
Explanation: What Are Our Data Quality Issues?
Data Quality Issue #1:
Missing Values:
Each feature column has multiple missing values. In fact, we have a total of 18 missing values.
Data Quality Issue #2:
Date DataType: Date is shown as an "object" datatype and should be a datetime. In addition, Date is in one column. Our business requirement is to see the Date parsed out to year, month, and day.
Data Quality Issue #3:
Model Year: We are only interested in years greater than 2006, not "<2006".
Data Quality Issue #4:
Categorical Columns: The feature column "Light_Duty" is categorical and has a "Yes/No" choice. We cannot feed values like this into a machine learning model. In addition, we need to "one-hot encode the remaining "string"/"object" columns.
Data Quality Issue #5:
Temporal Features: How do we handle year, month, and day?
Data Quality Issue #1:
Resolving Missing Values
Most algorithms do not accept missing values. Yet, when we see missing values in our dataset, there is always a tendency to just "drop all the rows" with missing values. Although Pandas will fill in the blank space with “NaN", we should "handle" them in some way.
While all the methods to handle missing values are beyond the scope of this lab, there are a few methods you should consider. For numeric columns, use the "mean" values to fill in the missing numeric values. For categorical columns, use the "mode" (or most frequent values) to fill in missing categorical values.
In this lab, we use the .apply and Lambda functions to fill every column with its own most frequent value. You'll learn more about Lambda functions later in the lab.
Let's check again for missing values by showing how many rows contain NaN values for each feature column.
End of explanation
"""
# Here we are using the apply function with lambda.
# We can use the apply() function to apply the lambda function to both rows and columns of a dataframe.
# TODO 1b
df_transport = df_transport.apply(lambda x:x.fillna(x.value_counts().index[0]))
"""
Explanation: Run the cell to apply the lambda function.
End of explanation
"""
# The isnull() method is used to check and manage NULL values in a data frame.
# TODO 1c
df_transport.isnull().sum()
"""
Explanation: Let's check again for missing values.
End of explanation
"""
# The date column is indeed shown as a string object. We can convert it to the datetime datatype with the to_datetime() function in Pandas.
# TODO 2a
df_transport['Date'] = pd.to_datetime(df_transport['Date'],
format='%m/%d/%Y')
# Date is now converted and will display the concise summary of an dataframe.
# TODO 2b
df_transport.info()
# Now we will parse Date into three columns that is year, month, and day.
df_transport['year'] = df_transport['Date'].dt.year
df_transport['month'] = df_transport['Date'].dt.month
df_transport['day'] = df_transport['Date'].dt.day
#df['hour'] = df['date'].dt.hour - you could use this if your date format included hour.
#df['minute'] = df['date'].dt.minute - you could use this if your date format included minute.
# The .info() function will display the concise summary of an dataframe.
df_transport.info()
"""
Explanation: Data Quality Issue #2:
Convert the Date Feature Column to a Datetime Format
End of explanation
"""
# Here, we are creating a new dataframe called "grouped_data" and grouping by on the column "Make"
grouped_data = df_transport.groupby(['Make'])
# Get the first entry for each month.
df_transport.groupby('month').first()
"""
Explanation: Let's confirm the Date parsing. This will also give us a another visualization of the data.
End of explanation
"""
# Here we will visualize our data using the figure() function in the pyplot module of matplotlib's library -- which is used to create a new figure.
plt.figure(figsize=(10,6))
# Seaborn's .jointplot() displays a relationship between 2 variables (bivariate) as well as 1D profiles (univariate) in the margins. This plot is a convenience class that wraps JointGrid.
sns.jointplot(x='month',y='Vehicles',data=df_transport)
# The title() method in matplotlib module is used to specify title of the visualization depicted and displays the title using various attributes.
plt.title('Vehicles by Month')
"""
Explanation: Now that we have Dates as a integers, let's do some additional plotting.
End of explanation
"""
# Let's remove all the spaces for feature columns by renaming them.
# TODO 3a
df_transport.rename(columns = { 'Date': 'date', 'Zip Code':'zipcode', 'Model Year': 'modelyear', 'Fuel': 'fuel', 'Make': 'make', 'Light_Duty': 'lightduty', 'Vehicles': 'vehicles'}, inplace = True)
# Output the first two rows.
df_transport.head(2)
"""
Explanation: Data Quality Issue #3:
Rename a Feature Column and Remove a Value.
Our feature columns have different "capitalizations" in their names, e.g. both upper and lower "case". In addition, there are "spaces" in some of the column names. In addition, we are only interested in years greater than 2006, not "<2006".
We can also resolve the "case" problem too by making all the feature column names lower case.
End of explanation
"""
# Here, we create a copy of the dataframe to avoid copy warning issues.
# TODO 3b
df = df_transport.loc[df_transport.modelyear != '<2006'].copy()
# Here we will confirm that the modelyear value '<2006' has been removed by doing a value count.
df['modelyear'].value_counts(0)
"""
Explanation: Note: Next we create a copy of the dataframe to avoid the "SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame" warning. Run the cell to remove the value '<2006' from the modelyear feature column.
End of explanation
"""
# Lets count the number of "Yes" and"No's" in the 'lightduty' feature column.
df['lightduty'].value_counts(0)
# Let's convert the Yes to 1 and No to 0.
# The .apply takes a function and applies it to all values of a Pandas series (e.g. lightduty).
df.loc[:,'lightduty'] = df['lightduty'].apply(lambda x: 0 if x=='No' else 1)
df['lightduty'].value_counts(0)
# Confirm that "lightduty" has been converted.
df.head()
"""
Explanation: Data Quality Issue #4:
Handling Categorical Columns
The feature column "lightduty" is categorical and has a "Yes/No" choice. We cannot feed values like this into a machine learning model. We need to convert the binary answers from strings of yes/no to integers of 1/0. There are various methods to achieve this. We will use the "apply" method with a lambda expression. Pandas. apply() takes a function and applies it to all values of a Pandas series.
What is a Lambda Function?
Typically, Python requires that you define a function using the def keyword. However, lambda functions are anonymous -- which means there is no need to name them. The most common use case for lambda functions is in code that requires a simple one-line function (e.g. lambdas only have a single expression).
As you progress through the Course Specialization, you will see many examples where lambda functions are being used. Now is a good time to become familiar with them.
End of explanation
"""
# Making dummy variables for categorical data with more inputs.
data_dummy = pd.get_dummies(df[['zipcode','modelyear', 'fuel', 'make']], drop_first=True)
# Output the first five rows.
data_dummy.head()
# Merging (concatenate) original data frame with 'dummy' dataframe.
# TODO 4a
df = pd.concat([df,data_dummy], axis=1)
df.head()
# Dropping attributes for which we made dummy variables. Let's also drop the Date column.
# TODO 4b
df = df.drop(['date','zipcode','modelyear', 'fuel', 'make'], axis=1)
# Confirm that 'zipcode','modelyear', 'fuel', and 'make' have been dropped.
df.head()
"""
Explanation: One-Hot Encoding Categorical Feature Columns
Machine learning algorithms expect input vectors and not categorical features. Specifically, they cannot handle text or string values. Thus, it is often useful to transform categorical features into vectors.
One transformation method is to create dummy variables for our categorical features. Dummy variables are a set of binary (0 or 1) variables that each represent a single class from a categorical feature. We simply encode the categorical variable as a one-hot vector, i.e. a vector where only one element is non-zero, or hot. With one-hot encoding, a categorical feature becomes an array whose size is the number of possible choices for that feature.
Panda provides a function called "get_dummies" to convert a categorical variable into dummy/indicator variables.
End of explanation
"""
# Let's print the unique values for "month", "day" and "year" in our dataset.
print ('Unique values of month:',df.month.unique())
print ('Unique values of day:',df.day.unique())
print ('Unique values of year:',df.year.unique())
"""
Explanation: Data Quality Issue #5:
Temporal Feature Columns
Our dataset now contains year, month, and day feature columns. Let's convert the month and day feature columns to meaningful representations as a way to get us thinking about changing temporal features -- as they are sometimes overlooked.
Note that the Feature Engineering course in this Specialization will provide more depth on methods to handle year, month, day, and hour feature columns.
End of explanation
"""
# Map each temporal variable onto a circle so that the lowest value sits next
# to the highest (e.g. December next to January); the sin/cos pair gives the
# x/y coordinates of that point, preserving cyclical proximity for the model.
df['day_sin'] = np.sin(df.day*(2.*np.pi/31))
df['day_cos'] = np.cos(df.day*(2.*np.pi/31))
# NOTE(review): month is shifted by -1 so January maps to angle 0, but day is
# not shifted the same way (day 1 maps to 2*pi/31, not 0) -- confirm whether
# this asymmetry is intentional.
df['month_sin'] = np.sin((df.month-1)*(2.*np.pi/12))
df['month_cos'] = np.cos((df.month-1)*(2.*np.pi/12))
# Drop the raw temporal columns now that the cyclical encodings exist.
# TODO 5
df = df.drop(['month','day','year'], axis=1)
# Scroll left to see the converted month and day columns.
df.tail(4)
"""
Explanation: Don't worry, this is the last time we will use this code, as you can develop an input pipeline to address these temporal feature columns in TensorFlow and Keras - and it is much easier! But, sometimes you need to appreciate what you're not going to encounter as you move through the course!
Run the cell to view the output.
End of explanation
"""
|
tensorflow/tensorflow | tensorflow/lite/g3doc/models/convert/metadata_writer_tutorial.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2021 The TensorFlow Authors.
End of explanation
"""
!pip install tflite-support-nightly
"""
Explanation: TensorFlow Lite Metadata Writer API
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/models/convert/metadata_writer_tutorial"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/convert/metadata_writer_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/convert/metadata_writer_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/models/convert/metadata_writer_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
TensorFlow Lite Model Metadata is a standard model description format. It contains rich semantics for general model information, inputs/outputs, and associated files, which makes the model self-descriptive and exchangeable.
Model Metadata is currently used in the following two primary use cases:
1. Enable easy model inference using TensorFlow Lite Task Library and codegen tools. Model Metadata contains the mandatory information required during inference, such as label files in image classification, sampling rate of the audio input in audio classification, and tokenizer type to process input string in Natural Language models.
Enable model creators to include documentation, such as description of model inputs/outputs or how to use the model. Model users can view these documentation via visualization tools such as Netron.
TensorFlow Lite Metadata Writer API provides an easy-to-use API to create Model Metadata for popular ML tasks supported by the TFLite Task Library. This notebook shows examples on how the metadata should be populated for the following tasks below:
Image classifiers
Object detectors
Image segmenters
Natural language classifiers
Audio classifiers
Metadata writers for BERT natural language classifiers and BERT question answerers are coming soon.
If you want to add metadata for use cases that are not supported, please use the Flatbuffers Python API. See the tutorials here.
Prerequisites
Install the TensorFlow Lite Support Pypi package.
End of explanation
"""
from tflite_support.metadata_writers import image_classifier
from tflite_support.metadata_writers import writer_utils
"""
Explanation: Create Model Metadata for Task Library and Codegen
<a name=image_classifiers></a>
Image classifiers
See the image classifier model compatibility requirements for more details about the supported model format.
Step 1: Import the required packages.
End of explanation
"""
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/mobilenet_v2_1.0_224.tflite -o mobilenet_v2_1.0_224.tflite
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/labels.txt -o mobilenet_labels.txt
"""
Explanation: Step 2: Download the example image classifier, mobilenet_v2_1.0_224.tflite, and the label file.
End of explanation
"""
ImageClassifierWriter = image_classifier.MetadataWriter
_MODEL_PATH = "mobilenet_v2_1.0_224.tflite"
# Task Library expects label files that are in the same format as the one below.
_LABEL_FILE = "mobilenet_labels.txt"
# Output path for the copy of the model with metadata embedded.
_SAVE_TO_PATH = "mobilenet_v2_1.0_224_metadata.tflite"
# Normalization parameters are required when preprocessing the image. They are
# optional if the image pixel values are in range of [0, 255] and the input
# tensor is quantized to uint8. See the introduction for normalization and
# quantization parameters below for more details.
# https://www.tensorflow.org/lite/models/convert/metadata#normalization_and_quantization_parameters)
_INPUT_NORM_MEAN = 127.5
_INPUT_NORM_STD = 127.5

# Create the metadata writer.
writer = ImageClassifierWriter.create_for_inference(
    writer_utils.load_file(_MODEL_PATH), [_INPUT_NORM_MEAN], [_INPUT_NORM_STD],
    [_LABEL_FILE])

# Verify the metadata generated by metadata writer.
print(writer.get_metadata_json())

# Populate the metadata into the model.
writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)
"""
Explanation: Step 3: Create metadata writer and populate.
End of explanation
"""
from tflite_support.metadata_writers import object_detector
from tflite_support.metadata_writers import writer_utils
"""
Explanation: <a name=object_detectors></a>
Object detectors
See the object detector model compatibility requirements for more details about the supported model format.
Step 1: Import the required packages.
End of explanation
"""
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/object_detector/ssd_mobilenet_v1.tflite -o ssd_mobilenet_v1.tflite
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/object_detector/labelmap.txt -o ssd_mobilenet_labels.txt
"""
Explanation: Step 2: Download the example object detector, ssd_mobilenet_v1.tflite, and the label file.
End of explanation
"""
ObjectDetectorWriter = object_detector.MetadataWriter
_MODEL_PATH = "ssd_mobilenet_v1.tflite"
# Task Library expects label files that are in the same format as the one below.
_LABEL_FILE = "ssd_mobilenet_labels.txt"
_SAVE_TO_PATH = "ssd_mobilenet_v1_metadata.tflite"
# Normalization parameters is required when reprocessing the image. It is
# optional if the image pixel values are in range of [0, 255] and the input
# tensor is quantized to uint8. See the introduction for normalization and
# quantization parameters below for more details.
# https://www.tensorflow.org/lite/models/convert/metadata#normalization_and_quantization_parameters)
_INPUT_NORM_MEAN = 127.5
_INPUT_NORM_STD = 127.5
# Create the metadata writer.
writer = ObjectDetectorWriter.create_for_inference(
writer_utils.load_file(_MODEL_PATH), [_INPUT_NORM_MEAN], [_INPUT_NORM_STD],
[_LABEL_FILE])
# Verify the metadata generated by metadata writer.
print(writer.get_metadata_json())
# Populate the metadata into the model.
writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)
"""
Explanation: Step 3: Create metadata writer and populate.
End of explanation
"""
from tflite_support.metadata_writers import image_segmenter
from tflite_support.metadata_writers import writer_utils
"""
Explanation: <a name=image_segmenters></a>
Image segmenters
See the image segmenter model compatibility requirements for more details about the supported model format.
Step 1: Import the required packages.
End of explanation
"""
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/image_segmenter/deeplabv3.tflite -o deeplabv3.tflite
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/image_segmenter/labelmap.txt -o deeplabv3_labels.txt
"""
Explanation: Step 2: Download the example image segmenter, deeplabv3.tflite, and the label file.
End of explanation
"""
ImageSegmenterWriter = image_segmenter.MetadataWriter
_MODEL_PATH = "deeplabv3.tflite"
# Task Library expects label files that are in the same format as the one below.
_LABEL_FILE = "deeplabv3_labels.txt"
_SAVE_TO_PATH = "deeplabv3_metadata.tflite"
# Normalization parameters is required when reprocessing the image. It is
# optional if the image pixel values are in range of [0, 255] and the input
# tensor is quantized to uint8. See the introduction for normalization and
# quantization parameters below for more details.
# https://www.tensorflow.org/lite/models/convert/metadata#normalization_and_quantization_parameters)
_INPUT_NORM_MEAN = 127.5
_INPUT_NORM_STD = 127.5
# Create the metadata writer.
writer = ImageSegmenterWriter.create_for_inference(
writer_utils.load_file(_MODEL_PATH), [_INPUT_NORM_MEAN], [_INPUT_NORM_STD],
[_LABEL_FILE])
# Verify the metadata generated by metadata writer.
print(writer.get_metadata_json())
# Populate the metadata into the model.
writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)
"""
Explanation: Step 3: Create metadata writer and populate.
End of explanation
"""
from tflite_support.metadata_writers import nl_classifier
from tflite_support.metadata_writers import metadata_info
from tflite_support.metadata_writers import writer_utils
"""
Explanation: <a name=nl_classifiers></a>
Natural language classifiers
See the natural language classifier model compatibility requirements for more details about the supported model format.
Step 1: Import the required packages.
End of explanation
"""
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/nl_classifier/movie_review.tflite -o movie_review.tflite
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/nl_classifier/labels.txt -o movie_review_labels.txt
!curl -L https://storage.googleapis.com/download.tensorflow.org/models/tflite_support/nl_classifier/vocab.txt -o movie_review_vocab.txt
"""
Explanation: Step 2: Download the example natural language classifier, movie_review.tflite, the label file, and the vocab file.
End of explanation
"""
NLClassifierWriter = nl_classifier.MetadataWriter
_MODEL_PATH = "movie_review.tflite"
# Task Library expects label files and vocab files that are in the same formats
# as the ones below.
_LABEL_FILE = "movie_review_labels.txt"
_VOCAB_FILE = "movie_review_vocab.txt"
# NLClassifier supports tokenizing the input string with a regex tokenizer. See
# more details about how to set up RegexTokenizer below:
# https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/metadata/python/metadata_writers/metadata_info.py#L130
_DELIM_REGEX_PATTERN = r"[^\w\']+"
# Bug fix: output filename was previously misspelled "moview_review_metadata.tflite".
_SAVE_TO_PATH = "movie_review_metadata.tflite"

# Create the metadata writer.
writer = nl_classifier.MetadataWriter.create_for_inference(
    writer_utils.load_file(_MODEL_PATH),
    metadata_info.RegexTokenizerMd(_DELIM_REGEX_PATTERN, _VOCAB_FILE),
    [_LABEL_FILE])

# Verify the metadata generated by metadata writer.
print(writer.get_metadata_json())

# Populate the metadata into the model.
writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)
"""
Explanation: Step 3: Create metadata writer and populate.
End of explanation
"""
from tflite_support.metadata_writers import audio_classifier
from tflite_support.metadata_writers import metadata_info
from tflite_support.metadata_writers import writer_utils
"""
Explanation: <a name=audio_classifiers></a>
Audio classifiers
See the audio classifier model compatibility requirements for more details about the supported model format.
Step 1: Import the required packages.
End of explanation
"""
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/audio_classifier/yamnet_wavin_quantized_mel_relu6.tflite -o yamnet.tflite
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/audio_classifier/yamnet_521_labels.txt -o yamnet_labels.txt
"""
Explanation: Step 2: Download the example audio classifier, yamnet.tflite, and the label file.
End of explanation
"""
AudioClassifierWriter = audio_classifier.MetadataWriter
_MODEL_PATH = "yamnet.tflite"
# Task Library expects label files that are in the same format as the one below.
_LABEL_FILE = "yamnet_labels.txt"
# Expected sampling rate of the input audio buffer.
_SAMPLE_RATE = 16000
# Expected number of channels of the input audio buffer. Note, Task library only
# support single channel so far.
_CHANNELS = 1
_SAVE_TO_PATH = "yamnet_metadata.tflite"
# Create the metadata writer.
writer = AudioClassifierWriter.create_for_inference(
writer_utils.load_file(_MODEL_PATH), _SAMPLE_RATE, _CHANNELS, [_LABEL_FILE])
# Verify the metadata generated by metadata writer.
print(writer.get_metadata_json())
# Populate the metadata into the model.
writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)
"""
Explanation: Step 3: Create metadata writer and populate.
End of explanation
"""
from tflite_support.metadata_writers import image_classifier
from tflite_support.metadata_writers import metadata_info
from tflite_support.metadata_writers import writer_utils
from tflite_support import metadata_schema_py_generated as _metadata_fb
"""
Explanation: Create Model Metadata with semantic information
You can fill in more descriptive information about the model and each tensor through the Metadata Writer API to help improve model understanding. It can be done through the 'create_from_metadata_info' method in each metadata writer. In general, you can fill in data through the parameters of 'create_from_metadata_info', i.e. general_md, input_md, and output_md. See the example below to create a rich Model Metadata for image classifiers.
Step 1: Import the required packages.
End of explanation
"""
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/mobilenet_v2_1.0_224.tflite -o mobilenet_v2_1.0_224.tflite
!curl -L https://github.com/tensorflow/tflite-support/raw/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/labels.txt -o mobilenet_labels.txt
"""
Explanation: Step 2: Download the example image classifier, mobilenet_v2_1.0_224.tflite, and the label file.
End of explanation
"""
model_buffer = writer_utils.load_file("mobilenet_v2_1.0_224.tflite")

# Create general model information.
general_md = metadata_info.GeneralMd(
    name="ImageClassifier",
    version="v1",
    description=("Identify the most prominent object in the image from a "
                 "known set of categories."),
    author="TensorFlow Lite",
    licenses="Apache License. Version 2.0")

# Create input tensor information.
# Bug fix: the description previously said "128 x 128", but this model
# (mobilenet_v2_1.0_224) takes 224 x 224 input images.
input_md = metadata_info.InputImageTensorMd(
    name="input image",
    description=("Input image to be classified. The expected image is "
                 "224 x 224, with three channels (red, blue, and green) per "
                 "pixel. Each element in the tensor is a value between min and "
                 "max, where (per-channel) min is [0] and max is [255]."),
    norm_mean=[127.5],
    norm_std=[127.5],
    color_space_type=_metadata_fb.ColorSpaceType.RGB,
    tensor_type=writer_utils.get_input_tensor_types(model_buffer)[0])

# Create output tensor information.
output_md = metadata_info.ClassificationTensorMd(
    name="probability",
    description="Probabilities of the 1001 labels respectively.",
    label_files=[
        metadata_info.LabelFileMd(file_path="mobilenet_labels.txt",
                                  locale="en")
    ],
    tensor_type=writer_utils.get_output_tensor_types(model_buffer)[0])
"""
Explanation: Step 3: Create model and tensor information.
End of explanation
"""
ImageClassifierWriter = image_classifier.MetadataWriter
# Bug fix: define the output path explicitly. Previously this cell reused
# whatever _SAVE_TO_PATH was last assigned in the notebook (the audio
# classifier's "yamnet_metadata.tflite"), while the next cell reads
# "mobilenet_v2_1.0_224_metadata.tflite" -- so the displayed metadata was not
# the one written here.
_SAVE_TO_PATH = "mobilenet_v2_1.0_224_metadata.tflite"

# Create the metadata writer.
writer = ImageClassifierWriter.create_from_metadata_info(
    model_buffer, general_md, input_md, output_md)

# Verify the metadata generated by metadata writer.
print(writer.get_metadata_json())

# Populate the metadata into the model.
writer_utils.save_file(writer.populate(), _SAVE_TO_PATH)
"""
Explanation: Step 4: Create metadata writer and populate.
End of explanation
"""
from tflite_support import metadata

# Load the populated model and pretty-print its metadata plus any packed
# associated files (e.g. the label file).
displayer = metadata.MetadataDisplayer.with_model_file("mobilenet_v2_1.0_224_metadata.tflite")
print("Metadata populated:")
print(displayer.get_metadata_json())

print("Associated file(s) populated:")
for file_name in displayer.get_packed_associated_file_list():
    print("file name: ", file_name)
    print("file content:")
    print(displayer.get_associated_file_buffer(file_name))
"""
Explanation: Read the metadata populated to your model.
You can display the metadata and associated files in a TFLite model through the following code:
End of explanation
"""
|
vsporeddy/bigbang | examples/Plot Activity.ipynb | gpl-2.0 | %matplotlib inline
"""
Explanation: This notebook shows how BigBang can help you explore a mailing list archive.
First, use this IPython magic to tell the notebook to display matplotlib graphics inline. This is a nice way to display results.
End of explanation
"""
import bigbang.mailman as mailman
import bigbang.graph as graph
import bigbang.process as process
from bigbang.parse import get_date
#from bigbang.functions import *
from bigbang.archive import Archive
"""
Explanation: Import the BigBang modules as needed. These should be in your Python environment if you've installed BigBang correctly.
End of explanation
"""
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import numpy as np
import math
import pytz
import pickle
import os
pd.options.display.mpl_style = 'default' # pandas has a set of preferred graph formatting options
"""
Explanation: Also, let's import a number of other dependencies we'll use later.
End of explanation
"""
urls = ["ipython-dev",
"ipython-user"]
archives = [Archive(url,archive_dir="../archives",mbox=True) for url in urls]
activities = [arx.get_activity() for arx in archives]
archives[0].data
"""
Explanation: Now let's load the data for analysis.
End of explanation
"""
window = 100
"""
Explanation: This variable is for the range of days used in computing rolling averages.
End of explanation
"""
# Plot, for each mailing list, the `window`-day rolling mean of messages/day.
plt.figure(figsize=(12.5, 7.5))
for i, activity in enumerate(activities):
    colors = 'rgbkm'  # one plot color per list (loop-invariant; could be hoisted)
    ta = activity.sum(1)  # total messages per day, summed across senders
    # NOTE(review): pd.rolling_mean was removed in modern pandas; the
    # equivalent is ta.rolling(window).mean().
    rmta = pd.rolling_mean(ta,window)
    rmtadna = rmta.dropna()  # the first window-1 rolling values are NaN
    plt.plot_date(rmtadna.index,
                  rmtadna.values,
                  colors[i],
                  label=mailman.get_list_name(urls[i]) + ' activity',xdate=True)
plt.legend()
plt.savefig("activites-marked.png")
plt.show()

# NOTE(review): `arx` is the list-comprehension variable from the earlier
# `archives = [Archive(url, ...) for url in urls]` cell -- this only resolves
# under Python 2, where comprehension variables leak into the enclosing scope.
arx.data
"""
Explanation: For each of the mailing lists we are looking at, plot the rolling average of number of emails sent per day.
End of explanation
"""
a = activities[0]  # activity matrix for the first mailing list
ta = a.sum(0)  # total messages per sender (sum over the day axis)
# NOTE(review): in-place Series.sort() is Python-2-era pandas; modern code
# would use ta.sort_values().
ta.sort()
ta[-10:].plot(kind='barh')  # top 10 senders by message count
"""
Explanation: Now, let's see: who are the authors of the most messages to one particular list?
End of explanation
"""
import Levenshtein
distancedf = process.matricize(a.columns[:100], lambda a,b: Levenshtein.distance(a,b)) # calculate the edit distance between the two From titles
df = distancedf.astype(int) # specify that the values in the matrix are integers
fig = plt.figure(figsize=(18, 18))
plt.pcolor(df)
#plt.yticks(np.arange(0.5, len(df.index), 1), df.index) # these lines would show labels, but that gets messy
#plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)
plt.show()
"""
Explanation: This might be useful for seeing the distribution (does the top message sender dominate?) or for identifying key participants to talk to.
Many mailing lists will have some duplicate senders: individuals who use multiple email addresses or are recorded as different senders when using the same email address. We want to identify those potential duplicates in order to get a more accurate representation of the distribution of senders.
To begin with, let's do a naive calculation of the similarity of the From strings, based on the Levenshtein distance.
This can take a long time for a large matrix, so we will truncate it for purposes of demonstration.
End of explanation
"""
levdf = process.sorted_lev(a) # creates a slightly more nuanced edit distance matrix
# and sorts by rows/columns that have the best candidates
levdf_corner = levdf.iloc[:25,:25] # just take the top 25
fig = plt.figure(figsize=(15, 12))
plt.pcolor(levdf_corner)
plt.yticks(np.arange(0.5, len(levdf_corner.index), 1), levdf_corner.index)
plt.xticks(np.arange(0.5, len(levdf_corner.columns), 1), levdf_corner.columns, rotation='vertical')
plt.colorbar()
plt.show()
"""
Explanation: The dark blue diagonal is comparing an entry to itself (we know the distance is zero in that case), but a few other dark blue patches suggest there are duplicates even using this most naive measure.
Below is a variant of the visualization for inspecting the particular apparent duplicates.
End of explanation
"""
consolidates = []

# Gather pairs of sender names whose edit distance is less than 10.
# NOTE(review): if the distance matrix is symmetric, each qualifying pair is
# appended in both orders, (a, b) and (b, a) -- confirm that
# consolidate_senders_activity tolerates the duplicates.
for col in levdf.columns:
    for index, value in levdf.loc[levdf[col] < 10, col].iteritems():
        if index != col:  # the name shouldn't be a pair for itself
            consolidates.append((col, index))

# Python 2 print statements (this notebook predates Python 3 support).
print str(len(consolidates)) + ' candidates for consolidation.'

c = process.consolidate_senders_activity(a, consolidates)
print 'We removed: ' + str(len(a.columns) - len(c.columns)) + ' columns.'
"""
Explanation: For this still naive measure (edit distance on a normalized string), it appears that there are many duplicates in the <10 range, but that above that the edit distance of short email addresses at common domain names can take over.
End of explanation
"""
lev_c = process.sorted_lev(c)
levc_corner = lev_c.iloc[:25,:25]
fig = plt.figure(figsize=(15, 12))
plt.pcolor(levc_corner)
plt.yticks(np.arange(0.5, len(levc_corner.index), 1), levc_corner.index)
plt.xticks(np.arange(0.5, len(levc_corner.columns), 1), levc_corner.columns, rotation='vertical')
plt.colorbar()
plt.show()
"""
Explanation: We can create the same color plot with the consolidated dataframe to see how the distribution has changed.
End of explanation
"""
fig, axes = plt.subplots(nrows=2, figsize=(15, 12))
ta = a.sum(0) # sum along the first axis
ta.sort()
ta[-20:].plot(kind='barh',ax=axes[0], title='Before consolidation')
tc = c.sum(0)
tc.sort()
tc[-20:].plot(kind='barh',ax=axes[1], title='After consolidation')
plt.show()
"""
Explanation: Of course, there are still some duplicates, mostly people who are using the same name, but with a different email address at an unrelated domain name.
How does our consolidation affect the graph of distribution of senders?
End of explanation
"""
|
qutip/qutip-notebooks | examples/energy-levels.ipynb | lgpl-3.0 | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
from qutip import *
"""
Explanation: QuTiP example: Energy-levels of a quantum systems as a function of a single parameter
J.R. Johansson and P.D. Nation
For more information about QuTiP see http://qutip.org
End of explanation
"""
def compute(w1list, w2, w3, g12, g13):
    """Compute the eigenenergies of three coupled qubits for each w1 in w1list.

    Parameters
    ----------
    w1list : sequence of float
        Frequencies of atom 1 to sweep over.
    w2, w3 : float
        Fixed frequencies of atoms 2 and 3.
    g12, g13 : float
        Coupling strengths for atom1-atom2 and atom1-atom3.

    Returns
    -------
    numpy.ndarray
        Array of shape (len(w1list), 8) with the real eigenenergies of the
        2x2x2 composite system, one row per value of w1.
    """
    # Pre-compute the loop-invariant operators for the Hamiltonian.
    sz1 = tensor(sigmaz(), qeye(2), qeye(2))
    sx1 = tensor(sigmax(), qeye(2), qeye(2))
    sz2 = tensor(qeye(2), sigmaz(), qeye(2))
    sx2 = tensor(qeye(2), sigmax(), qeye(2))
    sz3 = tensor(qeye(2), qeye(2), sigmaz())
    sx3 = tensor(qeye(2), qeye(2), sigmax())

    evals_mat = np.zeros((len(w1list), 2 * 2 * 2))

    # enumerate() replaces the manual idx counter of the original version.
    for idx, w1 in enumerate(w1list):
        # Evaluate the Hamiltonian for this value of w1.
        H = w1 * sz1 + w2 * sz2 + w3 * sz3 + g12 * sx1 * sx2 + g13 * sx1 * sx3

        # Find the energy eigenvalues of the composite system.
        evals, ekets = H.eigenstates()
        evals_mat[idx, :] = np.real(evals)

    return evals_mat
w1 = 1.0 * 2 * pi # atom 1 frequency: sweep this one
w2 = 0.9 * 2 * pi # atom 2 frequency
w3 = 1.1 * 2 * pi # atom 3 frequency
g12 = 0.05 * 2 * pi # atom1-atom2 coupling strength
g13 = 0.05 * 2 * pi # atom1-atom3 coupling strength
w1list = np.linspace(0.75, 1.25, 50) * 2 * pi # atom 1 frequency range
evals_mat = compute(w1list, w2, w3, g12, g13)
fig, ax = plt.subplots(figsize=(12,6))
for n in [1,2,3]:
ax.plot(w1list / (2*pi), (evals_mat[:,n]-evals_mat[:,0]) / (2*pi), 'b')
ax.set_xlabel('Energy splitting of atom 1')
ax.set_ylabel('Eigenenergies')
ax.set_title('Energy spectrum of three coupled qubits');
"""
Explanation: Energy spectrum of three coupled qubits
End of explanation
"""
from qutip.ipynbtools import version_table
version_table()
"""
Explanation: Versions
End of explanation
"""
|
fastai/fastai | dev_nbs/course/lesson3-planet.ipynb | apache-2.0 | %matplotlib inline
from fastai.vision.all import *
from nbdev.showdoc import *
"""
Explanation: Multi-label prediction with Planet Amazon dataset
End of explanation
"""
# ! {sys.executable} -m pip install kaggle --upgrade
"""
Explanation: Getting the data
The planet dataset isn't available on the fastai dataset page due to copyright restrictions. You can download it from Kaggle however. Let's see how to do this by using the Kaggle API as it's going to be pretty useful to you if you want to join a competition or use other Kaggle datasets later on.
First, install the Kaggle API by uncommenting the following line and executing it, or by executing it in your terminal (depending on your platform you may need to modify this slightly to either add source activate fastai or similar, or prefix pip with a path. Have a look at how conda install is called for your platform in the appropriate Returning to work section of https://course.fast.ai/. (Depending on your environment, you may also need to append "--user" to the command.)
End of explanation
"""
# ! mkdir -p ~/.kaggle/
# ! mv kaggle.json ~/.kaggle/
# For Windows, uncomment these two commands
# ! mkdir %userprofile%\.kaggle
# ! move kaggle.json %userprofile%\.kaggle
"""
Explanation: Then you need to upload your credentials from Kaggle on your instance. Login to kaggle and click on your profile picture on the top left corner, then 'My account'. Scroll down until you find a button named 'Create New API Token' and click on it. This will trigger the download of a file named 'kaggle.json'.
Upload this file to the directory this notebook is running in, by clicking "Upload" on your main Jupyter page, then uncomment and execute the next two commands (or run them in a terminal). For Windows, uncomment the last two commands.
End of explanation
"""
path = Config().data/'planet'
path.mkdir(parents=True, exist_ok=True)
path
#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p {path}
#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv -p {path}
#! unzip -q -n {path}/train_v2.csv.zip -d {path}
"""
Explanation: You're all set to download the data from planet competition. You first need to go to its main page and accept its rules, and run the two cells below (uncomment the shell commands to download and unzip the data). If you get a 403 forbidden error it means you haven't accepted the competition rules yet (you have to go to the competition page, click on Rules tab, and then scroll to the bottom to find the accept button).
End of explanation
"""
# ! conda install --yes --prefix {sys.prefix} -c haasad eidl7zip
"""
Explanation: To extract the content of this file, we'll need 7zip, so uncomment the following line if you need to install it (or run sudo apt install p7zip-full in your terminal).
End of explanation
"""
#! 7za -bd -y -so x {path}/train-jpg.tar.7z | tar xf - -C {path.as_posix()}
"""
Explanation: And now we can unpack the data (uncomment to run - this might take a few minutes to complete).
End of explanation
"""
df = pd.read_csv(path/'train_v2.csv')
df.head()
"""
Explanation: Multiclassification
Contrary to the pets dataset studied in last lesson, here each picture can have multiple labels. If we take a look at the csv file containing the labels (in 'train_v2.csv' here) we see that each 'image_name' is associated to several tags separated by spaces.
End of explanation
"""
# Augmentations: satellite imagery has no canonical orientation, so vertical
# flips are enabled; lighting/zoom jitter is kept small and warping disabled.
tfms = aug_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0., size=128)
# DataBlock: images in, multi-label categories out.
planet = DataBlock(blocks=(ImageBlock, MultiCategoryBlock),
                   # column 0 holds the image name; build the full .jpg path from it
                   get_x=ColReader(0, pref=str(path/"train-jpg")+"/", suff='.jpg'),
                   # column 1 holds space-separated tags -> multi-label target
                   get_y=ColReader(1, label_delim=' '),
                   # reproducible random train/validation split
                   splitter=RandomSplitter(seed=42),
                   batch_tfms=tfms+[Normalize.from_stats(*imagenet_stats)])
"""
Explanation: To put this in a DataLoaders while using the data block API, to do this we need to indicate:
- the types of our inputs/targets (here image and multi-label categories) through a thing called blocks
- how to get our xs and ys from the dataframe through a ColReader
- how to split out data between training and validation
Since we have satellite images, it makes sense to use all kinds of flip, we limit the amount of lighting/zoom and remove the warping.
End of explanation
"""
dls = planet.dataloaders(df, bs=64, path=path)
"""
Explanation: With the DataBlock defined, we can now create the DataLoaders with a batch size of 64.
End of explanation
"""
dls.show_batch(max_n=9, figsize=(12,9))
"""
Explanation: show_batch still works, and show us the different labels separated by ;.
End of explanation
"""
arch = resnet50  # ImageNet-pretrained backbone
# Multi-label accuracy with a 0.2 activation threshold (instead of the 0.5 default).
acc_02 = partial(accuracy_multi, thresh=0.2)
# F-beta (beta=2) averaged per sample -- the metric used by the Kaggle competition.
f_score = FBetaMulti(2, thresh=0.2, average='samples')
learn = vision_learner(dls, arch, metrics=[acc_02, f_score])
"""
Explanation: To create a Learner we use the same function as in lesson 1. Our base architecture is resnet50 again, but the metrics are a little bit different: we use accuracy_thresh instead of accuracy. In lesson 1, we determined the prediction for a given class by picking the final activation that was the biggest, but here, each activation can be 0. or 1. accuracy_thresh selects the ones that are above a certain threshold (0.5 by default) and compares them to the ground truth.
As for Fbeta, it's the metric that was used by Kaggle on this competition. See here for more details.
End of explanation
"""
learn.lr_find()
"""
Explanation: We use the LR Finder to pick a good learning rate.
End of explanation
"""
lr = 0.01
learn.fit_one_cycle(5, slice(lr))
learn.save('stage-1-rn50')
"""
Explanation: Then we can fit the head of our network.
End of explanation
"""
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(5, slice(1e-5, lr/5))
learn.save('stage-2-rn50')
tfms = aug_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0., size=256)
np.random.seed(42)
dls = planet.dataloaders(df, bs=64, path=path, batch_tfms=tfms+[Normalize.from_stats(*imagenet_stats)])
learn.dls = dls
learn.freeze()
learn.lr_find()
lr=1e-2/2
learn.fit_one_cycle(5, slice(lr))
learn.save('stage-1-256-rn50')
learn.unfreeze()
learn.fit_one_cycle(5, slice(1e-5, lr/5))
learn.recorder.plot_loss()
learn.save('stage-2-256-rn50')
"""
Explanation: ...And fine-tune the whole model:
End of explanation
"""
#learn.export()
"""
Explanation: You won't really know how you're going until you submit to Kaggle, since the leaderboard isn't using the same subset as we have for training. But as a guide, 50th place (out of 938 teams) on the private leaderboard was a score of 0.930.
End of explanation
"""
#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path}
#! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path}
#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg-additional.tar.7z -p {path}
#! 7za -bd -y -so x {path}/test-jpg-additional.tar.7z | tar xf - -C {path}
test_items = get_image_files(path/'test-jpg') + get_image_files(path/'test-jpg-additional')
len(test_items)
dl = learn.dls.test_dl(test_items, rm_type_tfms=1, bs=64)
preds, _ = learn.get_preds(dl=dl)
preds.shape
thresh = 0.2
labelled_preds = [' '.join([learn.dls.vocab[i] for i,p in enumerate(pred) if p > thresh]) for pred in preds.numpy()]
labelled_preds[:5]
fnames = [f.name[:-4] for f in test_items]
df = pd.DataFrame({'image_name':fnames, 'tags':labelled_preds}, columns=['image_name', 'tags'])
df.to_csv(path/'submission.csv', index=False)
! kaggle competitions submit planet-understanding-the-amazon-from-space -f {path/'submission.csv'} -m "My submission"
"""
Explanation: Submitting to Kaggle
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.