repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
Rotvig/cs231n | Project/DCGAN.ipynb | mit | #Import the libraries we will need.
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
import os
import scipy.misc
import scipy
"""
Explanation: Deep Convolutional Generative Adversarial Network (DCGAN) Tutorial
This tutorial walks through an implementation of DCGAN as described in Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks.
To learn more about generative adversarial networks, see my Medium post on them.
End of explanation
"""
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
"""
Explanation: We will be using the MNIST dataset. input_data is a library that downloads the dataset and unzips it automatically.
End of explanation
"""
# This function performs a leaky ReLU activation, which is needed for the discriminator network.
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU written as a single linear combination of x and |x|.

    For x > 0 this evaluates to x; for x < 0 it evaluates to leak * x,
    which is the usual leaky-ReLU used in DCGAN discriminators.
    """
    with tf.variable_scope(name):
        pos_coef = 0.5 * (1 + leak)
        neg_coef = 0.5 * (1 - leak)
        return pos_coef * x + neg_coef * abs(x)
# The functions below are taken from carpedm20's implementation https://github.com/carpedm20/DCGAN-tensorflow
#They allow for saving sample images from the generator to follow progress
def save_images(images, size, image_path):
    """Rescale generator output from [-1, 1] to [0, 1] and save it as one tiled image."""
    rescaled = inverse_transform(images)
    return imsave(rescaled, size, image_path)
def imsave(images, size, path):
    """Tile `images` into a single grid image and write it to `path`.

    NOTE(review): scipy.misc.imsave was deprecated and removed in SciPy >= 1.2;
    confirm the pinned SciPy version, or switch to imageio.imwrite.
    """
    return scipy.misc.imsave(path, merge(images, size))
def inverse_transform(images):
    """Map generator outputs from the tanh range [-1, 1] back to [0, 1]."""
    shifted = images + 1.
    return shifted / 2.
def merge(images, size):
    """Tile a batch of 2-D images (shape [n, h, w]) into one size[0] x size[1] grid.

    Images fill the grid row by row; the canvas is zero-padded if fewer than
    size[0] * size[1] images are supplied.
    """
    h, w = images.shape[1], images.shape[2]
    canvas = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        row, col = divmod(idx, size[1])
        canvas[row * h:(row + 1) * h, col * w:(col + 1) * w] = image
    return canvas
"""
Explanation: Helper Functions
End of explanation
"""
def generator(z):
    """Map a batch of latent vectors z (shape [batch, z_size]) to 32x32x1 images.

    Each hidden layer is a strided transpose convolution + batch norm + ReLU;
    the output layer uses tanh, matching the [-1, 1] scaling of the real
    images. Relies on the module-level `initializer` from the graph-setup cell.
    """
    # Project z to a 4*4*256 vector, then reshape into a 4x4x256 feature map.
    zP = slim.fully_connected(z,4*4*256,normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_project',weights_initializer=initializer)
    zCon = tf.reshape(zP,[-1,4,4,256])
    # Upsample 4x4 -> 8x8 (stride 2).
    gen1 = slim.convolution2d_transpose(\
        zCon,num_outputs=64,kernel_size=[5,5],stride=[2,2],\
        padding="SAME",normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_conv1', weights_initializer=initializer)
    # Upsample 8x8 -> 16x16.
    gen2 = slim.convolution2d_transpose(\
        gen1,num_outputs=32,kernel_size=[5,5],stride=[2,2],\
        padding="SAME",normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_conv2', weights_initializer=initializer)
    # Upsample 16x16 -> 32x32.
    gen3 = slim.convolution2d_transpose(\
        gen2,num_outputs=16,kernel_size=[5,5],stride=[2,2],\
        padding="SAME",normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_conv3', weights_initializer=initializer)
    # Final 1-channel layer; stride defaults to 1 so spatial size stays 32x32.
    # NOTE(review): a 32x32 kernel here is unusually large — confirm this is
    # intended rather than e.g. a 5x5 kernel.
    g_out = slim.convolution2d_transpose(\
        gen3,num_outputs=1,kernel_size=[32,32],padding="SAME",\
        biases_initializer=None,activation_fn=tf.nn.tanh,\
        scope='g_out', weights_initializer=initializer)
    return g_out
"""
Explanation: Defining the Adversarial Networks
Generator Network
The generator takes a vector of random numbers and transforms it into a 32x32 image. Each layer in the network involves a strided transpose convolution, batch normalization, and rectified nonlinearity. Tensorflow's slim library allows us to easily define each of these layers.
End of explanation
"""
def discriminator(bottom, reuse=False):
    """Map a batch of 32x32x1 images to a sigmoid probability of being real.

    Strided leaky-ReLU convolutions (no pooling); the first conv layer has no
    batch norm (no normalizer_fn is given). Pass reuse=True to share weights
    between the real-image and generated-image branches of the graph.
    """
    # 32x32 -> 16x16
    dis1 = slim.convolution2d(bottom,16,[4,4],stride=[2,2],padding="SAME",\
        biases_initializer=None,activation_fn=lrelu,\
        reuse=reuse,scope='d_conv1',weights_initializer=initializer)
    # 16x16 -> 8x8
    dis2 = slim.convolution2d(dis1,32,[4,4],stride=[2,2],padding="SAME",\
        normalizer_fn=slim.batch_norm,activation_fn=lrelu,\
        reuse=reuse,scope='d_conv2', weights_initializer=initializer)
    # 8x8 -> 4x4
    dis3 = slim.convolution2d(dis2,64,[4,4],stride=[2,2],padding="SAME",\
        normalizer_fn=slim.batch_norm,activation_fn=lrelu,\
        reuse=reuse,scope='d_conv3',weights_initializer=initializer)
    # Flatten and map to a single real/fake probability.
    d_out = slim.fully_connected(slim.flatten(dis3),1,activation_fn=tf.nn.sigmoid,\
        reuse=reuse,scope='d_out', weights_initializer=initializer)
    return d_out
"""
Explanation: Discriminator Network
The discriminator network takes as input a 32x32 image and transforms it into a single valued probability of being generated from real-world data. Again we use tf.slim to define the convolutional layers, batch normalization, and weight initialization.
End of explanation
"""
# Build the full GAN graph: placeholders, both networks, losses and updates.
tf.reset_default_graph()

z_size = 100 #Size of z vector used for generator.

# This initializer is used to initialize all the weights of the network.
initializer = tf.truncated_normal_initializer(stddev=0.02)

# These two placeholders are used for input into the generator and discriminator, respectively.
z_in = tf.placeholder(shape=[None,z_size],dtype=tf.float32) #Random vector
real_in = tf.placeholder(shape=[None,32,32,1],dtype=tf.float32) #Real images

Gz = generator(z_in) #Generates images from random z vectors
Dx = discriminator(real_in) #Produces probabilities for real images
Dg = discriminator(Gz,reuse=True) #Produces probabilities for generator images

# These functions together define the optimization objective of the GAN.
# NOTE(review): tf.log saturates to -inf when a sigmoid output reaches 0;
# consider adding a small epsilon (e.g. 1e-8) inside the logs for stability.
d_loss = -tf.reduce_mean(tf.log(Dx) + tf.log(1.-Dg)) #This optimizes the discriminator.
g_loss = -tf.reduce_mean(tf.log(Dg)) #This optimizes the generator.

tvars = tf.trainable_variables()

# The below code is responsible for applying gradient descent to update the GAN.
trainerD = tf.train.AdamOptimizer(learning_rate=0.0002,beta1=0.5)
trainerG = tf.train.AdamOptimizer(learning_rate=0.0002,beta1=0.5)
# NOTE(review): the 0:9 / 9: split assumes the generator's 9 variables were
# created first (generator() is called before discriminator() above); filtering
# tvars by the 'g_'/'d_' scope prefixes would be more robust.
d_grads = trainerD.compute_gradients(d_loss,tvars[9:]) #Only update the weights for the discriminator network.
g_grads = trainerG.compute_gradients(g_loss,tvars[0:9]) #Only update the weights for the generator network.
update_D = trainerD.apply_gradients(d_grads)
update_G = trainerG.apply_gradients(g_grads)
"""
Explanation: Connecting them together
End of explanation
"""
# Train the GAN: alternate one discriminator update with two generator updates
# per iteration, logging losses and saving samples/checkpoints periodically.
batch_size = 128 #Size of image batch to apply at each iteration.
iterations = 500000 #Total number of iterations to use.
sample_directory = './figs' #Directory to save sample images from generator in.
model_directory = './models' #Directory to save trained model to.

init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    for i in range(iterations):
        zs = np.random.uniform(-1.0,1.0,size=[batch_size,z_size]).astype(np.float32) #Generate a random z batch
        xs,_ = mnist.train.next_batch(batch_size) #Draw a sample batch from MNIST dataset.
        xs = (np.reshape(xs,[batch_size,28,28,1]) - 0.5) * 2.0 #Transform it to be between -1 and 1
        xs = np.lib.pad(xs, ((0,0),(2,2),(2,2),(0,0)),'constant', constant_values=(-1, -1)) #Pad the images so they are 32x32
        _,dLoss = sess.run([update_D,d_loss],feed_dict={z_in:zs,real_in:xs}) #Update the discriminator
        _,gLoss = sess.run([update_G,g_loss],feed_dict={z_in:zs}) #Update the generator, twice for good measure.
        _,gLoss = sess.run([update_G,g_loss],feed_dict={z_in:zs})
        if i % 10 == 0:
            # BUGFIX: use the print() function (Python 3 compatible; other
            # cells in this file already call print() as a function).
            print("Gen Loss: " + str(gLoss) + " Disc Loss: " + str(dLoss))
            z2 = np.random.uniform(-1.0,1.0,size=[batch_size,z_size]).astype(np.float32) #Generate another z batch
            newZ = sess.run(Gz,feed_dict={z_in:z2}) #Use new z to get sample images from generator.
            if not os.path.exists(sample_directory):
                os.makedirs(sample_directory)
            #Save sample generator images for viewing training progress.
            save_images(np.reshape(newZ[0:36],[36,32,32]),[6,6],sample_directory+'/fig'+str(i)+'.png')
        if i % 1000 == 0 and i != 0:
            if not os.path.exists(model_directory):
                os.makedirs(model_directory)
            saver.save(sess,model_directory+'/model-'+str(i)+'.cptk')
            print("Saved Model")
"""
Explanation: Training the network
Now that we have fully defined our network, it is time to train it!
End of explanation
"""
# Restore a trained generator from its checkpoint and save a grid of samples.
sample_directory = './figs' #Directory to save sample images from generator in.
model_directory = './models' #Directory to load trained model from.
batch_size_sample = 36

init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    #Reload the model.
    print('Loading Model...')
    # BUGFIX: `path` was undefined in this cell; checkpoints are written to
    # model_directory by the training cell above.
    ckpt = tf.train.get_checkpoint_state(model_directory)
    saver.restore(sess,ckpt.model_checkpoint_path)

    zs = np.random.uniform(-1.0,1.0,size=[batch_size_sample,z_size]).astype(np.float32) #Generate a random z batch
    # BUGFIX: `z2` was undefined; feed the batch `zs` generated just above.
    newZ = sess.run(Gz,feed_dict={z_in:zs}) #Use z batch to get sample images from generator.
    if not os.path.exists(sample_directory):
        os.makedirs(sample_directory)
    # BUGFIX: `i` was undefined; use a fixed name for the restored-model sample.
    save_images(np.reshape(newZ[0:batch_size_sample],[36,32,32]),[6,6],sample_directory+'/fig_restored.png')
"""
Explanation: Using a trained network
Once we have a trained model saved, we may want to use it to generate new images, and explore the representation it has learned.
End of explanation
"""
|
mrustl/flopy | examples/Notebooks/flopy3_mnw2package_example.ipynb | bsd-3-clause | import sys
import os
import numpy as np
try:
import pandas as pd
except:
pass
import flopy
"""
Explanation: MNW2 package example
End of explanation
"""
# A minimal 5x5, 3-layer, 3-stress-period model to host the MNW2 package.
m = flopy.modflow.Modflow('mnw2example', model_ws='temp')
dis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m)
"""
Explanation: Make an MNW2 package from scratch
End of explanation
"""
# Node-by-node MNW2 well information: two wells, well1 with two nodes.
# Columns mix per-node variables (ztop, zbotm, rw) with variables that are
# constant for the whole well (losstype, zpump, ...).
node_data = pd.DataFrame([[1, 1, 9.5, 7.1, 'well1', 'skin', -1, 0, 0, 0, 1., 2., 5., 6.2],
                          [1, 1, 7.1, 5.1, 'well1', 'skin', -1, 0, 0, 0, 0.5, 2., 5., 6.2],
                          [3, 3, 9.1, 3.7, 'well2', 'skin', -1, 0, 0, 0, 1., 2., 5., 4.1]],
                         columns=['i', 'j', 'ztop', 'zbotm', 'wellid', 'losstype', 'pumploc', 'qlimit', 'ppflag', 'pumpcap',
                                  'rw', 'rskin', 'kskin', 'zpump'])
node_data
"""
Explanation: MNW2 information by node
(this could be prepared externally from well records and read in from a csv or excel file)
* this table has two multi-node wells, the first (well1) consisting of two nodes that are manually specified
(where the variable rw is specified by node)
* note that some variables that are constant for the whole well are also included (losstype, zpump, etc.)
End of explanation
"""
node_data = node_data.to_records()
node_data
"""
Explanation: convert the DataFrame to a rec array for compatibility with flopy
End of explanation
"""
# Desired pumping rate (qdes) for each well in each stress period.
_rows = [(0, 'well1', 0),
         (1, 'well1', 100.0),
         (0, 'well2', 0),
         (1, 'well2', 1000.)]
stress_period_data = pd.DataFrame(_rows, columns=['per', 'wellid', 'qdes'])
stress_period_data
# Convert to the {stress period: recarray} layout flopy expects.
pers = stress_period_data.groupby('per')
stress_period_data = dict()
for per in [0, 1]:
    stress_period_data[per] = pers.get_group(per).to_records()
stress_period_data
"""
Explanation: Stress period information
(could also be developed externally)
End of explanation
"""
# Build the MNW2 package from the node and stress-period tables above.
mnw2 = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,
                                 node_data=node_data,
                                 stress_period_data=stress_period_data,
                                 itmp=[2, 2, -1], # reuse second per pumping for last stress period
                                 )
# "nodtot" is computed automatically
mnw2.nodtot
# Inspect the tables the package built internally.
pd.DataFrame(mnw2.node_data)
pd.DataFrame(mnw2.stress_period_data[0])
pd.DataFrame(mnw2.stress_period_data[1])
# A package created with itmp alone (no data yet), used below to generate
# empty template tables.
tmp = flopy.modflow.ModflowMnw2(model=m,
                                itmp=[1, 1, -1], # reuse second per pumping for last stress period
                                )
"""
Explanation: Make ModflowMnw2 package object
note that extraneous columns in node_data and stress_period_data are ignored
if itmp is positive, it must equal the number of active wells being specified in stress_period_data, otherwise the package class will raise an error.
End of explanation
"""
# Generate an empty node_data recarray template (3 nodes) to be filled in.
node_data = tmp.get_empty_node_data(3)
node_data
"""
Explanation: empty node_data and stress_period_data tables can also be generated by the package class, and then filled
End of explanation
"""
# The Mnw objects created on package construction (a dict keyed by wellid).
mnw2.mnw
mnw2.mnw['well1'].__dict__
"""
Explanation: Mnw objects
at the base of the flopy mnw2 module is the Mnw object class, which describes a single multi-node well.
A list or dict of Mnw objects can be used to build a package (using the example above):
flopy.modflow.ModflowMnw2(model=m, mnwmax=2,
mnw=<dict or list of Mnw objects>,
itmp=[1, 1, -1], # reuse second per pumping for last stress period
)
or if node_data and stress_period_data are supplied, the Mnw objects are created on initialization of the ModflowMnw2 class instance, and assigned to the .mnw attribute, as items in a dictionary keyed by wellid.
End of explanation
"""
pd.DataFrame(mnw2.mnw['well1'].node_data)
"""
Explanation: Note that Mnw object attributes for variables that vary by node are lists (e.g. rw above)
Each Mnw object has its own node_data and stress_period_data
End of explanation
"""
pd.DataFrame(mnw2.mnw['well2'].stress_period_data)
"""
Explanation: Instead of a dict keyed by stress period, Mnw.stress_period_data is a recarray with pumping data listed by stress period for that well
note that data for period 2, where itmp < 1, is shown (was copied from s.p. 1 during construction of the Mnw object)
End of explanation
"""
# Rebuild an equivalent package directly from the Mnw objects.
mnw2fromobj = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,
                                        mnw=mnw2.mnw,
                                        itmp=[2, 2, -1], # reuse second per pumping for last stress period
                                        )
pd.DataFrame(mnw2fromobj.node_data)
pd.DataFrame(mnw2fromobj.stress_period_data[0])
pd.DataFrame(mnw2fromobj.stress_period_data[1])
"""
Explanation: Build the same package using only the Mnw objects
End of explanation
"""
# Empty stress_period_data template for itmp=2 active wells.
per1 = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(itmp=2)
per1
"""
Explanation: By default, the node_data and stress_period_data tables attached to the ModflowMnw2 package class are definitive
on writing of the package output (mnw2.write_file()), the Mnw objects are regenerated from the tables. This setting is controlled by the default argument use_tables=True. To write the package file using the Mnw objects (ignoring the tables), use mnw2.write_file(use_tables=False).
End of explanation
"""
# Write the MNW2 input file and echo its contents.
mnw2.write_file(os.path.join('temp/test.mnw2'))
# BUGFIX: read with a context manager (the original left the file handle open)
# and use a plain loop instead of a side-effect list comprehension.
with open('temp/test.mnw2') as f:
    for l in f:
        print(l.strip('\n'))
"""
Explanation: Write an MNW2 package file and inspect the results
End of explanation
"""
# Load an existing MNW2 package (the MNW2-Fig28 example) into a model.
path = os.path.join('..', '..', 'examples', 'data', 'mf2005_test')
cpth = os.path.join('..', '..', 'autotest', 'temp')

m = flopy.modflow.Modflow('MNW2-Fig28', model_ws=cpth)
dis = flopy.modflow.ModflowDis.load(path + '/MNW2-Fig28.dis', m)
m.get_package_list()
mnw2pth = os.path.join(path, 'MNW2-Fig28.mnw2')
mnw2 = flopy.modflow.ModflowMnw2.load(mnw2pth, m)
# Inspect the loaded tables and Mnw objects.
pd.DataFrame(mnw2.node_data)
pd.DataFrame(mnw2.stress_period_data[0])
mnw2.mnw
pd.DataFrame(mnw2.mnw['Well-A'].stress_period_data)
# Load a second, larger example (BadRiver) and show only the non-empty columns.
path = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples')
cpth = os.path.join('temp')
m = flopy.modflow.Modflow('br', model_ws=cpth)
mnw2 = flopy.modflow.ModflowMnw2.load(path + '/BadRiver_cal.mnw2', m)
df = pd.DataFrame(mnw2.node_data)
df.loc[:, df.sum(axis=0) != 0]
"""
Explanation: Load some example MNW2 packages
End of explanation
"""
|
jottenlips/aima-python | search.ipynb | mit | from search import *
"""
Explanation: Solving problems by Searching
This notebook serves as supporting material for topics covered in Chapter 3 - Solving Problems by Searching and Chapter 4 - Beyond Classical Search from the book Artificial Intelligence: A Modern Approach. This notebook uses implementations from search.py module. Let's start by importing everything from search module.
End of explanation
"""
%psource Problem
"""
Explanation: Review
Here, we learn about problem solving. Building goal-based agents that can plan ahead to solve problems, in particular the navigation / route-finding problem. First, we will start the problem solving by precisely defining problems and their solutions. We will look at several general-purpose search algorithms. Broadly, search algorithms are classified into two types:
Uninformed search algorithms: Search algorithms which explore the search space without having any information about the problem other than its definition.
Examples:
Breadth First Search
Depth First Search
Depth Limited Search
Iterative Deepening Search
Informed search algorithms: These types of algorithms leverage any information (heuristics, path cost) about the problem to search through the search space and find the solution efficiently.
Examples:
Best First Search
Uniform Cost Search
A* Search
Recursive Best First Search
Don't miss the visualisations of these algorithms solving route-finding problem defined on romania map at the end of this notebook.
Problem
Let's see how we define a Problem. Run the next cell to see how abstract class Problem is defined in the search module.
End of explanation
"""
%psource GraphProblem
"""
Explanation: The Problem class has six methods.
__init__(self, initial, goal) : This is what is called a constructor and is the first method called when you create an instance of class. initial specifies the initial state of our search problem. It represents the start state from where our agent begins its task of exploration to find the goal state(s) which is given in the goal parameter.
actions(self, state) : This method returns all the possible actions agent can execute in the given state state.
result(self, state, action) : This returns the resulting state if action action is taken in the state state. This Problem class only deals with deterministic outcomes. So we know for sure what every action in a state would result to.
goal_test(self, state) : Given a graph state, it checks if it is a terminal state. If the state is indeed a goal state, value of True is returned. Else, of course, False is returned.
path_cost(self, c, state1, action, state2) : Return the cost of the path that arrives at state2 as a result of taking action from state1, assuming total cost of c to get up to state1.
value(self, state) : This acts as a bit of extra information in problems where we try to optimize a value when we cannot do a goal test.
We will use the abstract class Problem to define our real problem named GraphProblem. You can see how we define GraphProblem by running the next cell.
End of explanation
"""
# Undirected graph of Romanian cities; edge values are road distances
# between the two cities ([Figure 3.2] in AIMA).
romania_map = UndirectedGraph(dict(
    Arad=dict(Zerind=75, Sibiu=140, Timisoara=118),
    Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211),
    Craiova=dict(Drobeta=120, Rimnicu=146, Pitesti=138),
    Drobeta=dict(Mehadia=75),
    Eforie=dict(Hirsova=86),
    Fagaras=dict(Sibiu=99),
    Hirsova=dict(Urziceni=98),
    Iasi=dict(Vaslui=92, Neamt=87),
    Lugoj=dict(Timisoara=111, Mehadia=70),
    Oradea=dict(Zerind=71, Sibiu=151),
    Pitesti=dict(Rimnicu=97),
    Rimnicu=dict(Sibiu=80),
    Urziceni=dict(Vaslui=142)))

# (x, y) coordinates for each city, used both for drawing the map and for the
# straight-line-distance heuristic of informed searches.
romania_map.locations = dict(
    Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288),
    Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449),
    Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506),
    Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537),
    Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410),
    Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350),
    Vaslui=(509, 444), Zerind=(108, 531))
"""
Explanation: Now it's time to define our problem. We will define it by passing initial, goal, graph to GraphProblem. So, our problem is to find the goal state starting from the given initial state on the provided graph. Have a look at our romania_map, which is an Undirected Graph containing a dict of nodes as keys and neighbours as values.
End of explanation
"""
romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)
"""
Explanation: It is pretty straight forward to understand this romania_map. The first node Arad has three neighbours named Zerind, Sibiu, Timisoara. Each of these nodes are 75, 140, 118 units apart from Arad respectively. And the same goes with other nodes.
And romania_map.locations contains the positions of each of the nodes. We will use the straight line distance (which is different from the one provided in romania_map) between two cities in algorithms like A*-search and Recursive Best First Search.
Define a problem:
Hmm... say we want to start exploring from Arad and try to find Bucharest in our romania_map. So, this is how we do it.
End of explanation
"""
romania_locations = romania_map.locations
print(romania_locations)
"""
Explanation: Romania map visualisation
Let's have a visualisation of Romania map [Figure 3.2] from the book and see how different searching algorithms perform / how frontier expands in each search algorithm for a simple problem named romania_problem.
Have a look at romania_locations. It is a dictionary defined in search module. We will use these location values to draw the romania graph using networkx.
End of explanation
"""
%matplotlib inline
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import lines
from ipywidgets import interact
import ipywidgets as widgets
from IPython.display import display
import time
"""
Explanation: Let's start the visualisations by importing necessary modules. We use networkx and matplotlib to show the map in notebook and we use ipywidgets to interact with the map to see how the searching algorithm works.
End of explanation
"""
# Build the networkx graph used for all visualisations below.
# initialise a graph
G = nx.Graph()

# use this while labeling nodes in the map
node_labels = dict()
# use this to modify colors of nodes while exploring the graph.
# This is the only dict we send to `show_map(node_colors)` while drawing the map
node_colors = dict()
for n, p in romania_locations.items():  # p (the location) is unused here
    # add nodes from romania_locations
    G.add_node(n)
    # add nodes to node_labels
    node_labels[n] = n
    # node_colors to color nodes while exploring romania map
    node_colors[n] = "white"

# we'll save the initial node colors to a dict to use later
initial_node_colors = dict(node_colors)

# positions for node labels, offset slightly below the node itself
node_label_pos = {k:[v[0],v[1]-10] for k,v in romania_locations.items()}

# use this while labeling edges
edge_labels = dict()

# add edges between cities in romania map - UndirectedGraph defined in search.py
for node in romania_map.nodes():
    connections = romania_map.get(node)
    for connection in connections.keys():
        distance = connections[connection]
        # add edges to the graph
        G.add_edge(node, connection)
        # add distances to edge_labels
        edge_labels[(node, connection)] = distance
"""
Explanation: Let's get started by initializing an empty graph. We will add nodes, place the nodes in their location as shown in the book, add edges to the graph.
End of explanation
"""
def show_map(node_colors):
    """Draw the Romania graph with the given per-node colors.

    node_colors maps city name -> matplotlib color string; the legend explains
    the color coding used by the search visualisations below.
    """
    # set the size of the plot
    plt.figure(figsize=(18,13))
    # draw the graph (both nodes and edges) with locations from romania_locations
    nx.draw(G, pos = romania_locations, node_color = [node_colors[node] for node in G.nodes()])

    # draw labels for nodes
    node_label_handles = nx.draw_networkx_labels(G, pos = node_label_pos, labels = node_labels, font_size = 14)
    # add a white bounding box behind the node labels
    [label.set_bbox(dict(facecolor='white', edgecolor='none')) for label in node_label_handles.values()]

    # add edge labels to the graph
    nx.draw_networkx_edge_labels(G, pos = romania_locations, edge_labels=edge_labels, font_size = 14)

    # add a legend explaining the exploration color scheme
    white_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="white")
    orange_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="orange")
    red_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="red")
    gray_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="gray")
    plt.legend((white_circle, orange_circle, red_circle, gray_circle),
               ('Un-explored', 'Frontier', 'Currently exploring', 'Explored'),
               numpoints=1,prop={'size':16}, loc=(.8,.75))

    # show the plot. No need to use in notebooks. nx.draw will show the graph itself.
    plt.show()
"""
Explanation: We have completed building our graph based on romania_map and its locations. It's time to display it here in the notebook. This function show_map(node_colors) helps us do that. We will be calling this function later on to display the map at each and every interval step while searching using variety of algorithms from the book.
End of explanation
"""
show_map(node_colors)
"""
Explanation: We can simply call the function with node_colors dictionary object to display it.
End of explanation
"""
def final_path_colors(problem, solution):
    """Return a node_colors dict with the start node and the solution path in green."""
    colors = dict(initial_node_colors)
    for city in [problem.initial] + list(solution):
        colors[city] = "green"
    return colors
def display_visual(user_input, algorithm=None, problem=None):
    """Display interactive widgets (slider + Visualize button) for a search run.

    With user_input=False, runs the given `algorithm` on the given `problem`.
    With user_input=True, shows dropdowns for start/goal city (and for the
    algorithm, when none is supplied) and builds the problem from them.
    Stores the per-step color snapshots in the module-global all_node_colors.
    """
    if user_input == False:
        def slider_callback(iteration):
            # don't show graph for the first time running the cell calling this function
            try:
                show_map(all_node_colors[iteration])
            except:
                pass
        def visualize_callback(Visualize):
            if Visualize is True:
                button.value = False
                global all_node_colors
                iterations, all_node_colors, node = algorithm(problem)
                solution = node.solution()
                # append the final all-green path frame
                all_node_colors.append(final_path_colors(problem, solution))
                slider.max = len(all_node_colors) - 1
                # step the slider through every frame to animate the search
                for i in range(slider.max + 1):
                    slider.value = i
                    # time.sleep(.5)
        slider = widgets.IntSlider(min=0, max=1, step=1, value=0)
        slider_visual = widgets.interactive(slider_callback, iteration = slider)
        display(slider_visual)
        button = widgets.ToggleButton(value = False)
        button_visual = widgets.interactive(visualize_callback, Visualize = button)
        display(button_visual)

    if user_input == True:
        node_colors = dict(initial_node_colors)
        if algorithm == None:
            algorithms = {"Breadth First Tree Search": breadth_first_tree_search, "Breadth First Search": breadth_first_search, "Uniform Cost Search": uniform_cost_search, "A-star Search": astar_search}
            algo_dropdown = widgets.Dropdown(description = "Search algorithm: ", options = sorted(list(algorithms.keys())), value = "Breadth First Tree Search")
            display(algo_dropdown)
        def slider_callback(iteration):
            # don't show graph for the first time running the cell calling this function
            try:
                show_map(all_node_colors[iteration])
            except:
                pass
        def visualize_callback(Visualize):
            if Visualize is True:
                button.value = False
                problem = GraphProblem(start_dropdown.value, end_dropdown.value, romania_map)
                global all_node_colors
                # NOTE(review): if `algorithm` is not None in this branch,
                # user_algorithm is never assigned and the call below raises
                # NameError — confirm this path is only used with algorithm=None.
                if algorithm == None:
                    user_algorithm = algorithms[algo_dropdown.value]
                # print(user_algorithm)
                # print(problem)
                iterations, all_node_colors, node = user_algorithm(problem)
                solution = node.solution()
                all_node_colors.append(final_path_colors(problem, solution))
                slider.max = len(all_node_colors) - 1
                for i in range(slider.max + 1):
                    slider.value = i
                    # time.sleep(.5)
        start_dropdown = widgets.Dropdown(description = "Start city: ", options = sorted(list(node_colors.keys())), value = "Arad")
        display(start_dropdown)
        end_dropdown = widgets.Dropdown(description = "Goal city: ", options = sorted(list(node_colors.keys())), value = "Fagaras")
        display(end_dropdown)
        button = widgets.ToggleButton(value = False)
        button_visual = widgets.interactive(visualize_callback, Visualize = button)
        display(button_visual)
        slider = widgets.IntSlider(min=0, max=1, step=1, value=0)
        slider_visual = widgets.interactive(slider_callback, iteration = slider)
        display(slider_visual)
"""
Explanation: Voila! You see, the romania map as shown in the Figure[3.2] in the book. Now, see how different searching algorithms perform with our problem statements.
Searching algorithms visualisations
In this section, we have visualisations of the following searching algorithms:
Breadth First Tree Search - Implemented
Depth First Tree Search
Depth First Graph Search
Breadth First Search - Implemented
Best First Graph Search
Uniform Cost Search - Implemented
Depth Limited Search
Iterative Deepening Search
A*-Search - Implemented
Recursive Best First Search
We add the colors to the nodes to have a nice visualisation when displaying. So, these are the different colors we are using in these visuals:
* Un-explored nodes - <font color='black'>white</font>
* Frontier nodes - <font color='orange'>orange</font>
* Currently exploring node - <font color='red'>red</font>
* Already explored nodes - <font color='gray'>gray</font>
Now, we will define some helper methods to display interactive buttons and sliders when visualising search algorithms.
End of explanation
"""
def tree_search(problem, frontier):
    """Search through the successors of a problem to find a goal.
    The argument frontier should be an empty queue.
    Don't worry about repeated paths to a state. [Figure 3.7]

    Returns (iterations, all_node_colors, goal_node) for visualisation,
    or None if the frontier empties without reaching the goal."""
    # we use these two variables at the time of visualisations
    iterations = 0
    all_node_colors = []
    node_colors = dict(initial_node_colors)

    start = Node(problem.initial)
    frontier.append(start)
    # BUGFIX: the original built a second, throwaway Node(problem.initial)
    # just to read its .state; reuse the node already on the frontier.
    node_colors[start.state] = "orange"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    while frontier:
        node = frontier.pop()

        # modify the currently searching node to red
        node_colors[node.state] = "red"
        iterations += 1
        all_node_colors.append(dict(node_colors))

        if problem.goal_test(node.state):
            # modify goal node to green after reaching the goal
            node_colors[node.state] = "green"
            iterations += 1
            all_node_colors.append(dict(node_colors))
            return(iterations, all_node_colors, node)

        # BUGFIX: expand each node only once. The original called
        # node.expand(problem) twice (once for the frontier, once for the
        # colors), doing all successor generation and path-cost work twice.
        children = node.expand(problem)
        frontier.extend(children)

        for child in children:
            node_colors[child.state] = "orange"
        iterations += 1
        all_node_colors.append(dict(node_colors))

        # modify the color of explored nodes to gray
        node_colors[node.state] = "gray"
        iterations += 1
        all_node_colors.append(dict(node_colors))

    return None
def breadth_first_tree_search(problem):
    """Search the shallowest nodes in the search tree first (FIFO frontier)."""
    return tree_search(problem, FIFOQueue())
"""
Explanation: Breadth first tree search
We have a working implementation in search module. But as we want to interact with the graph while it is searching, we need to modify the implementation. Here's the modified breadth first tree search.
End of explanation
"""
all_node_colors = []
romania_problem = GraphProblem('Arad', 'Fagaras', romania_map)
display_visual(user_input = False, algorithm = breadth_first_tree_search, problem = romania_problem)
"""
Explanation: Now, we use ipywidgets to display a slider, a button and our romania map. By sliding the slider we can have a look at all the intermediate steps of a particular search algorithm. By pressing the button Visualize, you can see all the steps without interacting with the slider. These two helper functions are the callback function which are called when we interact with slider and the button.
End of explanation
"""
def breadth_first_search(problem):
    """Graph version of BFS, goal-tested at node generation time. [Figure 3.11]

    Returns (iterations, all_node_colors, goal_node) for visualisation, or
    None if the search exhausts the frontier without finding the goal."""
    # we use these two variables at the time of visualisations
    iterations = 0
    all_node_colors = []
    node_colors = dict(initial_node_colors)

    node = Node(problem.initial)
    node_colors[node.state] = "red"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    if problem.goal_test(node.state):
        node_colors[node.state] = "green"
        iterations += 1
        all_node_colors.append(dict(node_colors))
        return(iterations, all_node_colors, node)

    frontier = FIFOQueue()
    frontier.append(node)

    # modify the color of frontier nodes to orange
    node_colors[node.state] = "orange"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    # explored set prevents re-expanding repeated states (graph search)
    explored = set()
    while frontier:
        node = frontier.pop()
        node_colors[node.state] = "red"
        iterations += 1
        all_node_colors.append(dict(node_colors))

        explored.add(node.state)

        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                # goal test on generation: BFS can stop as soon as the goal appears
                if problem.goal_test(child.state):
                    node_colors[child.state] = "green"
                    iterations += 1
                    all_node_colors.append(dict(node_colors))
                    return(iterations, all_node_colors, child)
                frontier.append(child)

                node_colors[child.state] = "orange"
                iterations += 1
                all_node_colors.append(dict(node_colors))

        # modify the color of explored nodes to gray
        node_colors[node.state] = "gray"
        iterations += 1
        all_node_colors.append(dict(node_colors))

    return None
all_node_colors = []
romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)
display_visual(user_input = False, algorithm = breadth_first_search, problem = romania_problem)
"""
Explanation: Breadth first search
Let's change all the node_colors to starting position and define a different problem statement.
End of explanation
"""
def best_first_graph_search(problem, f):
    """Expand nodes in order of increasing f(node).

    With f as a heuristic estimate to the goal this is greedy best-first
    search; with f = node.depth it behaves like breadth-first search.
    f is memoized (cached on the nodes), so after the search you can inspect
    the f values along the returned path."""
    # Bookkeeping for the visualisation widget: count every colour update and
    # keep a snapshot of the whole colour map after each change.
    step_count = 0
    color_history = []
    colors = dict(initial_node_colors)

    def record(state, color):
        nonlocal step_count
        colors[state] = color
        step_count += 1
        color_history.append(dict(colors))

    f = memoize(f, 'f')
    root = Node(problem.initial)
    record(root.state, "red")  # red: node currently under consideration
    if problem.goal_test(root.state):
        record(root.state, "green")  # green: goal reached
        return (step_count, color_history, root)

    frontier = PriorityQueue(min, f)
    frontier.append(root)
    record(root.state, "orange")  # orange: node is on the frontier

    explored = set()
    while frontier:
        current = frontier.pop()
        record(current.state, "red")
        # Unlike BFS, the goal is tested when a node is popped, not generated.
        if problem.goal_test(current.state):
            record(current.state, "green")
            return (step_count, color_history, current)
        explored.add(current.state)
        for child in current.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
                record(child.state, "orange")
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    # A cheaper path to this state was found: replace it.
                    del frontier[incumbent]
                    frontier.append(child)
                    record(child.state, "orange")
        record(current.state, "gray")  # gray: node fully explored
    return None
def uniform_cost_search(problem):
    "[Figure 3.14]"
    # Uniform-cost search is best-first search ordered by path cost g(n).
    search_result = best_first_graph_search(problem, lambda node: node.path_cost)
    step_count, color_history, goal_node = search_result
    return (step_count, color_history, goal_node)
# Reset the colour history and animate uniform-cost search on the same
# Arad -> Bucharest problem.
all_node_colors = []
romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)
display_visual(user_input = False, algorithm = uniform_cost_search, problem = romania_problem)
"""
Explanation: Uniform cost search
Let's change all the node_colors to starting position and define a different problem statement.
End of explanation
"""
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    # NOTE: this cell re-defines the same function shown in the
    # uniform-cost-search section; it is kept verbatim so the cell runs standalone.
    # we use these two variables at the time of visualisations
    iterations = 0
    all_node_colors = []
    node_colors = dict(initial_node_colors)
    f = memoize(f, 'f')
    node = Node(problem.initial)
    # red: node currently under consideration
    node_colors[node.state] = "red"
    iterations += 1
    all_node_colors.append(dict(node_colors))
    if problem.goal_test(node.state):
        # green: goal reached
        node_colors[node.state] = "green"
        iterations += 1
        all_node_colors.append(dict(node_colors))
        return(iterations, all_node_colors, node)
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    # orange: node is on the frontier
    node_colors[node.state] = "orange"
    iterations += 1
    all_node_colors.append(dict(node_colors))
    explored = set()
    while frontier:
        node = frontier.pop()
        node_colors[node.state] = "red"
        iterations += 1
        all_node_colors.append(dict(node_colors))
        # The goal is tested when a node is popped (expanded), not generated.
        if problem.goal_test(node.state):
            node_colors[node.state] = "green"
            iterations += 1
            all_node_colors.append(dict(node_colors))
            return(iterations, all_node_colors, node)
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
                node_colors[child.state] = "orange"
                iterations += 1
                all_node_colors.append(dict(node_colors))
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    # A cheaper path to this state was found: replace it.
                    del frontier[incumbent]
                    frontier.append(child)
                    node_colors[child.state] = "orange"
                    iterations += 1
                    all_node_colors.append(dict(node_colors))
        # gray: node fully explored
        node_colors[node.state] = "gray"
        iterations += 1
        all_node_colors.append(dict(node_colors))
    return None
def astar_search(problem, h=None):
    """A* search: best-first graph search with f(n) = g(n) + h(n).

    Pass the heuristic h here, or define it on your Problem subclass."""
    heuristic = memoize(h or problem.h, 'h')
    search_result = best_first_graph_search(problem, lambda n: n.path_cost + heuristic(n))
    step_count, color_history, goal_node = search_result
    return (step_count, color_history, goal_node)
# Animate A* on the Arad -> Bucharest problem, then open the interactive
# widget where the user picks the algorithm.
all_node_colors = []
romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)
display_visual(user_input = False, algorithm = astar_search, problem = romania_problem)
all_node_colors = []
# display_visual(user_input = True, algorithm = breadth_first_tree_search)
display_visual(user_input = True)
# Solve the museum graph from 'Start' to 'Penguin' with uniform-cost search.
penguin_problem = GraphProblem('Start', 'Penguin', museum_graph)
# uniform_cost_search returns (iterations, all_node_colors, node); the goal
# node is the last element, so unpack it before asking for the solution path.
# (Calling .solution() on the raw tuple raised AttributeError.)
_, _, penguin_ucs_node = uniform_cost_search(penguin_problem)
penguin_ucs_node.solution()
"""
Explanation: A* search
Let's change all the node_colors to starting position and define a different problem statement.
End of explanation
"""
|
phoebe-project/phoebe2-docs | development/tutorials/21_22_ld_coeffs_source.ipynb | gpl-3.0 | import phoebe
# Build a default binary system, attach a light-curve dataset, and inspect
# the limb-darkening (ld*) parameters it exposes.
b = phoebe.default_binary()
b.add_dataset('lc', dataset='lc01')
print(b.filter(qualifier='ld*', dataset='lc01'))
"""
Explanation: 2.1 - 2.2 Migration: ld_coeffs_source
PHOEBE 2.2 introduces the capability to interpolate limb-darkening coefficients for a given ld_func (e.g. linear, quadratic, etc.). In order to do so, there is now a new parameter called ld_coeffs_source which will default to 'auto'. The ld_coeffs parameter will not be visible unless ld_func is some value other than the default value of 'interp' AND ld_coeffs_source is manually set to 'none'. Any script in which ld_coeffs was set manually will now require an additional line setting ld_coeffs_source to 'none' (or alternatively removing the line setting ld_coeffs and instead relying on the new capability to interpolate).
Below is an example exhibiting the new behavior.
End of explanation
"""
print(b.filter(qualifier='ld*bol'))
"""
Explanation: By default, ld_func is set to 'interp'. This will interpolate the limb-darkening directly, without requiring a specific law/function.
Note, however, that the bolometric limb-darkening does not have 'interp' as an option. Bolometric limb-darkening is only used for irradiation/reflection, and must be set manually.
End of explanation
"""
print(b.get_parameter('ld_func', component='primary').choices)
"""
Explanation: Back to the dataset-specific limb-darkening, we can see the available options besides 'interp'.
End of explanation
"""
# Switching ld_func away from 'interp' exposes the new ld_coeffs_source parameters.
b.set_value_all('ld_func', 'linear')
print(b.filter(qualifier='ld*', dataset='lc01'))
"""
Explanation: And if we set the value of ld_func to anything other than 'interp', we'll now see new parameters for ld_coeffs_source. In PHOEBE 2.1, this would expose the ld_coeffs parameters instead. However, in PHOEBE 2.2+, limb-darkening will be interpolated automatically by default, requiring one extra step to manually set the coefficients.
End of explanation
"""
print(b.get_parameter('ld_coeffs_source', component='primary').choices)
"""
Explanation: Here we see there are several options available for ld_coeffs_source. See the limb-darkening tutorial for more details.
End of explanation
"""
# Setting ld_coeffs_source to 'none' makes the manual ld_coeffs parameter visible.
b.set_value('ld_coeffs_source', component='primary', value='none')
print(b.filter(qualifier='ld*', dataset='lc01'))
"""
Explanation: To manually set the coefficients, we must also set ld_coeffs_source to be 'none'.
End of explanation
"""
print(b.run_checks())
"""
Explanation: Now that ld_coeffs is visible, run_checks will fail if they are not of the correct length.
End of explanation
"""
# A linear law takes a single coefficient, so a length-1 list passes the checks.
b.set_value('ld_coeffs', component='primary', value=[0.5])
print(b.filter(qualifier='ld*', dataset='lc01'))
print(b.run_checks())
"""
Explanation: By manually setting the value of ld_coeffs to an appropriate value, the checks should pass.
End of explanation
"""
|
machow/siuba | examples/architecture/003-fast-mutate.ipynb | mit | %%capture
import pandas as pd
pd.set_option("display.max_rows", 5)
from siuba import _
from siuba.data import mtcars
g_cyl = mtcars.groupby("cyl")
## Both snippets below raise an error.... :/
# SeriesGroupBy does not implement arithmetic, so both lines below fail.
g_cyl.mpg + g_cyl.mpg
g_cyl.add(g_cyl.mpg)
"""
Explanation: Pandas fast mutate architecture
(Published 27 Oct 2019)
Problem: series operations are type invariant under grouping
What is type variance?
In spirit, most pandas operations are one of two functions.
f_elwise(a, [b]) - takes up to two series, returns a result of the same length.
f_agg(a, [b]) - takes up to two series, returns a result whose length is the number of groupings in the data.
Assuming that a SeriesGroupBy was built as a subtype of a Series object,
in the Liskov Substitution sense,
this would mean that..
f_elwise(SeriesGroupBy, SeriesGroupBy) -> Series
could easily support versions that are...
contravariant on input type - e.g. f_elwise(Series, ...) -> ...
covariant on output type - e.g. f_elwise(..., ...) -> SeriesGroupBy
This would be extremely convenient, since it means that defining a function like f_add = f_elwise(...), would support all operations in the python code below...
```python
from siuba.data import mtcars
g_cyl = mtcars.groupby('cyl')
assume this creates the function f_add
f_add = f_elwise('add')
mpg2 = f_add(mtcars.mpg, mtcars.mpg) # -> Series
g_cyl_mpg2 = f_add(g_cyl.mpg, g_cyl.mpg) # -> SeriesGroupBy
```
What does this look like in pandas?
The reality is that pandas SeriesGroupBy objects are not subtypes of a Series.
More than that, they do not support addition.
End of explanation
"""
# two ways to do it f_elwise
ser_mpg2 = mtcars.mpg + mtcars.mpg
ser_mpg2 = g_cyl.mpg.obj + g_cyl.mpg.obj  # .obj is the underlying ungrouped Series
# doing grouped aggregate
g_cyl.mpg.mean()
"""
Explanation: How are grouped operations currently handled in pandas?
f_elwise(a, [b]) - is handled using ungrouped pandas object (e.g. Series), or by using the grouped series .obj attribute.
f_agg(a, [b]) - is handled using custom SeriesGroupBy methods
This is shown below (note that all results are Series).
End of explanation
"""
def degroup(ser):
    """Return the underlying Series of a grouped object, or the input unchanged."""
    return getattr(ser, "obj", ser)

def f_add(x, y):
    """Element-wise addition accepting Series, SeriesGroupBy, or scalars."""
    return degroup(x) + degroup(y)

f_add(g_cyl.mpg, f_add(g_cyl.mpg, 1))
"""
Explanation: What about composing f_elwise and f_agg operations?
Let's take this in two steps
composing f_elwise operations alone
composing them with f_agg operations
1) f_elwise(a, f_elwise(b, [c]))
In this case, since the result could be a Series, or a SeriesGroupBy,
it shouldn't be a problem.
End of explanation
"""
from pandas.core import algorithms
def broadcast_agg_result(grouper, result, obj):
    # Simplified broadcasting from g_cyl.mpg.transform('mean')
    # Repeat each group's aggregate value back out to the original row order.
    ids, _, ngroup = grouper.group_info
    out = algorithms.take_1d(result._values, ids)
    return pd.Series(out, index=obj.index, name=obj.name)
# Mean aggregate broadcast back to the shape of the original (ungrouped) Series.
f_mean = lambda x: broadcast_agg_result(x.grouper, x.mean(), degroup(x))
f_add(f_mean(g_cyl.mpg), f_mean(g_cyl.hp))
"""
Explanation: Also, as noted in the first section, we are returning a Series here, but functions returning a SeriesGroupBy should also be compatible (so long as we enforce liskov substitution..).
2) f_elwise(f_agg(a), f_agg(b)) -> same length result
Suppose we wanted to add the mean mpg of each group, to each row of mpg in the original data.
In our system written above, this would look like...
```python
f_mean = f_agg('mean')
f_add = f_elwise('add')
res = f_add(g_cyl.mpg, f_mean(g_cyl.mpg))
```
Remember that for f_add, we laid out in the first section that it should allow functions to be substituted in that take a SeriesGroupBy (or parent type) and returns a Series (or subtype).
End of explanation
"""
f_add(g_cyl.mpg, f_add(f_mean(g_cyl.mpg), f_mean(g_cyl.hp)))
"""
Explanation: Notice we can keep going with this, since
f_add(SeriesGroupBy, SeriesGroupBy) -> Series
f_mean(SeriesGroupBy) -> Series
we are making SeriesGroupBy a subtype of Series
End of explanation
"""
from pandas.core.groupby import SeriesGroupBy
from pandas.core import algorithms
# Define Agg Result ----
def create_agg_result(ser, orig_object, orig_grouper):
    # since pandas groupby method is hard-coded to create a SeriesGroupBy, mock
    # AggResult below by making it a SeriesGroupBy whose grouper has 2 extra attributes
    obj = ser.groupby(ser.index)
    obj.grouper.orig_grouper = orig_grouper
    obj.grouper.orig_object = orig_object
    return obj
def is_agg_result(x):
    # An AggResult is recognised by the extra attributes stashed on its grouper.
    return hasattr(x, "grouper") and hasattr(x.grouper, "orig_grouper")
# Handling Grouped Operations ----
def regroup(ser, grouper):
    # Re-attach a grouping to a plain Series.
    return ser.groupby(grouper)
def degroup(ser):
    # returns tuple of (Series or literal, Grouper or None)
    # because we can't rely on type checking, use hasattr instead
    return getattr(ser, "obj", ser), getattr(ser, "grouper", None)
def f_mean(x):
    # SeriesGroupBy -> AggResult (one row per group)
    return create_agg_result(x.mean(), x.obj, x.grouper)
def broadcast_agg_result(g_ser, compare=None):
    """Returns a tuple of (Series, final op grouper)"""
    if not isinstance(g_ser, SeriesGroupBy):
        # Literal/scalar operand: borrow the other operand's grouper.
        return g_ser, compare.grouper
    # NOTE: now only applying for agg_result
    if not is_agg_result(g_ser):
        return degroup(g_ser)
    if g_ser.grouper.orig_grouper is compare.grouper:
        # Same grouping as the other operand: expand the 1-row-per-group
        # aggregate back to the original row order.
        orig = g_ser.grouper.orig_object
        grouper = g_ser.grouper.orig_grouper
        # Simplified broadcasting from g_cyl.mpg.transform('mean') implementation
        ids, _, ngroup = grouper.group_info
        out = algorithms.take_1d(g_ser.obj._values, ids)
        return pd.Series(out, index=orig.index, name=orig.name), grouper
    return degroup(g_ser)
# Define operations ----
def f_add(x, y):
    # SeriesGroupBy, SeriesGroupBy -> ""
    # Broadcast each operand as needed, add the plain Series, then regroup
    # with the grouper chosen from the left operand's broadcast.
    broad_x, grouper = broadcast_agg_result(x, y)
    broad_y, __ = broadcast_agg_result(y, x)
    res = broad_x + broad_y
    return regroup(res, grouper)
grouped_agg = f_add(f_mean(g_cyl.mpg), f_mean(g_cyl.hp))
# Notice, only 1 result per group
grouped_agg.obj
grouped_mutate = f_add(g_cyl.mpg, grouped_agg)
grouped_mutate.obj
"""
Explanation: However, there is are two problems here...
adding two means, or a number to a mean, shouldn't need to broadcast to the length of the data
in the code above, f_mean(Series) will return a single value (e.g. 1.2)!
The main issue is that a Series is implicitly a single group. To get around this, f_elwise should decide when to broadcast, and all operations should return SeriesGroupBy.
3) f_elwise(f_agg(a), f_agg(b)) -> agg length result
Above, we had the aggregate return a result the same length as the original data. But this goes against our initial description that f_agg returns a result whose length is the number of groupings.
In this case, we need to think more about f_agg's type signature.
To do this let's consider a new type, AggGroupBy, where...
AggGroupBy is a subtype of SeriesGroupBy
AggGroupBy has 1 row per grouping.
f_agg(a, [b]), with type signature f_agg(SeriesGroupBy) -> AggGroupBy
Finally let's make this drastically simplifying requirement
any operation must take as input either the output of another operation, a literal, or a series using the same grouping.
This means that if our operations return grouped Series, then we don't need to worry about the Series case any more. For example, under this system these operations are allowed...
f_agg(g_cyl.mpg)
f_elwise(g_cyl.mpg, 1)
f_elwise(f_agg(g_cyl.mpg), g_cyl.mpg)
End of explanation
"""
|
boffi/boffi.github.io | dati_2020/01/Resonance.ipynb | mit | def x_normalized(t, z):
wn = w = 2*pi
wd = wn*sqrt(1-z*z)
# Clough Penzien p. 43
A = z/sqrt(1-z*z)
return (-cos(wd*t)-A*sin(wd*t))*exp(-z*wn*t) + cos(w*t)
"""
Explanation: Resonant excitation
We want to study the behaviour of an undercritically damped SDOF system when it is
subjected to a harmonic force $p(t) = p_o \sin\omega_nt$, i.e., when the excitation frequency equals the free vibration frequency of the system.
Of course, $\beta=1$, $D(\beta,\zeta)|{\beta=1}=\displaystyle\frac{1}{2\zeta}$
and $\theta=\pi/2$, hence $$\xi(t)=\Delta{st}\,\frac{1}{2\zeta}\cos\omega_nt.$$
Starting from rest conditions, we have
$$\frac{x(t)}{\Delta_{st}} = \exp(-\zeta\omega_n t)\left(
-\frac{\omega_n}{2\omega_D}\sin(\omega_n t)
-\frac{1}{2\zeta}\cos(\omega_n t)\right) + \frac{1}{2\zeta}\cos(\omega_n t)$$
and, multiplying both sides by $2\zeta$
\begin{align}
x(t)\frac{2\zeta}{\Delta_{st}} = \bar{x}(t)& =
\exp(-\zeta\omega_n t)\left(
-\zeta\frac{\omega_n}{\omega_D}\sin(\omega_n t)
-\cos(\omega_n t)\right) + \cos(\omega_n t)\
& = \exp(-\zeta\omega_n t)\left(
-\frac{\zeta}{\sqrt{1-\zeta^2}}\sin(\omega_n t)
-\cos(\omega_n t)\right) + \cos(\omega_n t).
\end{align}
We have now a normalized function of time that grows, oscillating, from 0 to 1,
where the free parameters are just $\omega_n$ and $\zeta$.
To go further, we set arbitrarily $\omega_n=2\pi$ (our plots will be nicer...)
and have just a dependency on $t$ and $\zeta$.
Eventually, we define a function of $\zeta$ that returns a function of $t$ only,
here it is...
End of explanation
"""
t = np.linspace(0,20,1001)
print(t)
"""
Explanation: Above we compute some constants that depend on $\zeta$,
i.e., the damped frequency and the coefficient in
front of the sine term, then we define a function of time
in terms of these constants and of $\zeta$ itself.
Because we are going to use this function with a vector argument,
the last touch is to vectorize the function just before returning it
to the caller.
Plotting our results
We start by using a function defined in the numpy aka np module to
generate a vector whose entries are 1001 equispaced real numbers, starting from
zero and up to 20, inclusive of both ends, and assigning the name t to this vector.
End of explanation
"""
zetas = (.02, .05, .10, .20)
print(zetas)
"""
Explanation: We want to see what happens for different values of $\zeta$, so we create
a list of values and assign the name zetas to this list.
End of explanation
"""
for z in zetas:
plt.plot(t, x_normalized(t, z))
plt.ylim((-1.0, 1.0))
plt.title(r'$\zeta=%4.2f$'%(z,))
plt.show()
"""
Explanation: Now, the real plotting:
z takes in turn each of the values in zetas,
then we generate a function of time for the current z
we generate a plot with a line that goes through the point
(a(0),b(0)), (a(1),b(1)), (a(2),b(2)), ...
where, in our case, a is the vector t and b is the vector
returned from the vectorized function bar_x
we make a slight adjustement to the extreme values of the y-axis
of the plot
we give a title to the plot
we FORCE (plt.show()) the plot to be produced.
End of explanation
"""
t = np.linspace(0,5,501)
for z in zetas:
plt.plot(t, x_normalized(t, z)/(2*z), label=r'$\zeta=%4.2f$'%(z,))
plt.legend(ncol=5,loc='lower center', fancybox=1, shadow=1, framealpha=.95)
plt.grid()
"""
Explanation: Wait a minute!
So, after all this work, we have that the greater the damping, the smaller the
number of cycles that's needed to reach the maximum value of the response...
Yes, it's exactly like that, and there is a reason. Think of it.
.
.
.
.
.
.
.
We have normalized the response functions so that each always has a maximum absolute
value of one, but in fact the maximum values differ, and a heavily damped
system needs fewer cycles to reach steady state because its maximum value is much,
much smaller.
Let's plot the unnormalized (well, there's still the $\Delta_{st}$ normalization)
responses.
Note the differences with above:
we focus on a shorter interval of time and, in each step
we don't add a title
we don't force the creation of a distinct plot in each cycle,
we add a label to each curve
at the end of the cycle,
we ask for the generation of a legend that uses the labels
we specified to generate a, well, a legend for the curves
we ask to plot all the properly labeled curves using plt.plot().
End of explanation
"""
|
google/eng-edu | ml/cc/prework/fr/hello_world.ipynb | apache-2.0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2017 Google LLC.
End of explanation
"""
from __future__ import print_function
import tensorflow as tf
# NOTE(review): this uses the TensorFlow 1.x session API (tf.Session);
# it will not run unchanged under TensorFlow 2.x — confirm the target version.
c = tf.constant('Hello, world!')
with tf.Session() as sess:
    print(sess.run(c))
"""
Explanation: # Travail préalable : Hello World
Objectif de formation : Exécuter un programme TensorFlow dans le navigateur.
Voici un programme TensorFlow "Hello World" :
End of explanation
"""
|
metpy/MetPy | v0.11/_downloads/e1a6a28aa03f7e0f88631b525fc7c40d/Hodograph_Inset.ipynb | bsd-3-clause | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, Hodograph, SkewT
from metpy.units import units
"""
Explanation: Hodograph Inset
Layout a Skew-T plot with a hodograph inset into the plot.
End of explanation
"""
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
# Fixed-width sounding file: skip the header rows and keep only the columns named above.
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
                 skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'
                       ), how='all').reset_index(drop=True)
"""
Explanation: Upper air data can be obtained using the siphon package, but for this example we will use
some of MetPy's sample data.
End of explanation
"""
# Attach physical units to each column. Geopotential height is a length
# (meters in the sounding file), not a pressure — tagging it units.hPa was
# a unit error that mislabels the hodograph colour mapping below.
hght = df['height'].values * units.meter
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
# Decompose speed/direction into u, v wind components.
u, v = mpcalc.wind_components(wind_speed, wind_dir)
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
# Grid for plots
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Good bounds for aspect ratio
skew.ax.set_xlim(-50, 60)
# Create a hodograph as a 40% x 40% inset in the upper-right corner (loc=1)
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
# Wind trace colour-mapped by the hght variable
h.plot_colormapped(u, v, hght)
# Show the plot
plt.show()
"""
Explanation: We will pull the data out of the example dataset into individual variables and
assign units.
End of explanation
"""
|
cosmolejo/Fisica-Experimental-3 | Calculo_Error/Poisson/.ipynb_checkpoints/Poisson-checkpoint.ipynb | gpl-3.0 | dado = np.array([5, 3, 3, 2, 5, 1, 2, 3, 6, 2, 1, 3, 6, 6, 2, 2, 5, 6, 4, 2, 1, 3, 4, 2, 2, 5, 3, 3,
2, 2, 2, 1, 6, 2, 2, 6, 1, 3, 3, 3, 4, 4, 6, 6, 1, 2, 2, 6, 1, 4, 2, 5, 3, 6, 6, 3,
5, 2, 2, 4, 2, 2, 4, 4, 3, 3, 1, 2, 6, 1, 3, 3, 5, 4, 6, 6, 4, 2, 5, 6, 1, 4, 5, 4, 3, 5,
4, 1, 4, 6, 6, 6, 3, 1, 5, 6, 4, 3, 4, 6, 3, 5, 2, 6, 3, 6, 1, 4, 3, 4, 1])
# Sums obtained from rolling two dice together, one entry per throw.
suma = np.array([8, 5, 6, 5, 8, 4, 12, 4, 11, 6, 4, 6, 7, 6, 4, 3, 8, 8, 4, 6, 8, 12, 3, 8, 5, 7, 9, 9,
                 7, 6, 4, 8, 6, 3, 7, 6, 9, 12, 6, 11, 5, 9, 8, 5, 10, 12, 4, 11, 7, 10, 8, 8, 9, 7, 7, 5])
prob = 10./36  # probability that the sum of two fair dice is below 6
#prob = 6./21 # alternative (incorrect) probability of a sum below 6
#np.where(suma[0:8]<6)
"""
Explanation: ANÁLISIS ESTADÍSTICO DE DATOS: Distribuciones discretas
Material en construcción, no ha sido revisado por pares.
Última revisión: agosto 2016, Edgar Rueda
Referencias bibliográficas
García, F. J. G., López, N. C., & Calvo, J. Z. (2009). Estadística básica para estudiantes de ciencias.
Squires, G. L. (2001). Practical physics. Cambridge university press.
Conjunto de datos
Para esta sección haremos uso de dos conjuntos de datos, el primero se obtiene a partir del lanzamiento de un dado. El segundo conjunto corresponde a la suma de los dados por cada lanzamiento (se lanzan dos dados al mismo tiempo).
End of explanation
"""
mediaS = suma.size*prob  # mean of the binomial distribution, mu = n*p
devS = np.sqrt(suma.size*prob*(1.-prob))  # standard deviation, sigma = sqrt(n*p*q)
real = np.where(suma<6)  # np.where returns a one-element tuple holding the index array
real = real[0]  # unpack the index array from the tuple
duda = 16  # x, the number of successes whose cumulative probability we want
Prob = 0  # running value of P(X <= duda)
# Cumulative binomial probability B(duda; n, p) = sum_{r=0}^{duda} b(r; n, p).
# The upper bound must be duda + 1 so the term r = duda is included — the
# original range(0, duda) computed P(X < duda), contradicting the
# "inferior o igual" (<=) message printed below.
for cont in range(0, duda + 1):
    Prob = Prob + (math.factorial(suma.size)/(math.factorial(cont)*math.factorial(suma.size - cont))) \
        *prob**cont*(1.-prob)**(suma.size-cont)
print('La probabilidad de que la suma sea inferior a 6 es %.2f' % prob)
print('Número total de pruebas igual a %d' % suma.size)
print('Suma promedio igual a %.1f' %mediaS)
print('Desviación estándar de la suma = %.1f' % devS)
print('Número de veces que suma menos de 6 en la muestra es %.1f' % real.size)
print('La probabilidad de que el número de éxitos en una muestra de %d sea \
inferior o igual a %d, donde el éxito es que la suma sea inferior a 6, es %.4f' %(suma.size,duda,Prob))
"""
Explanation: PARA RECORDAR
Distribución binomial
Se denomina proceso de Bernoulli aquel experimento que consiste en repetir n veces una prueba, cada una independiente, donde el resultado se clasifica como éxito o fracaso (excluyente). La probabilidad de éxito se denota por $p$. Se define la $\textbf{variable aleatoria binomial}$ como la función que dá el número de éxitos en un proceso de Bernoulli. La variable aleatoria $X$ tomará valores $X = {0,1,2,...,n}$ para un experimento con n pruebas.
La distribución binomial (distribución de probabilidad) se representa como:
$$f(x) = P(X = x) = b(x;n,p)$$
Note que para calcular la probabilidad, debido a la independiencia de las pruebas, basta con multiplicar la probabilidad de los éxitos por la probabilidad de los fracasos, $p^x q^{n-x}$, y este valor multiplicarlo por el número posible de disposiciones en los que salgan los éxitos (permutaciones),
$$b(x;n,p) = \frac{n!}{x!(n-x)!}p^x q^{n-x}$$
La probabilidad de que $X$ sea menor a un valor $x$ determinado es:
$$P(X \leq x) = B(x;n,p) = \sum_{r = 0}^x b(r;n,p)$$
La media es $\mu = np$ y la desviación estándar es $\sigma = \sqrt{npq}$ donde $q = 1 - p$.
Una propiedad importante de la distribución binomial es que será simétrica si $p=q$, y con asimetría a la derecha cuando $p<q$.
Del conjunto de datos que se obtienen de la suma de dos dados, tenemos:
End of explanation
"""
n = suma.size
p = prob
x = np.arange(0,30)
# Binomial pmf over 0..29 successes via scipy.stats.
histB = stats.binom.pmf(x, n, p)
plt.figure(1)
plt.rcParams['figure.figsize'] = 20, 6  # resize the figure
plt.plot(x, histB, 'bo', ms=8, label='Distribucion binomial')
plt.xlabel('Numero de exitos')
plt.ylabel('Probabilidad')
# Cumulative probability P(X <= duda): the slice end is exclusive, so it must
# be duda + 1 to include x = duda — this mirrors the loop-based calculation
# above, so both approaches report the same value.
ProbB = np.sum(histB[0:duda + 1])
print('Probabilidad de que en solo %d ocasiones la suma sea inferior a 6 es %.4f' %(duda,ProbB))
"""
Explanation: Usando la función binom de python podemos graficar la función de distribución binomial para este caso.
End of explanation
"""
# NOTE(review): scipy.misc.imread is deprecated and removed in modern SciPy;
# imageio.imread is the usual replacement — confirm the installed version.
Ima = misc.imread('HDF-bw.jpg')  # read the image as an 8-bit matrix
plt.rcParams['figure.figsize'] = 20, 6  # resize the figure
Imab = Ima[100:500,100:700,1]  # the original image has three channels (RGB); pick one channel and crop
plt.figure(2)
plt.imshow(Imab, cmap='gray')
"""
Explanation: $\textbf{FIGURA 1.}$ Distribución binomial para el ejemplo.
Efectivamente, se obtuvo la misma probabilidad. Note que si se desconoce la probabilidad $p$ esta se puede determinar si se conoce que la distribución es binomial. Una vez se tiene la probabilidad de éxito se pueden determinar las probabilidades para cualquier cantidad de pruebas.
La distribución binomial es de gran utilidad en campos científicos como el control de calidad y las aplicaciones médicas.
Distribución de Poisson
En un experimento aleatorio en el que se busque medir el número de sucesos o resultados de un tipo que ocurren en un intervalo continuo (número de fotones que llegan a un detector en intervalos de tiempo iguales, número de estrellas en cuadrículas idénticas en el cielo, número de fotones en un modo en un oscilador mecánico cuántico, energía total en un oscilador armónico mecánico cuántico), se le conocerá como proceso de Poisson, y deberá cumplir las siguientes reglas:
Los resultados de cada intervalo son independientes.
La probabilidad de que un resultado ocurra en un intervalo pequeño es proporcional al tamaño del intervalo. La probabilidad es constante por lo que se puede definir un valor medio de resultados por unidad de intervalo. El proceso es estable.
La probabilidad de que ocurra más de un resultado en un intervalo lo suficientemente pequeño es despreciable. El intervalo es tán pequeño que a lo sumo se espera solo un suceso (resultado).
La distribución de Poisson es un caso límite de la distribución binomial cuando el número de eventos $N$ tiende a infinito y la probabilidad de acierto $p$ tiende a cero (ver libro de Squire para la deducción).
La $\textbf{variable aleatoria de Poisson}$ se define como el número de resultados que aparecen en un experimento que sigue el proceso de Poisson. La distribución de probabilidad asociada se denomina distribución de Poisson y depende solo del parámetro medio de resultados $\lambda$.
$$X = (0,1,2,...)$$
$$f(x) = P(X=x) = p(x;\lambda)$$
La expresión para la distribución se obtiene a partir de la binomial (mirar libro de Garcia):
$$p(x;\lambda) = \frac{\lambda^x}{x!} e^{- \lambda}$$
con media $\lambda$ y desviación estándar $\sqrt{\lambda}$.
End of explanation
"""
plt.rcParams['figure.figsize'] = 18, 15  # resize the figure
fil, col = Imab.shape  # number of rows and columns of the image
numlado = 7  # number of sub-images per side
contar = 1
plt.figure(5)
# Tile the image into a numlado x numlado grid of sub-images with hidden axes.
for enfil in range(1, numlado + 1):
    for encol in range(1, numlado + 1):
        plt.subplot(numlado, numlado, contar)
        # np.int was removed in NumPy 1.24+; the builtin int() is equivalent here.
        plt.imshow(Imab[(enfil-1)*int(fil/numlado):enfil*int(fil/numlado), \
                        (encol-1)*int(col/numlado):encol*int(col/numlado)],cmap='gray')
        frame1 = plt.gca()
        frame1.axes.get_yaxis().set_visible(False)
        frame1.axes.get_xaxis().set_visible(False)
        contar = contar + 1
# For the 7x7 grid, gal holds the number of galaxies counted in each tile.
gal = np.array([2., 3., 6., 5., 4., 9., 10., \
                2., 3., 7., 1., 3., 1., 6., \
                6., 5., 4., 3., 4., 2., 4., \
                4., 6., 3., 3., 4., 3., 2., \
                5., 4., 2., 2., 6., 5., 9., \
                4., 7., 2., 3., 3., 3., 5., \
                6., 3., 4., 7., 4., 6., 7.])
la = np.mean(gal)  # sample mean of the counts
# Distribution of the data set: the first row is the galaxy count, the second
# is how many tiles show that count.
distriGal = np.array([[0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.],[0., 1., 8., 11., 10., 5., 6., 4., 0., 2., 1.]])
print('Valor promedio del conjunto de datos = %.2f' % la)
plt.figure(figsize=(16,9))
plt.plot(distriGal[0,:],distriGal[1,:]/gal.size,'r*',ms=10,label='Distribución datos con promedio %.1f' % la)
plt.legend()
plt.xlabel('Número de galaxias en el intervalo')
plt.ylabel('Rata de ocurrencia')
plt.grid()
"""
Explanation: $\textbf{FIGURA 2.}$ Galaxias en el espacio profundo.
End of explanation
"""
num = 2.  # number of galaxies we expect to find
# Poisson probability P(X = num) as a percentage. math.factorial requires an
# integer argument (a float raises TypeError on Python >= 3.10), so cast it.
prob = (la**num*np.exp(-la)/math.factorial(int(num)))*100
x = np.arange(0,20)  # data range: number of galaxies
histP = stats.poisson.pmf(x, la)  # Poisson probability mass function
ProbP = (np.sum(histP[0:int(num)+1]))*100  # cumulative probability P(X <= num)
print('Promedio de galaxias en el área estudiada = %.2f' % la)
print('La probabilidad de que se observe en la imagen del espacio profundo %d galaxias es = %.1f%%' % (num,prob))
print('Probabilidad de observar hasta %d galaxias = %.1f%%' %(num,ProbP))
"""
Explanation: Si decimos que la distribución que se determinó en el paso anterior es una distribución de Poisson (suposición), podemos decir cosas como:
End of explanation
"""
# Overlay the ideal Poisson pmf (lambda = sample mean) on the observed
# galaxy-count distribution.
plt.figure(figsize=(16,9))
plt.plot(x, histP, 'bo', ms=8, label='Distribución de Poisson con $\lambda=$ %.1f' % la)
plt.plot(distriGal[0,:],distriGal[1,:]/gal.size,'r*',ms=10,label='Conjunto de datos con promedio %.1f' % la)
plt.xlabel('Numero de galaxias (sucesos)')
plt.ylabel('Rata de ocurrencia')
plt.legend()
plt.grid()
"""
Explanation: Comparemos ahora la distribución obtenida con la correspondiente distribución de Poisson:
End of explanation
"""
plt.figure(4)
plt.rcParams['figure.figsize'] = 12, 6  # resize the figure
probP = np.zeros(20)
# Plot Poisson pmfs for increasing lambda to show convergence to a Gaussian.
for la in range(1,10,2):
    for num in range(0,20):
        probP[num] = la**num*np.exp(-la)/math.factorial(num)
    plt.plot(probP,marker='.',ms=15,label='$\lambda = %d$' %la)
# mu and sigma deliberately use the loop's last lambda (la = 9) for the
# Gaussian comparison curve.
mu = la  # arithmetic mean
sigma = np.sqrt(la)  # standard deviation
x = np.arange(0,20,1)
f = (1./np.sqrt(2*np.pi*sigma**2))*np.exp(-(x-mu)**2/(2*sigma**2))
plt.plot(f,marker='*',ms=10,color='black',label='$ \overline{x} = %d , \ \sigma = %.1f$'%(mu,sigma))
plt.xlabel('Evento')
plt.ylabel('Probabilidad')
plt.legend()
"""
Explanation: $\textbf{FIGURA 3.}$ Distribución de Poisson ideal con respecto a la generada por los datos.
Finalmente observemos como la distribución de Poisson tiende a la forma de una distribución normal.
End of explanation
"""
|
postBG/DL_project | intro-to-rnns/Anna_KaRNNa_Exercises.ipynb | mit | import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
"""
Explanation: Anna KaRNNa
In this notebook, we'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Also, some information here at r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
End of explanation
"""
# Load the book text and build lookup tables mapping each character to an
# integer id and back, then encode the whole text as an int32 array.
with open('anna.txt', 'r') as f:
    text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
"""
Explanation: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
End of explanation
"""
text[:100]
"""
Explanation: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
End of explanation
"""
encoded[:100]
"""
Explanation: And we can see the characters encoded as integers.
End of explanation
"""
len(vocab)
"""
Explanation: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
End of explanation
"""
def get_batches(arr, n_seqs, n_steps):
    '''Generator that yields (input, target) mini-batches of shape
    n_seqs x n_steps taken from arr.

    Arguments
    ---------
    arr: Array you want to make batches from
    n_seqs: Batch size, the number of sequences per batch
    n_steps: Number of sequence steps per batch
    '''
    # Characters per batch and the number of complete batches we can form.
    chars_per_batch = n_seqs * n_steps
    n_batches = len(arr) // chars_per_batch

    # Drop the tail so every batch is completely full, then lay the data
    # out as n_seqs parallel rows.
    trimmed = arr[:n_batches * chars_per_batch].reshape((n_seqs, -1))

    for start in range(0, trimmed.shape[1], n_steps):
        # Inputs: a window of n_steps columns.
        x = trimmed[:, start:start + n_steps]
        # Targets: inputs shifted left by one step; the first input
        # character wraps around to become the last target character.
        y = np.roll(x, -1, axis=1)
        yield x, y
"""
Explanation: Making training mini-batches
Here is where we'll make our mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/sequence_batching@1x.png" width=500px>
<br>
We have our text encoded as integers as one long array in encoded. Let's create a function that will give us an iterator for our batches. I like using generator functions to do this. Then we can pass encoded into this function and get our batch generator.
The first thing we need to do is discard some of the text so we only have completely full batches. Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences) and $M$ is the number of steps. Then, to get the number of batches we can make from some array arr, you divide the length of arr by the batch size. Once you know the number of batches and the batch size, you can get the total number of characters to keep.
After that, we need to split arr into $N$ sequences. You can do this using arr.reshape(size) where size is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences (n_seqs below), let's make that the size of the first dimension. For the second dimension, you can use -1 as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$ where $K$ is the number of batches.
Now that we have this array, we can iterate through it to get our batches. The idea is each batch is a $N \times M$ window on the array. For each subsequent batch, the window moves over by n_steps. We also want to create both the input and target arrays. Remember that the targets are the inputs shifted over one character. You'll usually see the first input character used as the last target character, so something like this:
python
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
where x is the input batch and y is the target batch.
The way I like to do this window is use range to take steps of size n_steps from $0$ to arr.shape[1], the total number of steps in each sequence. That way, the integers you get from range always point to the start of a batch, and each window is n_steps wide.
Exercise: Write the code for creating batches in the function below. The exercises in this notebook will not be easy. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, type out the solution code yourself.
위의 사진으로 보면
$N$ = 2 = n_seqs
$M$ = 3 = n_steps
End of explanation
"""
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
"""
Explanation: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
End of explanation
"""
def build_inputs(batch_size, num_steps):
    ''' Create the graph placeholders for inputs, targets, and dropout.

    Arguments
    ---------
    batch_size: Batch size, number of sequences per batch
    num_steps: Number of sequence steps in a batch
    '''
    shape = [batch_size, num_steps]

    # Integer-encoded characters flow through these two placeholders.
    inputs = tf.placeholder(tf.int32, shape, name='inputs')
    targets = tf.placeholder(tf.int32, shape, name='targets')

    # Scalar (0-D) placeholder so the dropout keep probability can differ
    # between training and sampling.
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")

    return inputs, targets, keep_prob
"""
Explanation: If you implemented get_batches correctly, the above output should look something like
```
x
[[55 63 69 22 6 76 45 5 16 35]
[ 5 69 1 5 12 52 6 5 56 52]
[48 29 12 61 35 35 8 64 76 78]
[12 5 24 39 45 29 12 56 5 63]
[ 5 29 6 5 29 78 28 5 78 29]
[ 5 13 6 5 36 69 78 35 52 12]
[63 76 12 5 18 52 1 76 5 58]
[34 5 73 39 6 5 12 52 36 5]
[ 6 5 29 78 12 79 6 61 5 59]
[ 5 78 69 29 24 5 6 52 5 63]]
y
[[63 69 22 6 76 45 5 16 35 35]
[69 1 5 12 52 6 5 56 52 29]
[29 12 61 35 35 8 64 76 78 28]
[ 5 24 39 45 29 12 56 5 63 29]
[29 6 5 29 78 28 5 78 29 45]
[13 6 5 36 69 78 35 52 12 43]
[76 12 5 18 52 1 76 5 58 52]
[ 5 73 39 6 5 12 52 36 5 78]
[ 5 29 78 12 79 6 61 5 59 63]
[78 69 29 24 5 6 52 5 63 76]]
```
although the exact numbers will be different. Check to make sure the data is shifted over one step for `y`.
Building the model
Below is where you'll build the network. We'll break it up into parts so it's easier to reason about each bit. Then we can connect them up into the whole network.
<img src="assets/charRNN.png" width=500px>
Inputs
First off we'll create our input placeholders. As usual we need placeholders for the training data and the targets. We'll also create a placeholder for dropout layers called keep_prob. This will be a scalar, that is a 0-D tensor. To make a scalar, you create a placeholder without giving it a size.
Exercise: Create the input placeholders in the function below.
End of explanation
"""
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    ''' Build the (stacked) LSTM cell and its zero initial state.

    Arguments
    ---------
    keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
    lstm_size: Size of the hidden layers in the LSTM cells
    num_layers: Number of LSTM layers
    batch_size: Batch size
    '''
    ### Build the LSTM Cell
    # One dropout-wrapped BasicLSTMCell per layer.
    cells = []
    for _ in range(num_layers):
        basic_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        # Add dropout to the cell outputs.
        cells.append(tf.contrib.rnn.DropoutWrapper(basic_cell, output_keep_prob=keep_prob))

    # Stack the layers so they behave like a single multi-layer cell.
    cell = tf.contrib.rnn.MultiRNNCell(cells)

    # The hidden state starts out as all zeros.
    initial_state = cell.zero_state(batch_size, tf.float32)

    return cell, initial_state
"""
Explanation: LSTM Cell
Here we will create the LSTM cell we'll use in the hidden layer. We'll use this cell as a building block for the RNN. So we aren't actually defining the RNN here, just the type of cell we'll use in the hidden layer.
We first create a basic LSTM cell with
python
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
where num_units is the number of units in the hidden layers in the cell. Then we can add dropout by wrapping it with
python
tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
You pass in a cell and it will automatically add dropout to the inputs or outputs. Finally, we can stack up the LSTM cells into layers with tf.contrib.rnn.MultiRNNCell. With this, you pass in a list of cells and it will send the output of one cell into the next cell. For example,
python
tf.contrib.rnn.MultiRNNCell([cell]*num_layers)
This might look a little weird if you know Python well because this will create a list of the same cell object. However, TensorFlow will create different weight matrices for all cell objects. Even though this is actually multiple LSTM cells stacked on each other, you can treat the multiple layers as one cell.
We also need to create an initial cell state of all zeros. This can be done like so
python
initial_state = cell.zero_state(batch_size, tf.float32)
Exercise: Below, implement the build_lstm function to create these LSTM cells and the initial state.
End of explanation
"""
def build_output(lstm_output, in_size, out_size):
    ''' Build a softmax layer, return the softmax output and logits.

    Arguments
    ---------
    lstm_output: List of output tensors from the LSTM layer
    in_size: Size of the input tensor, for example, size of the LSTM cells
    out_size: Size of this softmax layer
    '''
    # Flatten the per-step outputs: concatenate over the columns, then
    # reshape to a 2D tensor with one row per step per sequence.
    flat = tf.reshape(tf.concat(lstm_output, axis=1), [-1, in_size])

    # The fully connected layer gets its own variable scope so its weight
    # names cannot collide with the variables created inside the LSTM cells.
    with tf.variable_scope('softmax'):
        softmax_w = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(out_size))

    # One row of logit outputs per step and sequence.
    logits = tf.add(tf.matmul(flat, softmax_w), softmax_b)

    # Softmax turns the logits into probabilities over predicted characters.
    out = tf.nn.softmax(logits, name="predictions")

    return out, logits
"""
Explanation: RNN Output
Here we'll create the output layer. We need to connect the output of the RNN cells to a fully connected layer with a softmax output. The softmax output gives us a probability distribution we can use to predict the next character, so we want this layer to have size $C$, the number of classes/characters we have in our text.
If our input has batch size $N$, number of steps $M$, and the hidden layer has $L$ hidden units, then the output is a 3D tensor with size $N \times M \times L$. The output of each LSTM cell has size $L$, we have $M$ of them, one for each sequence step, and we have $N$ sequences. So the total size is $N \times M \times L$.
We are using the same fully connected layer, the same weights, for each of the outputs. Then, to make things easier, we should reshape the outputs into a 2D tensor with shape $(M * N) \times L$. That is, one row for each sequence and step, where the values of each row are the output from the LSTM cells. We get the LSTM output as a list, lstm_output. First we need to concatenate this whole list into one array with tf.concat. Then, reshape it (with tf.reshape) to size $(M * N) \times L$.
Once we have the outputs reshaped, we can do the matrix multiplication with the weights. We need to wrap the weight and bias variables in a variable scope with tf.variable_scope(scope_name) because there are weights being created in the LSTM cells. TensorFlow will throw an error if the weights created here have the same names as the weights created in the LSTM cells, which they will by default. To avoid this, we wrap the variables in a variable scope so we can give them unique names.
Exercise: Implement the output layer in the function below.
End of explanation
"""
def build_loss(logits, targets, lstm_size, num_classes):
    ''' Calculate the loss from the logits and the targets.

    Arguments
    ---------
    logits: Logits from final fully connected layer
    targets: Targets for supervised learning
    lstm_size: Number of LSTM hidden units (unused, kept for signature
        compatibility with callers)
    num_classes: Number of classes in targets
    '''
    # One-hot encode targets and reshape to match logits, one row per
    # sequence per step.  Reshaping to [-1, num_classes] (instead of
    # logits.get_shape()) gives the same (M*N) x C shape but also works
    # when the static shape of the logits is not fully known.
    y_one_hot = tf.one_hot(targets, num_classes)
    y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])

    # Softmax cross entropy loss, averaged over every step of every sequence.
    loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_reshaped, logits=logits)
    loss = tf.reduce_mean(loss)

    return loss
"""
Explanation: Training loss
Next up is the training loss. We get the logits and targets and calculate the softmax cross-entropy loss. First we need to one-hot encode the targets, we're getting them as encoded characters. Then, reshape the one-hot targets so it's a 2D tensor with size $(MN) \times C$ where $C$ is the number of classes/characters we have. Remember that we reshaped the LSTM outputs and ran them through a fully connected layer with $C$ units. So our logits will also have size $(MN) \times C$.
Then we run the logits and targets through tf.nn.softmax_cross_entropy_with_logits and find the mean to get the loss.
Exercise: Implement the loss calculation in the function below.
End of explanation
"""
def build_optimizer(loss, learning_rate, grad_clip):
    ''' Build the optimizer for training, using gradient clipping.

    Arguments:
        loss: Network loss
        learning_rate: Learning rate for optimizer
        grad_clip: Threshold used to clip gradients by their global norm
    '''
    # LSTMs solve the vanishing-gradient problem, but gradients can still
    # explode, so clip them by global norm before applying the update.
    tvars = tf.trainable_variables()
    clipped_grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)

    adam = tf.train.AdamOptimizer(learning_rate)
    optimizer = adam.apply_gradients(zip(clipped_grads, tvars))

    return optimizer
"""
Explanation: Optimizer
Here we build the optimizer. Normal RNNs have issues with gradients exploding and disappearing. LSTMs fix the disappearance problem, but the gradients can still grow without bound. To fix this, we can clip the gradients above some threshold. That is, if a gradient is larger than that threshold, we set it to the threshold. This will ensure the gradients never grow overly large. Then we use an AdamOptimizer for the learning step.
End of explanation
"""
class CharRNN:
    def __init__(self, num_classes, batch_size=64, num_steps=50,
                 lstm_size=128, num_layers=2, learning_rate=0.001,
                 grad_clip=5, sampling=False):
        # When this network is used for sampling later, characters are
        # passed in one at a time, so the graph is built for a single
        # sequence of a single step.
        if sampling == True:
            batch_size, num_steps = 1, 1

        tf.reset_default_graph()

        # Input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)

        # The (stacked) LSTM cell and its zero initial state
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)

        ### Run the data through the RNN layers
        # One-hot encode the input tokens, then let dynamic_rnn thread the
        # hidden and cell states through every sequence step for us.  The
        # final state is kept so it can seed the next mini-batch.
        x_one_hot = tf.one_hot(self.inputs, num_classes)
        outputs, self.final_state = tf.nn.dynamic_rnn(
            cell, inputs=x_one_hot, initial_state=self.initial_state)

        # Softmax predictions and logits
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)

        # Loss and optimizer (with gradient clipping)
        self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
"""
Explanation: Build the network
Now we can put all the pieces together and build a class for the network. To actually run data through the LSTM cells, we will use tf.nn.dynamic_rnn. This function will pass the hidden and cell states across LSTM cells appropriately for us. It returns the outputs for each LSTM cell at each step for each sequence in the mini-batch. It also gives us the final LSTM state. We want to save this state as final_state so we can pass it to the first LSTM cell in the next mini-batch run. For tf.nn.dynamic_rnn, we pass in the cell and initial state we get from build_lstm, as well as our input sequences. Also, we need to one-hot encode the inputs before going into the RNN.
Exercise: Use the functions you've implemented previously and tf.nn.dynamic_rnn to build the network.
End of explanation
"""
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
"""
Explanation: Hyperparameters
Here are the hyperparameters for the network.
batch_size - Number of sequences running through the network in one pass.
num_steps - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
lstm_size - The number of units in the hidden layers.
num_layers - Number of hidden LSTM layers to use
learning_rate - Learning rate for training
keep_prob - The dropout keep probability when training. If your network is overfitting, try decreasing this.
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to where it originally came from.
Tips and Tricks
Monitoring Validation Loss vs. Training Loss
If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
If your training loss is much lower than validation loss then this means the network might be overfitting. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
If your training/validation loss are about equal then your model is underfitting. Increase the size of your model (either number of layers or the raw number of neurons per layer)
Approximate number of parameters
The two most important parameters that control the model are lstm_size and num_layers. I would advise that you always use num_layers of either 2/3. The lstm_size can be adjusted based on how much data you have. The two important quantities to keep track of here are:
The number of parameters in your model. This is printed when you start training.
The size of your dataset. 1MB file is approximately 1 million characters.
These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make lstm_size larger.
I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
Best models strategy
The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
End of explanation
"""
epochs = 20
# Save every N iterations
save_every_n = 200

model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                lstm_size=lstm_size, num_layers=num_layers,
                learning_rate=learning_rate)

saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Use the line below to load a checkpoint and resume training
    #saver.restore(sess, 'checkpoints/______.ckpt')
    counter = 0
    for e in range(epochs):
        # Train network
        # Each epoch starts from the zero LSTM state.
        new_state = sess.run(model.initial_state)
        loss = 0
        for x, y in get_batches(encoded, batch_size, num_steps):
            counter += 1
            start = time.time()
            # Feed the batch in, carrying over the final LSTM state from
            # the previous batch so state flows across the whole epoch.
            feed = {model.inputs: x,
                    model.targets: y,
                    model.keep_prob: keep_prob,
                    model.initial_state: new_state}
            batch_loss, new_state, _ = sess.run([model.loss,
                                                 model.final_state,
                                                 model.optimizer],
                                                feed_dict=feed)

            end = time.time()
            print('Epoch: {}/{}... '.format(e+1, epochs),
                  'Training Step: {}... '.format(counter),
                  'Training loss: {:.4f}... '.format(batch_loss),
                  '{:.4f} sec/batch'.format((end-start)))

            # Periodic checkpoint.
            if (counter % save_every_n == 0):
                saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))

    # Final checkpoint when training completes.
    saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
"""
Explanation: Time for training
This is typical training code, passing inputs and targets into the network, then running the optimizer. Here we also get back the final LSTM state for the mini-batch. Then, we pass that state back into the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I save a checkpoint.
Here I'm saving checkpoints with the format
i{iteration number}_l{# hidden layer units}.ckpt
Exercise: Set the hyperparameters above to train the network. Watch the training loss, it should be consistently dropping. Also, I highly advise running this on a GPU.
End of explanation
"""
tf.train.get_checkpoint_state('checkpoints')
"""
Explanation: Saved checkpoints
Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables
End of explanation
"""
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a character id from the top_n most likely predictions.

    Arguments
    ---------
    preds: Array of character probabilities (extra 1-sized dims allowed)
    vocab_size: Number of characters in the vocabulary
    top_n: How many of the most likely characters to sample from
    """
    # np.squeeze can return a view of preds, so copy before zeroing
    # entries to avoid mutating the caller's array in place.
    p = np.squeeze(preds).copy()
    # Zero out everything except the top_n most probable characters.
    p[np.argsort(p)[:-top_n]] = 0
    # Renormalize and sample from the remaining distribution.
    p = p / np.sum(p)
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Generate n_samples characters of new text from a trained checkpoint.

    checkpoint: Path of the checkpoint file to restore
    n_samples: Number of characters to generate after the prime text
    lstm_size: Hidden-layer size the checkpoint was trained with
    vocab_size: Number of characters in the vocabulary
        NOTE(review): this parameter is unused; the function uses the
        module-level len(vocab) instead -- confirm they always agree.
    prime: Text fed through first to build up the LSTM state
    """
    samples = [c for c in prime]
    # Rebuild the graph in sampling mode (batch_size = num_steps = 1).
    model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Prime the network: feed each prime character through, keeping
        # the evolving LSTM state (dropout disabled via keep_prob=1).
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                         feed_dict=feed)

        # First generated character, sampled from the last prime prediction.
        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])

        # Generate: feed each sampled character back in as the next input.
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                         feed_dict=feed)

            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])

    return ''.join(samples)
"""
Explanation: Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
End of explanation
"""
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
"""
Explanation: Here, pass in the path to a checkpoint and sample from the network.
End of explanation
"""
|
tensorflow/lucid | notebooks/differentiable-parameterizations/appendix/colab_gl.ipynb | apache-2.0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
!pip install -q lucid>=0.2.3
!pip install -q moviepy
import numpy as np
import json
import moviepy.editor as mvp
from google.colab import files
import lucid.misc.io.showing as show
from lucid.misc.gl.glcontext import create_opengl_context
# Now it's safe to import OpenGL and EGL functions
import OpenGL.GL as gl
# create_opengl_context() creates GL context that is attached to an
# offscreen surface of specified size. Note that rendering to buffers
# of different size and format is still possible with OpenGL Framebuffers.
#
# Users are expected to directly use EGL calls in case more advanced
# context management is required.
WIDTH, HEIGHT = 640, 480
create_opengl_context((WIDTH, HEIGHT))
# OpenGL context is available here.
print(gl.glGetString(gl.GL_VERSION))
print(gl.glGetString(gl.GL_VENDOR))
#print(gl.glGetString(gl.GL_EXTENSIONS))
# Let's render something!
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glBegin(gl.GL_TRIANGLES)
gl.glColor3f(1.0, 0.0, 0.0)
gl.glVertex2f(0, 1)
gl.glColor3f(0.0, 1.0, 0.0)
gl.glVertex2f(-1, -1)
gl.glColor3f(0.0, 0.0, 1.0)
gl.glVertex2f(1, -1)
gl.glEnd()
# Read the result
img_buf = gl.glReadPixelsub(0, 0, WIDTH, HEIGHT, gl.GL_RGB, gl.GL_UNSIGNED_BYTE)
img = np.frombuffer(img_buf, np.uint8).reshape(HEIGHT, WIDTH, 3)[::-1]
show.image(img/255.0)
"""
Explanation: Using OpenGL with Colab Cloud GPUs
This notebook demonstrates obtaining OpenGL context on GPU Colab kernels.
End of explanation
"""
shader_id = 'Xtf3Rn' # https://www.shadertoy.com/view/Xtf3Rn
shader_json = !curl -s 'https://www.shadertoy.com/shadertoy' \
-H 'Referer: https://www.shadertoy.com/view/$shader_id' \
--data 's=%7B%20%22shaders%22%20%3A%20%5B%22$shader_id%22%5D%20%7D'
shader_data = json.loads(''.join(shader_json))[0]
assert len(shader_data['renderpass']) == 1, "Only single pass shareds are supported"
assert len(shader_data['renderpass'][0]['inputs']) == 0, "Input channels are not supported"
shader_code = shader_data['renderpass'][0]['code']
from OpenGL.GL import shaders
vertexPositions = np.float32([[-1, -1], [1, -1], [-1, 1], [1, 1]])
VERTEX_SHADER = shaders.compileShader("""
#version 330
layout(location = 0) in vec4 position;
out vec2 UV;
void main()
{
UV = position.xy*0.5+0.5;
gl_Position = position;
}
""", gl.GL_VERTEX_SHADER)
FRAGMENT_SHADER = shaders.compileShader("""
#version 330
out vec4 outputColor;
in vec2 UV;
uniform sampler2D iChannel0;
uniform vec3 iResolution;
vec4 iMouse = vec4(0);
uniform float iTime = 0.0;
""" + shader_code + """
void main()
{
mainImage(outputColor, UV*iResolution.xy);
}
""", gl.GL_FRAGMENT_SHADER)
shader = shaders.compileProgram(VERTEX_SHADER, FRAGMENT_SHADER)
time_loc = gl.glGetUniformLocation(shader, 'iTime')
res_loc = gl.glGetUniformLocation(shader, 'iResolution')
def render_frame(time):
    """Render one frame of the shader at the given time.

    Returns an (HEIGHT, WIDTH, 3) uint8 RGB array, flipped to top-down
    image row order.  Relies on the module-level GL context, shader
    program, uniform locations, and vertexPositions set up above.
    """
    gl.glClear(gl.GL_COLOR_BUFFER_BIT)
    with shader:
        # Feed the ShaderToy-style uniforms for this frame.
        gl.glUniform1f(time_loc, time)
        gl.glUniform3f(res_loc, WIDTH, HEIGHT, 1.0)
        # Draw a full-screen quad; the fragment shader does the rest.
        gl.glEnableVertexAttribArray(0);
        gl.glVertexAttribPointer(0, 2, gl.GL_FLOAT, False, 0, vertexPositions)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)
    # Read back the framebuffer; OpenGL rows are bottom-up, so flip with [::-1].
    img_buf = gl.glReadPixels(0, 0, WIDTH, HEIGHT, gl.GL_RGB, gl.GL_UNSIGNED_BYTE)
    img = np.frombuffer(img_buf, np.uint8).reshape(HEIGHT, WIDTH, 3)[::-1]
    return img
show.image(render_frame(10.0)/255.0, format='jpeg')
"""
Explanation: Render ShaderToy videos on GPU
We now have the full power of modern OpenGL in our hands! Let's do something interesting with it!
Fetching the source and rendering the amazing shader by Kali from ShaderToy. You can also substitute a different shader_id, but note that only single-pass shaders that don't use textures are supported by the code below.
End of explanation
"""
clip = mvp.VideoClip(render_frame, duration=10.0)
clip.write_videofile('out.mp4', fps=60)
files.download('out.mp4')
"""
Explanation: Use MoviePy to generate a video.
End of explanation
"""
|
GoogleCloudPlatform/gcp-getting-started-lab-jp | data_analytics/sample.ipynb | apache-2.0 | %%bigquery
SELECT
COUNT(DISTINCT station_id) as cnt
FROM
`bigquery-public-data.new_york.citibike_stations`
"""
Explanation: 「%%bigquery」に続いてSQLを記述するとBigQueryにクエリを投げることができます
例えば、WebUIから実行した「重複なしでバイクステーションの数をカウントする」クエリは以下のように実行します
End of explanation
"""
%%bigquery
SELECT
COUNT(station_id) as cnt
FROM
`bigquery-public-data.new_york.citibike_stations`
WHERE
is_installed = TRUE
AND is_renting = TRUE
AND is_returning = TRUE
"""
Explanation: 同じように、WebUIから実行した各種クエリを実行してみます。
営業しているバイクステーション
End of explanation
"""
%%bigquery
SELECT
usertype,
gender,
COUNT(gender) AS cnt
FROM
`bigquery-public-data.new_york.citibike_trips`
GROUP BY
usertype,
gender
ORDER BY
cnt DESC
"""
Explanation: ユーザーの課金モデル
End of explanation
"""
%%bigquery
SELECT
start_station_name,
end_station_name,
COUNT(end_station_name) AS cnt
FROM
`bigquery-public-data.new_york.citibike_trips`
GROUP BY
start_station_name,
end_station_name
ORDER BY
cnt DESC
"""
Explanation: バイクの借り方の傾向
End of explanation
"""
%%bigquery utilization_time
SELECT
starttime, stoptime,
TIMESTAMP_DIFF(stoptime, starttime, MINUTE) as minute,
usertype, birth_year, gender
FROM
`bigquery-public-data.new_york.citibike_trips`
WHERE
start_station_name = 'Central Park S & 6 Ave' and end_station_name = 'Central Park S & 6 Ave'
# utilization_timeの中身の確認
utilization_time
"""
Explanation: 結果の解釈(一例)
Central Parkの南に地下鉄の駅がある
観光客がCentral Parkの観光に利用している
12 Ave & W 40 St => West St & Chambers St
通勤での利用(居住区からオフィス街への移動)
南北方面ではなく東西方面の移動が多い
地下鉄は南北方向に駅がある
NY在住者は自転車で東西方向に移動して、南北方向に地下鉄を利用する傾向がある
単純にBigQueryに対してクエリを実行するだけではなく、データの簡易的な可視化などの機能も提供されます。
利用者の調査
最も利用者が多いstart_station_name="Central Park S & 6 Ave", end_station_name="Central Park S & 6 Ave"の利用時間を調査します。
%%bigqueryコマンドに続いて変数名を渡すことで、BigQueryの結果をpandasのDataFrameとして保存することができます。
End of explanation
"""
# Import the required libraries and suppress warning output
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
# Draw a histogram of trip durations in minutes (2-minute bins, 0-100)
utilization_time['minute'].hist(bins=range(0,100,2))
"""
Explanation: Pythonによるデータ可視化
データの概要を掴むためにヒストグラム(データのばらつきを確認するための図)を描きます。
End of explanation
"""
|
AllenDowney/ThinkBayes2 | examples/regress_soln.ipynb | mit | # Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
import numpy as np
# import classes from thinkbayes2
from thinkbayes2 import Pmf, Cdf, Suite, Joint
import thinkplot
"""
Explanation: Think Bayes
Copyright 2018 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
End of explanation
"""
slope = 2
inter = 1
sigma = 0.3
xs = np.linspace(0, 1, 6)
ys = inter + slope * xs + np.random.normal(0, sigma, len(xs))
thinkplot.plot(xs, ys)
thinkplot.decorate(xlabel='x',
ylabel='y')
"""
Explanation: Bayesian regression
This notebook presents a simple example of Bayesian regression using sythetic data
Data
Suppose there is a linear relationship between x and y with slope 2 and intercept 1, but the measurements of y are noisy; specifically, the noise is Gaussian with mean 0 and sigma = 0.3.
End of explanation
"""
from scipy.stats import norm
class Regress(Suite, Joint):
def Likelihood(self, data, hypo):
"""
data: x, y
hypo: slope, inter, sigma
"""
return 1
# Solution
from scipy.stats import norm
class Regress(Suite, Joint):
def Likelihood(self, data, hypo):
"""
data: x, y
hypo: slope, inter, sigma
"""
x, y = data
slope, inter, sigma = hypo
yfit = inter + slope * x
error = yfit - y
like = norm(0, sigma).pdf(error)
return like
params = np.linspace(-4, 4, 21)
sigmas = np.linspace(0.1, 2, 20)
from itertools import product
hypos = product(params, params, sigmas)
suite = Regress(hypos);
for data in zip(xs, ys):
suite.Update(data)
thinkplot.Pdf(suite.Marginal(0))
thinkplot.decorate(xlabel='Slope',
ylabel='PMF',
title='Posterior marginal distribution')
thinkplot.Pdf(suite.Marginal(1))
thinkplot.decorate(xlabel='Intercept',
ylabel='PMF',
title='Posterior marginal distribution')
thinkplot.Pdf(suite.Marginal(2))
thinkplot.decorate(xlabel='Sigma',
ylabel='PMF',
title='Posterior marginal distribution')
"""
Explanation: Grid algorithm
We can solve the problem first using a grid algorithm, with uniform priors for slope, intercept, and sigma.
As an exercise, fill in this likelihood function, then test it using the code below.
Your results will depend on the random data you generated, but in general you should find that the posterior marginal distributions peak near the actual parameters.
End of explanation
"""
import pymc3 as pm
pm.GLM
thinkplot.plot(xs, ys)
thinkplot.decorate(xlabel='x',
ylabel='y')
import pymc3 as pm
with pm.Model() as model:
"""Fill this in"""
# Solution
with pm.Model() as model:
slope = pm.Uniform('slope', -4, 4)
inter = pm.Uniform('inter', -4, 4)
sigma = pm.Uniform('sigma', 0, 2)
y_est = slope*xs + inter
y = pm.Normal('y', mu=y_est, sd=sigma, observed=ys)
trace = pm.sample_prior_predictive(100)
# Solution
for y_prior in trace['y']:
thinkplot.plot(xs, y_prior, color='gray', linewidth=0.5)
thinkplot.decorate(xlabel='x',
ylabel='y')
# Solution
with pm.Model() as model:
slope = pm.Uniform('slope', -4, 4)
inter = pm.Uniform('inter', -4, 4)
sigma = pm.Uniform('sigma', 0, 2)
y_est = slope*xs + inter
y = pm.Normal('y', mu=y_est, sd=sigma, observed=ys)
trace = pm.sample(1000, tune=2000)
# Solution
pm.traceplot(trace);
"""
Explanation: MCMC
Implement this model using MCMC. As a starting place, you can use this example from Computational Statistics in Python.
You also have the option of using the GLM module, described here.
End of explanation
"""
|
lowcloudnine/singularity-spark | ipython_notebooks/schiefjm/Elasticsearch/elasticsearch -- curl examples.ipynb | apache-2.0 | %%bash
curl -XGET "http://search-01.ec2.internal:9200/"
"""
Explanation: Using cURL with Elasticsearch
The introductory documents and tutorials all use cURL (hereafter referred to by its command-line name, curl) to interact with Elasticsearch and demonstrate what is possible and what is returned. Below is a short collection of these exercises with some explanations.
Hello World!
This first example for elasticsearch is almost always a simple get with no parameters. It is a simple way to check to see if the environment and server are set and functioning properly. Hence, the reason for the title.
The examples are using an AWS instance, the user will need to change the server to either "localhost" for their personal machine or the URL for the elasticsearch server they are using.
End of explanation
"""
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/_count'
"""
Explanation: Count
Counting is faster than searching and should be used when the actual results are not needed. From "ElasticSearch Cookbook - Second Edition":
It is often required to return only the count of the matched results and not the results themselves. The advantages of using a count request is the performance it offers and reduced resource usage, as a standard search call also returns hits count.
The simplest count is a count of all the documents in elasticsearch.
End of explanation
"""
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gdelt1979/_count'
"""
Explanation: The second type of simple count is to count by index. If the index is gdelt1979 then:
Example 1
End of explanation
"""
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_count'
"""
Explanation: or if the index is the Global Summary of the Day data, i.e. gsod then:
Example 2
End of explanation
"""
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_count?pretty'
"""
Explanation: If the user prefers a nicer looking output then a request to make it pretty is in order.
End of explanation
"""
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_search'
"""
Explanation: Count Summary
Keep in mind counts can be as complicated as searches. Just changing _count to _search and vice versa changes how elasticsearch handles the request.
With that said it is now time to show and develop some search examples.
Search
Search is the main use for elasticsearch, hence the name and where the bulk of the examples will be. This notebook will attempt to take the user through examples that show only one new feature at a time. This will hopefully allow the user to see the order of commands which is unfortuantely important to elasticsearch.
As with count above it will start with a simple example.
End of explanation
"""
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_search?pretty'
"""
Explanation: By default elasticsearch returns 10 documents for every search. As is evident the pretty option used for count above is needed here.
End of explanation
"""
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_search?pretty' -d '
{
"size": "1"
}'
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_search?pretty' -d '
{
"_source": ["Max Temp"],
"size": "2"
}'
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_search?pretty' -d '
{
"query": {
"filtered": {
"filter": {
"range": {
"Date": {
"gte": "2007-01-01",
"lte": "2007-01-01"
}
}
}
}
},
"_source": ["Max Temp"],
"size": "1"
}'
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_search?pretty' -d '
{
"query": {
"filtered": {
"query": { "match_all": {} },
"filter": {
"range": {
"Date": {
"gte": "2007-01-01",
"lte": "2007-12-31"
}
}
}
}
},
"size": "1"
}'
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_count' -d '
{
"query": {
"filtered": {
"filter": {
"range": {
"Date": {
"gte": "2007-01-01",
"lte": "2007-01-31"
}
}
}
}
}
}'
%%bash
curl -XGET 'http://search-01.ec2.internal:9200/gsod/_search?pretty' -d '
{
"query": {
"filtered": {
"query": { "match_all": {} },
"filter": {
"range": {
"Date": {
"gte": "2007-01-01",
"lte": "2007-01-31"
}
}
}
}
},
"_source": ["Mean Temp", "Min Temp", "Max Temp"],
"size": "563280"
}' > temps_200701.txt
import json

def _parse_temps(lines):
    """Collect (min, mean, max) temperature lists from Elasticsearch hit lines.

    Only lines containing "_source" are parsed; each such line is assumed to
    carry a JSON object after a fixed 16-character prefix, with one trailing
    character (comma/newline) stripped -- TODO confirm against the actual
    dump format.  Readings outside (-300, 300) are treated as sentinel or
    error values and discarded.
    """
    mean_temps = []
    max_temps = []
    min_temps = []
    for raw in lines:
        if "_source" in raw:
            record = json.loads(raw[16:-1])
            min_tmp = float(record['Min Temp'])
            if -300 < min_tmp < 300:
                min_temps.append(min_tmp)
            mean_tmp = float(record['Mean Temp'])
            # Bug fix: the original re-tested min_tmp here, so the mean was
            # kept or dropped based on the minimum instead of the mean itself.
            if -300 < mean_tmp < 300:
                mean_temps.append(mean_tmp)
            max_tmp = float(record['Max Temp'])
            if -300 < max_tmp < 300:
                max_temps.append(max_tmp)
    return min_temps, mean_temps, max_temps

# NOTE(review): the curl cell above writes "temps_200701.txt" while this cell
# reads "temps_2007.txt" -- confirm which file name is intended.
with open("temps_2007.txt", "r") as f:
    min_temps, mean_temps, max_temps = _parse_temps(f)

print("From {} observations the temperatures for 2007 are:"\
    .format(len(mean_temps)))
print("Min Temp: {:.1f}".format(min(min_temps)))
print("Mean Temp: {:.1f}".format(sum(mean_temps)/len(mean_temps)))
print("Max Temp: {:.1f}".format(max(max_temps)))
"""
Explanation: Much better but it can be easily seen that if this notebook continues with the elasticsearch default for number of documents it will become very unweldy very quickly. So, let's use the size option.
End of explanation
"""
|
chrismcginlay/crazy-koala | jupyter/06_conditional_loops.ipynb | gpl-3.0 | word = input("What is the magic word? ")
while word!="abracadabra":
word = input("Wrong. Try again. What is the magic word? ")
print("Correct")
"""
Explanation: 6 Conditional Loops
Loops
Loops are a big deal in computing and robotics! Think about the kinds of tasks that computers and robots often get used for:
- jobs that are dangerous
- jobs where accuracy is important
- jobs that are repetitive and where a human might get bored!
Loops are basically how a computer programmer can make the computer do the same thing over and over again. There are two main kinds of loops
* Conditional Loops
* Fixed Loops
Simplest Conditional Loop
The most basic conditional loop is one that goes on forever. Think about a task where you do the same sequence of operations endlessly - for example production line work often involves seemingly endless loops:
* pick up product from conveyor belt
* check product looks OK
* put product in packing crate
Factories often use robots to do the kind of repetitive work pictured above - for around £20,000 a reconditioned robotic system could pack these crates.
In Python we can use a while loop to keep doing the same thing over and over:
python
while True:
pick_up_product()
check_product()
put_product_in_crate()
The part in brackets () is called the condition. In this case, we have set the condition to be True permananently. This means the loop would go on forever, which is sometimes what you want. Later we will see how if the condition in the brackets () ever stops being True then the loop would stop.
A Conditional Loop with an Actual Condition!
A conditional loop is repeatedly carried out a block of code until a condition of some kind is met.
You are now going to run these two small programs. They should be ready to run, you don't need to edit them.
Run the first program. Once running, you should see that it repeatedly runs its code block until you supply the magic word. Get it wrong first, then enter the magic word
End of explanation
"""
happy = input("Are you happy? ")
while not(happy in ["yes","no"]):
print("I did not understand. ")
happy = input("Are you happy? ")
"""
Explanation: If you've done it right, your output should look like this:
What is the magic word? Foobar
Wrong. Try again. What is the magic word? abracadabra
Correct
In Python != means is not equal to
Run the second program. This one will keep repeating its code block until you type in one of the words in the list ["yes", "no"].
End of explanation
"""
happy = input("Are you happy? ")
while not(happy in ["yes","no"]): #Add to this list of words here
print("I did not understand. ")
happy = input("Are you happy? ")
"""
Explanation: If you've done it right, your output should look like this:
Are you happy? Naw.
I did not understand.
Are you happy? aye
I did not understand.
Are you happy? yes
By adding some more words to the list ["yes", "no] edit the program below to accept "Yes", "No" and test it by running it a few times
End of explanation
"""
|
phoebe-project/phoebe2-docs | 2.3/tutorials/constraints_hierarchies.ipynb | gpl-3.0 | #!pip install -I "phoebe>=2.3,<2.4"
"""
Explanation: Advanced: Constraints and Changing Hierarchies
Setup
Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new Bundle.
End of explanation
"""
b.set_value('q', 0.8)
"""
Explanation: Changing Hierarchies
Some of the built-in constraints depend on the system hierarchy, and will automatically adjust to reflect changes to the hierarchy.
For example, the masses depend on the period and semi-major axis of the parent orbit but also depend on the mass-ratio (q) which is defined as the primary mass over secondary mass. For this reason, changing the roles of the primary and secondary components should be reflected in the masses (so long as q remains fixed).
In order to show this example, let's set the mass-ratio to be non-unity.
End of explanation
"""
print("M1: {}, M2: {}".format(b.get_value(qualifier='mass', component='primary', context='component'),
b.get_value(qualifier='mass', component='secondary', context='component')))
"""
Explanation: Here the star with component tag 'primary' is actually the primary component in the hierarchy, so should have the LARGER mass (for a q < 1.0).
End of explanation
"""
b['mass@primary']
b.set_hierarchy('orbit:binary(star:secondary, star:primary)')
b['mass@primary@star@component']
print(b.get_value('q'))
print("M1: {}, M2: {}".format(b.get_value(qualifier='mass', component='primary', context='component'),
b.get_value(qualifier='mass', component='secondary', context='component')))
"""
Explanation: Now let's flip the hierarchy so that the star with the 'primary' component tag is actually the secondary component in the system (and so takes the role of numerator in q = M2/M1).
For more information on the syntax for setting hierarchies, see the Building a System Tutorial.
End of explanation
"""
print("M1: {}, M2: {}, period: {}, q: {}".format(b.get_value(qualifier='mass', component='primary', context='component'),
b.get_value(qualifier='mass', component='secondary', context='component'),
b.get_value(qualifier='period', component='binary', context='component'),
b.get_value(qualifier='q', component='binary', context='component')))
b.flip_constraint('mass@secondary@constraint', 'period')
print("M1: {}, M2: {}, period: {}, q: {}".format(b.get_value(qualifier='mass', component='primary', context='component'),
b.get_value(qualifier='mass', component='secondary', context='component'),
b.get_value(qualifier='period', component='binary', context='component'),
b.get_value(qualifier='q', component='binary', context='component')))
b.set_value(qualifier='mass', component='secondary', context='component', value=1.0)
print("M1: {}, M2: {}, period: {}, q: {}".format(b.get_value(qualifier='mass', component='primary', context='component'),
b.get_value(qualifier='mass', component='secondary', context='component'),
b.get_value(qualifier='period', component='binary', context='component'),
b.get_value(qualifier='q', component='binary', context='component')))
"""
Explanation: Even though under-the-hood the constraints are being rebuilt from scratch, they will remember if you have flipped them to solve for some other parameter.
To show this, let's flip the constraint for the secondary mass to solve for 'period' and then change the hierarchy back to its original value.
End of explanation
"""
|
jmunar/pymc3-kalman | notebooks/01_RandomWalkPlusObservationNoise.ipynb | apache-2.0 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
%matplotlib inline
# True values
T = 500 # Time steps
sigma2_eps0 = 3 # Variance of the observation noise
sigma2_eta0 = 10 # Variance in the update of the mean
# Simulate data
# Fixed seed so the simulated series is reproducible across runs.
np.random.seed(12345)
# np.random.normal takes a standard deviation, hence the **0.5 on the variances.
eps = np.random.normal(scale=sigma2_eps0**0.5, size=T)
eta = np.random.normal(scale=sigma2_eta0**0.5, size=T)
# Random walk: the latent level mu_t is the cumulative sum of the eta innovations.
mu = np.cumsum(eta)
# Observations: latent level plus observation noise.
y = mu + eps
# Plot the time series
fig, ax = plt.subplots(figsize=(13,2))
ax.fill_between(np.arange(T), 0, y, facecolor=(0.7,0.7,1), edgecolor=(0,0,1))
ax.set(xlabel='$T$', title='Simulated series');
"""
Explanation: An example-based introduction to pymc3-kalman
In this notebook, we will make a Bayesian estimate of the parameters governing a one-dimensional random walk, including observation noise. It is based on this post. Here, we use a generic Kalman filter representation.
The equations governing the system are the following:
\begin{align}
y_t & = \mu_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, \sigma_\varepsilon^2) \
\mu_{t+1} & = \mu_t + \eta_t, \qquad \eta_t \sim N(0, \sigma_\eta^2) \
\end{align}
Let's create a deterministic "random" configuration:
End of explanation
"""
import kalman
import pymc3
from pymc3 import Model, HalfCauchy
with Model() as model:
ɛ_σ2 = HalfCauchy(name='ɛ_σ2', beta=1e6)
η_σ2 = HalfCauchy(name='η_σ2', beta=1e6)
Z = np.array(1.)
d = np.array(0.)
H = ɛ_σ2
T = np.array(1.)
c = np.array(0.)
R = np.array(1.)
Q = η_σ2
a0 = np.array(0.)
P0 = np.array(1e6)
ts = kalman.KalmanFilter('ts', Z, d, H, T, c, R, Q, a0, P0, observed=y)
"""
Explanation: As a reminder, the equations describing a system in State Space Form are the measurement equation
$$
\boldsymbol y_t = \boldsymbol Z_t \boldsymbol\alpha_t + \boldsymbol d_t
+ \boldsymbol\varepsilon_t ,\qquad t=1,\ldots,T
\qquad\qquad \boldsymbol\varepsilon_t \sim \mathcal{N}(0, \boldsymbol H_t)\ ,
$$
and a transition equation:
$$
\boldsymbol\alpha_t = \boldsymbol T_t \boldsymbol\alpha_{t-1} + \boldsymbol c_t
+ \boldsymbol R_t \boldsymbol \eta_t ,\qquad t=1,\ldots,T
\qquad\qquad \boldsymbol\eta_t \sim \mathcal{N}(0, \boldsymbol Q_t)\ ,
$$
where $\boldsymbol y_t$ is the observable vector, of length $n$, and $\boldsymbol\alpha_t$ is the state vector, of length $m$.
Our system can be readily written in these terms, by setting $\boldsymbol\alpha_t = \mu_t$. Then,
\begin{align}
\boldsymbol Z_t &= \left(\begin{array}{c} 1 \end{array}\right) &
\boldsymbol d_t &= 0 &
\boldsymbol H_t &= \left(\begin{array}{c} \sigma_\varepsilon^2 \end{array}\right)
\end{align}
\begin{align}
\boldsymbol T_t &= \left(\begin{array}{c} 1 \end{array}\right) &
\boldsymbol c_t &= 0 &
\boldsymbol R_t &= \left(\begin{array}{c} 1 \end{array}\right) &
\boldsymbol Q_t &= \left(\begin{array}{c} \sigma_\eta^2 \end{array}\right)
\end{align}
Using the kalman package, it is straightforward to create a probabilistic model. For that, we must choose priors for the 2 parameters, $\sigma_\varepsilon^2$ and $\sigma_\eta^2 $, and an initial guess for $\boldsymbol y_0$. As both the observation vector and the state space are of size 1, it is possible to define all the matrices as scalar, reducing the computational complexity:
End of explanation
"""
import re
with model:
MAP = pymc3.find_MAP()
# We need to undo the log transform
{re.sub('_log__', '', k): np.exp(v) for k,v in MAP.items()}
"""
Explanation: First, we will look at the maximum a posteriori point (MAP):
End of explanation
"""
with model:
trace = pymc3.sample()
pymc3.traceplot(trace);
"""
Explanation: The result seems a bit off with respect to the known true values. Let's sample the posterior to obtain a distribution of possible values for these parameters:
End of explanation
"""
plt.scatter(trace['η_σ2'], trace['ɛ_σ2'])
plt.setp(plt.gca(), 'xlabel', 'Update local level variance',
'ylabel', 'Observation noise variance');
"""
Explanation: As a final remark, note that a simple scatter plot of the samples shows that the chosen parametrization is probably not optimal, with a high correlation:
End of explanation
"""
|
kimkipyo/dss_git_kkp | 통계, 머신러닝 복습/160518수_5일차_미적분Calculus과 최적화Optimization/6.NumPy 패키지의 난수 관련 명령어.ipynb | mit | np.random.seed(0)
"""
Explanation: NumPy 패키지의 난수 관련 명령어
numpy.random 서브패키지
numpy.random 서브패키지는 NumPy 의 랜덤 넘버 생성 관련 함수를 모아 놓은 것으로 다음과 같은 함수를 제공한다.
seed: pseudo random 상태 설정
shuffle: 조합(combination)
choice: 순열(permutation)
random_integers: uniform integer
rand: uniform
randn: Gaussina normal
컴퓨터에서 생성한 난수는 랜덤처럼 보이지만 정해진 알고리즘에 의해 생성되는 규칙적인 순열이다. seed 명령은 이러한 순열을 시작하는 초기값을 설정하여 난수가 정해전 순서로 나오게 만든다.
End of explanation
"""
x = np.arange(10)
np.random.shuffle(x)
x
"""
Explanation: shuffle 명령은 주어진 배열의 순서를 뒤섞는다.
End of explanation
"""
np.random.choice(5, 5, replace=True) # same as shuffle
np.random.choice(5, 3, replace=False)
np.random.choice(5, 10) # the default for replace is True
np.random.choice(5, 10, p=[0.1, 0, 0.3, 0.6, 0])
"""
Explanation: choice 명령은 단순히 순서를 바꾸는 것이 아니라 size 인수로 정해진 갯수만큼 원소를 골라내는 역할을 한다. 이 때 replace 인수를 True로 설정하여 한 번 골랐던 원소를 다시 고를 수 있도록 할 수 있다. 또한 p 인수를 이용하여 각 원소가 선택될 확률도 설정할 수 있다.
End of explanation
"""
# NOTE: np.random.random_integers is deprecated (removed in NumPy >= 1.17);
# the modern equivalent is np.random.randint(-100, 101, 50) (high is exclusive).
x = np.random.random_integers(-100, 100, 50)
sns.distplot(x, rug=True);
"""
Explanation: random_integers 명령은 주어진 범위 사이의 정수를 랜덤하게 생성한다.
End of explanation
"""
x = np.random.rand(10000)
sns.distplot(x);
"""
Explanation: rand 명령은 0과 1사이의 값을 균일하게 생성한다.
End of explanation
"""
x = np.random.randn(1000)
sns.distplot(x);
"""
Explanation: randn 명령은 표준 정규 분포 값을 균일하게 생성한다.
End of explanation
"""
np.unique([11, 11, 2, 2, 34, 34])
a = np.array([[1, 1], [2, 3]])
a
a = np.array(['a', 'b', 'c', 'b', 'a'])
index, count = np.unique(a, return_counts=True)
index
count
np.bincount([1, 1, 2, 2, 3, 3, 4, 6, 6], minlength=6)
"""
Explanation: NumPy의 카운트 함수
NumPy는 생성된 난수의 통계 정보를 구하는 카운트(count)함수들도 제공한다.
정수가 각각 몇개씩 생성되었는지 알고 싶은 경우에느 unique 명령이나 bincount 명령을 사용한다. unique 명령은 연속적인 정수를 가정하지 않고 있지만 bincount 명령은 0 부터 시작한 연속적인 정수를 가정하고 있어서 혹시 전혀 생성되지 않는 정수도 감안할 수 있다.
실제로는 bincount를 더 많이 쓴다.
End of explanation
"""
|
kaivalyar/Sensei | TensorFlowIntro/IntroToTensorFlow.ipynb | mit | import tensorflow as tf
3 # a rank 0 tensor; this is a scalar with shape []
[1. ,2., 3.] # a rank 1 tensor; this is a vector with shape [3]
[[1., 2., 3.], [4., 5., 6.]] # a rank 2 tensor; a matrix with shape [2, 3]
[[[1., 2., 3.]], [[7., 8., 9.]]] # a rank 3 tensor with shape [2, 1, 3]
"""
Explanation: Introduction to TensorFlow
Based on the official TensorFlow Tutorial
Kaivalya Rawal and Rohan James
Agenda
Synopsis
Installation and Import
Computational Graph Nodes
Training
High Level API
Synopsis
Google library for 'tensor' operations
Applications beyond ML
Most basic unit: matrix-like Tensors
Graph of transformations on defined n-rank Tensors: 2 step process of building the computational graph, and then running it
TensorBoard visualizations
Installation and Import
Install steps on TensorFlow Website, or simple pip install in a virtual environment
TensorBoard comes built-in
Activate to access TensorBoard with logdirectory
End of explanation
"""
n1 = tf.constant(2.0, tf.float32)
n2 = tf.constant(4.0) # type?
print(n1)
print(n2)
"""
Explanation: Nodes
Make up the computational graph
Each node takes in zero or more tensors as input, and gives one tensor as output
Simplest node - constant. Zero in, Single constant out.
Addition
Placeholder
Variable
End of explanation
"""
sess = tf.Session()
print(sess.run([n1, n2]))
"""
Explanation: This is just the build step. To evaluate the nodes, run the graph within a session.
End of explanation
"""
n3 = tf.add(n1, n2)
print('Node 3', n3)
print('sess.run(n3)', sess.run(n3))
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b # + provides a shortcut for tf.add(a, b)
print(sess.run(adder_node, {a: 3, b:4.5}))
print(sess.run(adder_node, {a: [1,3], b: [2, 4]}))
"""
Explanation: addition node
End of explanation
"""
add_and_half = adder_node / 2
print(sess.run(add_and_half, {a:0.5, b:-1.5}))
"""
Explanation: Other operations
End of explanation
"""
m = tf.Variable([.3], tf.float32)
c = tf.Variable([-.3], tf.float32)
x = tf.placeholder(tf.float32)
linear_model = m * x + c
"""
Explanation: Tunable variables:
End of explanation
"""
init = tf.global_variables_initializer()
sess.run(init)
"""
Explanation: Unlike constants - whose values never change, variables aren't initialized by default.
End of explanation
"""
print(sess.run(linear_model, {x:[1,2,3,0,-5,20]}))
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))
n_slope = tf.assign(m, [-0.9])
n_const = tf.assign(c, [1.5])
sess.run([n_slope, n_const])
print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))
"""
Explanation: Evaluating the lines ordinate for various x values simultaneously:
End of explanation
"""
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init) # reset values to incorrect defaults.
for i in range(1000):
sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]})
print(sess.run([m, m]))
"""
Explanation: Training
Gradient Descent optimization
End of explanation
"""
import numpy as np
features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x":x}, y, batch_size=4, num_epochs=1000)
score = estimator.evaluate(input_fn=input_fn)
print(score)
"""
Explanation: Experimenting with different (learning rate) step values
High Level API
(still figuring it out)
Linear Regression Example
End of explanation
"""
|
ageron/tensorflow-safari-course | 03_basics_collections_ex3.ipynb | apache-2.0 | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.__version__
"""
Explanation: Try not to peek at the solutions when you go through the exercises. ;-)
First let's make sure this notebook works well in both Python 2 and Python 3:
End of explanation
"""
>>> graph = tf.Graph()
>>> with graph.as_default():
... x = tf.Variable(100)
... c = tf.constant(5)
... increment_op = tf.assign(x, x + c)
...
"""
Explanation: From notebook 2 on variables:
End of explanation
"""
>>> graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
tf.GraphKeys.GLOBAL_VARIABLES
>>> graph.add_to_collection("my_collection", c)
>>> graph.get_collection("my_collection")
"""
Explanation: Collections
End of explanation
"""
>>> graph = tf.Graph()
>>> with graph.as_default():
... a = tf.constant(3)
... b = tf.constant(5)
... s = a + b
...
>>> graph.get_operations()
>>> graph.get_operation_by_name("add") is s.op
>>> graph.get_tensor_by_name("add:0") is s
>>> list(s.op.inputs)
>>> list(s.op.outputs)
"""
Explanation: Navigating the Graph
End of explanation
"""
>>> graph = tf.Graph()
>>> with graph.as_default():
... a = tf.constant(3, name='a')
... b = tf.constant(5, name='b')
... s = tf.add(a, b, name='s')
...
>>> graph.get_operations()
"""
Explanation: Naming Operations
End of explanation
"""
graph = tf.Graph()
with graph.as_default():
x1 = tf.Variable(1.0, name="x1")
x2 = tf.Variable(2.0, name="x2")
x3 = tf.Variable(3.0, name="x3")
x4 = tf.Variable(4.0, name="x4")
"""
Explanation: Exercise 3
3.1) Create a graph with four variables named "x1", "x2", "x3" and "x4", with initial values 1.0, 2.0, 3.0 and 4.0 respectively, then write some code that prints the name of every operation in the graph.
3.2) Notice that for each Variable, TensorFlow actually created 4 operations:
* the variable itself,
* its initial value,
* an assignment operation to assign the initial value to the variable,
* and a read operation that you can safely ignore for now (for details, check out mrry's great answer to this question).
Get the collection of global variables in the graph, and for each one of them use get_operation_by_name() to find its corresponding /Assign operation (just append "/Assign" to the variable's name).
Hint: each object in the collection of global variables is actually a Tensor, not an Operation (it represents the variable's output, i.e., its value), so its name ends with ":0". You can get the Operation through the Tensor's op attribute: its name will not end with ":0"
3.3) Add a tf.group() to your graph, containing all the assignment operations you got in question 3.2. Congratulations! You have just reimplemented tf.global_variables_initializer().
Start a Session(), run your group operation, then evaluate each variable and print out the result.
3.4) For each assignment operation you fetched earlier, get its second input and store it in a list. Next, start a session and evaluate that list (using sess.run()). Print out the result: you should see [1.0, 2.0, 3.0, 4.0]. Can you guess why?
Try not to peek at the solution below before you have done the exercise! :)
Exercise 3 - Solution
3.1)
End of explanation
"""
gvars = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
init_assign_ops = [graph.get_operation_by_name(gvar.op.name + "/Assign")
for gvar in gvars]
init_assign_ops
"""
Explanation: 3.2)
End of explanation
"""
with graph.as_default():
init = tf.group(*init_assign_ops)
with tf.Session(graph=graph):
init.run()
print(x1.eval())
print(x2.eval())
print(x3.eval())
print(x4.eval())
"""
Explanation: 3.3)
End of explanation
"""
init_val_ops = [init_assign_op.inputs[1]
for init_assign_op in init_assign_ops]
with tf.Session(graph=graph) as sess:
print(sess.run(init_val_ops))
"""
Explanation: 3.4)
End of explanation
"""
|
SIMEXP/Projects | metaad/network_level_meta_DMN.ipynb | mit | #seed_data = pd.read_csv('20160128_AD_Decrease_Meta_Christian.csv')
# Load the Cambridge BASC multi-scale symmetric parcellations (7/12/20/36 networks).
template_036= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale036.nii.gz')
template_020= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale020.nii.gz')
template_012= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale012.nii.gz')
template_007= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale007.nii.gz')
# Analysis parameters: parcellation scale and whether to keep only DMN seeds.
scale = '36'
flag_dmn = False
# NOTE(review): only scales '7' and '36' are selectable here even though the
# 12- and 20-network templates are loaded above -- confirm this is intended.
if scale == '7':
    template = template_007
else:
    template = template_036
# Pick the meta-analysis coordinate table to analyse (uncomment exactly one).
#seed_data = pd.read_csv('20160404_AD_Decrease_Meta_DMN_nonDMN_Final.csv')
#seed_data = pd.read_csv('20160404_AD_Increase_Meta_DMN_nonDMN_Final.csv')
#seed_data = pd.read_csv('20160205_MCI_Decrease_Meta_DMN_nonDMN_Final.csv')
#seed_data = pd.read_csv('20160204_MCI_Increase_Meta_DMN_nonDMN_Final.csv')
#seed_data = pd.read_csv('20160404_ADMCI_Decrease_Meta_DMN_nonDMN_Final.csv')
seed_data = pd.read_csv('20160404_ADMCI_Increase_Meta_DMN_nonDMN_Final.csv')
# Output file names must match the chosen input table and flag_dmn setting
# (uncomment the matching pair in each branch).
if flag_dmn:
    #output_stats = 'AD_decrease_scale'+scale+'_stats_seedDMN.mat'
    #output_vol = 'AD_decrease_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    #output_stats = 'AD_increase_scale'+scale+'_stats_seedDMN.mat'
    #output_vol = 'AD_increase_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    #output_stats = 'MCI_decrease_scale'+scale+'_stats_seedDMN.mat'
    #output_vol = 'MCI_decrease_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    #output_stats = 'MCI_increase_scale'+scale+'_stats_seedDMN.mat'
    #output_vol = 'MCI_increase_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    #output_stats = 'ADMCI_decrease_scale'+scale+'_stats_seedDMN.mat'
    #output_vol = 'ADMCI_decrease_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    output_stats = 'ADMCI_increase_scale'+scale+'_stats_seedDMN.mat'
    output_vol = 'ADMCI_increase_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
else:
    #output_stats = 'AD_decrease_scale'+scale+'_stats_nonDMN.mat'
    #output_vol = 'AD_decrease_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    #output_stats = 'AD_increase_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol = 'AD_increase_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    #output_stats = 'MCI_decrease_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol = 'MCI_decrease_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    #output_stats = 'MCI_increase_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol = 'MCI_increase_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    #output_stats = 'ADMCI_decrease_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol = 'ADMCI_decrease_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    output_stats = 'ADMCI_increase_scale'+scale+'_stats_seednonDMN.mat'
    output_vol = 'ADMCI_increase_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
seed_data
seed_data[seed_data['Seed_cambridge']==5][['x','y','z']].values.shape
"""
Explanation: Load data
End of explanation
"""
from numpy.linalg import norm
# find the closest network to the coordo
def get_nearest_net(template, world_coor):
    """Return the network label of the template voxel closest (in mm) to world_coor."""
    data = template.get_data()
    # (3, n_vox) voxel indices of every labelled voxel in the parcellation.
    vox_ijk = np.array(np.where(data > 0))
    # Map voxel indices to world (MNI mm) coordinates via the template affine.
    vox_mni = apply_affine(template.get_affine(), vox_ijk.T)
    dist = norm(vox_mni - np.array(world_coor), axis=1)
    # First index at the minimum distance (same tie-breaking as a where/min scan).
    nearest = int(np.argmin(dist))
    i, j, k = vox_ijk[:, nearest]
    return int(data[i, j, k])
#get_nearest_net(template,[-15,-10,-10])
# Convert from world MNI space to the EPI voxel space
def get_world2vox(template, mni_coord):
    # Inverse affine maps mm -> voxel indices, rounded to the nearest voxel.
    # NOTE(review): the trailing "+ [1]" shifts every voxel index by one, yet
    # the result is later used for 0-based numpy indexing -- possibly a
    # 1-based-tool convention carried over; confirm the offset is intended.
    return np.round(apply_affine(npl.inv(template.get_affine()),mni_coord)+[1])
# One accumulator per network label found in the template (initialised to 0).
network_votes = np.zeros((np.max(template.get_data().flatten()),1))[:,0]
network_votes
# get the voxel coordinates of the MNI seeds
# Keep only DMN seeds (Cambridge network 5) or only non-DMN seeds.
if flag_dmn:
    seed_data = seed_data[seed_data['Seed_cambridge']==5]
else:
    seed_data = seed_data[seed_data['Seed_cambridge']!=5]
mni_space_targets = seed_data[['x','y','z']].values
vox_corrd = get_world2vox(template,mni_space_targets)
votes = []
n_outofbrain=0
# Assign each seed to a network: direct voxel lookup when the voxel falls
# inside the parcellation, nearest-labelled-voxel search otherwise.
# NOTE(review): vox_corrd is float (np.round keeps dtype); older numpy
# accepted float indices -- modern numpy would need .astype(int).
for i in range(vox_corrd.shape[0]):
    net_class = template.get_data()[vox_corrd[i,0],vox_corrd[i,1],vox_corrd[i,2]]
    if net_class==0:
        n_outofbrain+=1
        votes.append(get_nearest_net(template,[mni_space_targets[i,0],mni_space_targets[i,1],mni_space_targets[i,2]]))
    else:
        votes.append(net_class)
print('Out of brain coordinates: '+ str(n_outofbrain))
votes = np.array(votes)
# take one vote for each study only
uni_pmid = np.unique(seed_data['PMID'])
votes.shape
# Per-study vote ratios: one row per study (PMID), one column per network.
frequency_votes=np.zeros((len(uni_pmid),len(network_votes)))
#for i in range(len(uni_pmid)):
#    frequency_votes = np.hstack((frequency_votes,np.unique(votes[(seed_data['PMID']==uni_pmid[i]).values])))
for i in range(len(uni_pmid)):
    aa = votes[(seed_data['PMID']==uni_pmid[i]).values]
    for j in aa:
        # Fraction of this study's seeds assigned to network j (labels are 1-based).
        frequency_votes[i,j-1] = (aa == j).sum()/float(len(aa))
print frequency_votes
# compile the stats for each network
#for i in range(1,len(network_votes)+1):
#    network_votes[i-1] = np.mean(frequency_votes==i)
# Observed statistic: mean per-network vote ratio across studies.
network_votes = np.mean(frequency_votes,axis=0)
print network_votes
#vox_corrd[np.array(votes)==5,:]
# Sanity checks of the nearest-network lookup at two example coordinates.
get_nearest_net(template,[-24,-10, 22])
get_nearest_net(template,[17, -14, -22])
def gen1perm(n_seeds, proba):
    """Simulate one study: draw n_seeds network labels with probabilities
    `proba` and return the per-network vote ratios (fractions summing to 1)."""
    ratios = np.zeros_like(proba)
    # Same RNG call as before so results are reproducible under a fixed seed.
    draws = np.random.choice(range(0, len(proba)), size=(n_seeds, 1), p=proba)
    labels, counts = np.unique(draws, return_counts=True)
    ratios[labels] = counts / float(draws.size)
    return ratios
# check if the proba is respected
#print proba_networks
#gen1perm(10000,proba_networks)
#ange(0,len(proba_networks))
"""
Explanation: Get the number of coordinates reported for each network
End of explanation
"""
'''
from numpy.random import permutation
def permute_table(frequency_votes,n_iter):
h0_results = []
for n in range(n_iter):
perm_freq = frequency_votes.copy()
#print perm_freq
for i in range(perm_freq.shape[0]):
perm_freq[i,:] = permutation(perm_freq[i,:])
#print perm_freq
h0_results.append(np.mean(perm_freq,axis=0))
return np.array(h0_results).T
'''
def compute_freq(votes, data_ratio_votes, seed_data, proba, n_perm=10000):
    """Build the null (chance) distribution of mean per-network vote ratios.

    Each permutation re-simulates every study (one row per unique PMID): a
    study that reported k seed coordinates draws k random network labels with
    probabilities `proba` (via gen1perm). Ratios are averaged over studies and
    each network's null values are sorted in decreasing order so p-values can
    be read off the tail.

    Parameters
    ----------
    votes : 1-D array of observed network labels, aligned row-by-row with seed_data.
    data_ratio_votes : 2-D array (n_studies, n_networks); only its shape is used.
    seed_data : DataFrame with a 'PMID' column identifying each seed's study.
    proba : 1-D array of per-network sampling probabilities (sums to 1).
    n_perm : number of permutations (default 10000, the original hard-coded value).

    Returns
    -------
    freq_data : array (n_networks, n_perm), each row sorted high-to-low.
    """
    # take one vote for each study only
    uni_pmid = np.unique(seed_data['PMID'])
    # The number of seeds per study is permutation-invariant: compute it once
    # instead of re-masking the votes array inside the permutation loop (the
    # original recomputed the pandas mask n_perm times per study).
    n_seeds_per_study = [len(votes[(seed_data['PMID'] == pmid).values])
                         for pmid in uni_pmid]
    ratio_votes = np.zeros((data_ratio_votes.shape[0], data_ratio_votes.shape[1], n_perm))
    for idx_perm in range(n_perm):
        for i, n_seeds in enumerate(n_seeds_per_study):
            ratio_votes[i, :, idx_perm] = gen1perm(n_seeds, proba)
    # Average over studies, then sort each network's null values high-to-low.
    freq_data = np.mean(ratio_votes, axis=0)
    for i in range(freq_data.shape[0]):
        freq_data[i, :] = np.sort(freq_data[i, :])[::-1]
    return freq_data
# Total volume of the brain
total_volume = np.sum(template.get_data()>0)
# compute the proba of each network
# Chance level for each network = its share of the total labelled volume.
proba_networks=[]
for i in range(1,len(network_votes)+1):
    proba_networks.append(np.sum(template.get_data()==i)/(total_volume*1.))
proba_networks = np.array(proba_networks)
print np.sum(proba_networks)
print proba_networks
# generate random values
'''
def gen_rnd_hits(proba,n_seeds):
results_h0 = np.random.choice(range(0,len(proba)),size=(n_seeds,1000),p=proba)
#results_h0 = permute_table(frequency_votes,1000)
print results_h0.shape
ditributions = []
for i in range(frequency_votes.shape[1]):
results_h0[i,:] = np.sort(results_h0[i,:])[::-1]
#ditributions.append(one_way_pdf)
#return ditributions
return results_h0
'''
#dist_data = gen_rnd_hits(proba_networks,np.sum(network_votes))
# Null distribution of per-network frequencies (10000 permutations).
dist_data = compute_freq(votes,frequency_votes,seed_data,proba_networks)
# Visual check of the first network's null distribution.
plt.figure()
plt.hist(dist_data[0],bins=np.arange(0,1,.01))
plt.figure()
plt.plot(dist_data[0].T)
"""
Explanation: Generate random coordinates
The random coordinates are assigned to each network with a probability proportional to the network's volume relative to the total volume of the brain.
End of explanation
"""
def getpval_old(nhit, dist_data):
    """Legacy histogram-based permutation p-value (superseded by getpval).

    Bins the null distribution in steps of 0.01, locates the bin edge equal
    to round(nhit, 2), and counts null values falling at or above it, with a
    +1 correction in numerator and denominator so the p-value is never 0.
    """
    counts, edges = np.histogram(dist_data, bins=np.arange(0, 1, 0.01))
    idx_bin = np.where((edges >= round(nhit, 2)) & (edges <= round(nhit, 2)))[0][0]
    # Bug fix: the original summed counts[idx_bin:-1], silently dropping the
    # last histogram bin; include it so the tail count is complete.
    return (np.sum(counts[idx_bin:]) + 1) / (dist_data.shape[0] + 1.)
def getpval(target, dist_data):
    """Permutation p-value: fraction of null values strictly above `target`.

    Uses the standard (b + 1) / (m + 1) estimator so the p-value is never 0.
    """
    # The original sorted a copy of dist_data first, but counting values
    # above the target does not require sorting; drop the O(m log m) step.
    b = np.sum(dist_data > target)
    return ((b + 1.) / (dist_data.shape[0] + 1.))
print network_votes
# One-sided permutation p-value per network: observed mean ratio vs its null.
pval_results=[]
for i in range(0,len(dist_data)):
    pval_results.append(getpval(network_votes[i],dist_data[i,:]))
print pval_results
# Bar chart of raw (uncorrected) p-values per network.
plt.figure()
plt.bar(np.arange(1,len(pval_results)+1),pval_results,width=0.5,align='center')
plt.xlabel('Networks')
plt.ylabel('p-value')
"""
Explanation: Generate the p-values for each network
End of explanation
"""
from proteus.matrix import tseries as ts
# Project per-network statistics back into voxel space for visualisation.
hitfreq_vol = ts.vec2map(network_votes,template)
# 1 - p so that "hotter" colours mean more significant in the stat map.
pval_vol = ts.vec2map(1-np.array(pval_results),template)
plt.figure()
plotting.plot_stat_map(hitfreq_vol,cut_coords=(0,0,0),draw_cross=False)
plt.figure()
plotting.plot_stat_map(pval_vol,cut_coords=(0,0,0),draw_cross=False)
"""
Explanation: Map the p-values to the template
End of explanation
"""
# correct for FDR (Benjamini-Hochberg, alpha = 0.05) across networks
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
fdr_test,fdr_pval=fdrcorrection0(pval_results,alpha=0.05)
print network_votes
print fdr_test
print fdr_pval
# save the results (stats as .mat, hit-frequency map as NIfTI)
path_output = '/home/cdansereau/git/Projects/metaad/maps_results/'
stats_results = {'Hits':network_votes ,'pvalues':pval_results,'fdr_test':fdr_test,'fdr_pval':fdr_pval,'n_outofbrain':n_outofbrain}
scipy.io.savemat(path_output + output_stats, stats_results)
hitfreq_vol.to_filename(os.path.join(path_output,output_vol))
#hitfreq_vol.to_filename(os.path.join('/home/cdansereau/git/Projects/metaad/maps_results/','AD_pval_vol.nii.gz'))
"""
Explanation: FDR correction of the p-values
End of explanation
"""
|
m2dsupsdlclass/lectures-labs | labs/06_deep_nlp/Character_Level_Language_Model_rendered.ipynb | mit | %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Character-level Language Modeling with LSTMs
This notebook is adapted from Keras' lstm_text_generation.py.
Steps:
Download a small text corpus and preprocess it.
Extract a character vocabulary and use it to vectorize the text.
Train an LSTM-based character level language model.
Use the trained model to sample random text with varying entropy levels.
Implement a beam-search deterministic decoder.
Note: fitting language models is compute intensive. It is recommended to do this notebook on a server with a GPU or powerful CPUs that you can leave running for several hours at once.
End of explanation
"""
from keras.utils.data_utils import get_file
URL = "https://s3.amazonaws.com/text-datasets/nietzsche.txt"
# Download (and cache) the corpus, then lowercase it.
corpus_path = get_file('nietzsche.txt', origin=URL)
text = open(corpus_path).read().lower()
print('Corpus length: %d characters' % len(text))
print(text[:600], "...")
# Flatten newlines and hold out the last 10% of characters as a test split.
text = text.replace("\n", " ")
split = int(0.9 * len(text))
train_text = text[:split]
test_text = text[split:]
"""
Explanation: Loading some text data
Let's use some publicly available philosophy:
End of explanation
"""
# Sorted list of every distinct character = the model's vocabulary.
chars = sorted(list(set(text)))
print('total chars:', len(chars))
# char -> index and index -> char lookup tables.
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
"""
Explanation: Building a vocabulary of all possible symbols
To simplify things, we build a vocabulary by extracting the list all possible characters from the full datasets (train and validation).
In a more realistic setting we would need to take into account that the test data can hold symbols never seen in the training set. This issue is limited when we work at the character level though.
Let's build the list of all possible characters and sort it to assign a unique integer to each possible symbol in the corpus:
End of explanation
"""
# Inspect: vocabulary size and the first 15 (char, index) mappings.
len(char_indices)
sorted(char_indices.items())[:15]
"""
Explanation: char_indices is a mapping to from characters to integer identifiers:
End of explanation
"""
# Inspect the reverse mapping (index -> character).
len(indices_char)
indices_char[52]
"""
Explanation: indices_char holds the reverse mapping:
End of explanation
"""
from collections import Counter
# Character frequency bar chart, most common character first.
counter = Counter(text)
chars, counts = zip(*counter.most_common())
indices = np.arange(len(counts))
plt.figure(figsize=(14, 3))
plt.bar(indices, counts, 0.8)
plt.xticks(indices, chars);
"""
Explanation: While not strictly required to build a language model, it's a good idea to have a look at the distribution of relative frequencies of each symbol in the corpus:
End of explanation
"""
max_length = 40
step = 3
def make_sequences(text, max_length=max_length, step=step):
    """Cut `text` into overlapping windows of max_length characters, each
    paired with the single character that immediately follows it."""
    starts = range(0, len(text) - max_length, step)
    windows = [text[i:i + max_length] for i in starts]
    followers = [text[i + max_length] for i in starts]
    return windows, followers
# Training windows every 3 chars; sparser test windows (step=10).
sequences, next_chars = make_sequences(train_text)
sequences_test, next_chars_test = make_sequences(test_text, step=10)
print('nb train sequences:', len(sequences))
print('nb test sequences:', len(sequences_test))
"""
Explanation: Let's cut the dataset into fake sentences at random with some overlap. Instead of cutting at random we could use an English-specific sentence tokenizer; this is explained at the end of this notebook. In the meantime, random substrings will be good enough to train a first language model.
End of explanation
"""
from sklearn.utils import shuffle
# Shuffle the two lists jointly so batches are not ordered by book position.
sequences, next_chars = shuffle(sequences, next_chars,
                                random_state=42)
sequences[0]
next_chars[0]
"""
Explanation: Let's shuffle the sequences to break some of the dependencies:
End of explanation
"""
n_sequences = len(sequences)
n_sequences_test = len(sequences_test)
voc_size = len(chars)
# Pre-allocate one-hot tensors: X is (n_sequences, max_length, voc_size);
# y is the one-hot next character, (n_sequences, voc_size).
X = np.zeros((n_sequences, max_length, voc_size),
             dtype=np.float32)
y = np.zeros((n_sequences, voc_size), dtype=np.float32)
X_test = np.zeros((n_sequences_test, max_length, voc_size),
                  dtype=np.float32)
y_test = np.zeros((n_sequences_test, voc_size), dtype=np.float32)
# TODO
# %load solutions/language_model_one_hot_data.py
# --- Solution cell: re-declares the arrays above, then fills them in. ---
n_sequences = len(sequences)
n_sequences_test = len(sequences_test)
voc_size = len(chars)
X = np.zeros((n_sequences, max_length, voc_size),
             dtype=np.float32)
y = np.zeros((n_sequences, voc_size), dtype=np.float32)
X_test = np.zeros((n_sequences_test, max_length, voc_size),
                  dtype=np.float32)
y_test = np.zeros((n_sequences_test, voc_size), dtype=np.float32)
# Set a 1 at (sequence, position, char index) for each input character,
# and at (sequence, char index) for each target character.
for i, sequence in enumerate(sequences):
    for t, char in enumerate(sequence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
for i, sequence in enumerate(sequences_test):
    for t, char in enumerate(sequence):
        X_test[i, t, char_indices[char]] = 1
    y_test[i, char_indices[next_chars_test[i]]] = 1
# Inspect shapes and one encoded example.
X.shape
y.shape
X[0]
y[0]
"""
Explanation: Converting the training data to one-hot vectors
Unfortunately the LSTM implementation in Keras does not (yet?) accept integer indices to slice columns from an input embedding by it-self. Let's use one-hot encoding. This is slightly less space and time efficient than integer coding but should be good enough when using a small character level vocabulary.
Exercise:
One hot encoded the training data sequences as X and next_chars as y:
End of explanation
"""
def perplexity(y_true, y_pred):
    """Compute the per-character perplexity of model predictions.

    y_true is one-hot encoded ground truth.
    y_pred is predicted likelihoods for each class.

    2 ** -mean(log2(p))
    """
    # TODO
    # Exercise placeholder: returns the trivial value 1. until implemented
    # (the reference implementation is loaded in the next cell).
    return 1.
# %load solutions/language_model_perplexity.py
def perplexity(y_true, y_pred):
    """Per-sample perplexity of predictions: 2 ** -mean(log2 p(true class)).

    y_true is the one-hot encoded ground truth; y_pred holds the predicted
    likelihood of each class for each sample.
    """
    # Likelihood the model assigned to the true class of each sample.
    p_true = (y_pred * y_true).sum(axis=1)
    avg_log2 = np.mean(np.log2(p_true))
    return 2 ** -avg_log2
# Small worked example: the true classes get likelihoods 0.9, 0.8 and 0.7.
y_true = np.array([
    [0, 1, 0],
    [0, 0, 1],
    [0, 0, 1],
])
y_pred = np.array([
    [0.1, 0.9, 0.0],
    [0.1, 0.1, 0.8],
    [0.1, 0.2, 0.7],
])
perplexity(y_true, y_pred)
"""
Explanation: Measuring per-character perplexity
The NLP community measures the quality of probabilistic model using perplexity.
In practice perplexity is just a base 2 exponentiation of the average negative log2 likelihoods:
$$perplexity_\theta = 2^{-\frac{1}{n} \sum_{i=1}^{n} log_2 (p_\theta(x_i))}$$
Note: here we define the per-character perplexity (because our model naturally makes per-character predictions). It is more common to report per-word perplexity. However, per-word perplexity is not as easy to compute: we would need to tokenize the strings into sequences of words and discard the whitespace and punctuation character predictions. In practice the whitespace character is by far the most frequent character, making our naive per-character perplexity lower than it would be if we ignored those predictions.
Exercise: implement a Python function that computes the per-character perplexity with model predicted probabilities y_pred and y_true for the encoded ground truth:
End of explanation
"""
# Perfect predictions give the minimal perplexity of 1.0.
perplexity(y_true, y_true)
"""
Explanation: A perfect model has a minimal perplexity of 1.0 (negative log likelihood of 0.0):
End of explanation
"""
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.optimizers import RMSprop
# Single-layer LSTM over one-hot characters, softmax over the vocabulary.
model = Sequential()
model.add(LSTM(128, input_shape=(max_length, voc_size)))
model.add(Dense(voc_size, activation='softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(optimizer=optimizer, loss='categorical_crossentropy')
"""
Explanation: Building recurrent model
Let's build a first model and train it on a very small subset of the data to check that it works as expected:
End of explanation
"""
def model_perplexity(model, X, y, verbose=0):
    """Run the model on X and return the perplexity of its predictions against y."""
    return perplexity(y, model.predict(X, verbose=verbose))
# Baseline: perplexity of the untrained (randomly initialised) model.
model_perplexity(model, X_test, y_test)
"""
Explanation: Let's measure the perplexity of the randomly initialized model:
End of explanation
"""
# Smoke test: fit one epoch on every 40th training sample only.
small_train = slice(0, None, 40)
model.fit(X[small_train], y[small_train], validation_split=0.1,
          batch_size=128, nb_epoch=1)
model_perplexity(model, X[small_train], y[small_train])
model_perplexity(model, X_test, y_test)
"""
Explanation: Let's train the model for one epoch on a very small subset of the training set to check that it's well defined:
End of explanation
"""
def sample_one(preds, temperature=1.0):
    """Draw the index of the next character from the softmax output.

    Lower temperatures sharpen the distribution (more confident, more
    repetitive samples); higher temperatures flatten it.
    """
    logits = np.log(np.asarray(preds).astype('float64')) / temperature
    scaled = np.exp(logits)
    scaled = scaled / np.sum(scaled)
    # One draw from a multinomial with a single trial is a multinoulli
    # sample; the argmax of the one-hot outcome is the sampled class index.
    one_hot = np.random.multinomial(1, scaled, size=1)
    return np.argmax(one_hot)
def generate_text(model, seed_string, length=300, temperature=1.0):
    """Generate `length` characters by sampling the model one char at a time.

    The seed string conditions the first predictions; each sampled character
    is appended to the conditioning window so the next prediction sees it.
    Windows shorter than max_length are left-padded with zero vectors.
    """
    generated = seed_string
    window = seed_string
    for _ in range(length):
        # One-hot encode the current conditioning window, right-aligned.
        x = np.zeros((1, max_length, voc_size))
        offset = max_length - len(window)
        for pos, ch in enumerate(window):
            x[0, pos + offset, char_indices[ch]] = 1.
        probas = model.predict(x, verbose=0)[0]
        sampled_char = indices_char[sample_one(probas, temperature)]
        generated += sampled_char
        # Slide the window: drop the oldest char, append the new one.
        window = window[1:] + sampled_char
    return generated
# Low temperature = conservative, repetitive text; higher = more surprising.
generate_text(model, 'philosophers are ', temperature=0.1)
generate_text(model, 'atheism is the root of ', temperature=0.8)
"""
Explanation: Sampling random text from the model
Recursively generate one character at a time by sampling from the distribution parameterized by the model:
$$
p_{\theta}(c_n | c_{n-1}, c_{n-2}, \ldots, c_0) \cdot p_{\theta}(c_{n-1} | c_{n-2}, \ldots, c_0) \cdot \ldots \cdot p_{\theta}(c_{0})
$$
The temperature parameter makes it possible to remove additional entropy (bias) into the parameterized multinoulli distribution of the output of the model:
End of explanation
"""
nb_epoch = 30
seed_strings = [
    'philosophers are ',
    'atheism is the root of ',
]
# Train epoch by epoch so we can report test perplexity and sample text
# at several temperatures after every epoch.
for epoch in range(nb_epoch):
    print("# Epoch %d/%d" % (epoch + 1, nb_epoch))
    print("Training on one epoch takes ~90s on a K80 GPU")
    model.fit(X, y, validation_split=0.1, batch_size=128, nb_epoch=1,
              verbose=2)
    print("Computing perplexity on the test set:")
    test_perplexity = model_perplexity(model, X_test, y_test)
    print("Perplexity: %0.3f\n" % test_perplexity)
    for temperature in [0.1, 0.5, 1]:
        print("Sampling text from model at %0.2f:\n" % temperature)
        for seed_string in seed_strings:
            print(generate_text(model, seed_string, temperature=temperature))
        print()
"""
Explanation: Training the model
Let's train the model and monitor the perplexity after each epoch and sample some text to qualitatively evaluate the model:
End of explanation
"""
# Reload with original casing (helps the sentence splitter), newlines flattened.
text_with_case = open(corpus_path).read().replace("\n", " ")
import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize
sentences = sent_tokenize(text_with_case)
# Approximate sentence-length distribution (word counts via whitespace split).
plt.hist([len(s.split()) for s in sentences], bins=30);
plt.title('Distribution of sentence lengths')
plt.xlabel('Approximate number of words');
"""
Explanation: Beam search for deterministic decoding
Exercise: adapt the sampling decoder to implement a deterministic decoder with a beam of k=30 sequences that are the most likely sequences based on the model predictions.
Better handling of sentence boundaries
To simplify things we used the lowercase version of the text and ignored sentence boundaries. This prevents our model from learning when to stop generating characters. If we want to train a model that can start generating text at the beginning of a sentence and stop at the end of one, we need to provide it with sentence boundary markers in the training set and use those special markers when sampling.
The following give an example of how to use NLTK to detect sentence boundaries in English text.
This could be used to insert an explicit "end_of_sentence" (EOS) symbol to mark separation between two consecutive sentences. This should make it possible to train a language model that explicitly generates complete sentences from start to end.
Use the following command (in a terminal) to install nltk before importing it in the notebook:
$ pip install nltk
End of explanation
"""
# Shortest sentences of at least 20 characters, for a quick quality check.
sorted_sentences = sorted([s for s in sentences if len(s) > 20], key=len)
for s in sorted_sentences[:5]:
    print(s)
"""
Explanation: The first few sentences detected by NLTK are too short to be considered real sentences. Let's have a look at short sentences with at least 20 characters:
End of explanation
"""
# The three longest detected sentences.
for s in sorted_sentences[-3:]:
    print(s)
"""
Explanation: Some long sentences:
End of explanation
"""
import nltk
nltk.download('gutenberg')
# Larger corpus: the NLTK Gutenberg book selection, newlines flattened.
book_selection_text = nltk.corpus.gutenberg.raw().replace("\n", " ")
print(book_selection_text[:300])
print("Book corpus length: %d characters" % len(book_selection_text))
"""
Explanation: The NLTK sentence tokenizer seems to do a reasonable job despite the weird casing and '--' signs scattered around the text.
Note that here we use the original case information because it can help the NLTK sentence boundary detection model make better split decisions. Our text corpus is probably too small to train a good sentence aware language model though, especially with full case information. Using larger corpora such as a large collection of public domain books or Wikipedia dumps. The NLTK toolkit also comes from corpus loading utilities.
The following loads a selection of famous books from the Gutenberg project archive:
End of explanation
"""
# 90/10 train/validation split by position (authors differ across the split).
split = int(0.9 * len(book_selection_text))
book_selection_train = book_selection_text[:split]
book_selection_validation = book_selection_text[split:]
"""
Explanation: Let's do an arbitrary split. Note the training set will have a majority of text that is not authored by the author(s) of the validation set:
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/text_classification/labs/word2vec.ipynb | apache-2.0 | # Use the chown command to change the ownership of repository to user.
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install -q tqdm
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import io
import itertools
import numpy as np
import os
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Dot, Embedding, Flatten, GlobalAveragePooling1D, Reshape
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
"""
Explanation: Word2Vec
Learning Objectives
Compile all steps into one function
Prepare training data for Word2Vec
Model and Training
Embedding lookup and analysis
Introduction
Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.
Note: This notebook is based on Efficient Estimation of Word Representations in Vector Space and
Distributed
Representations of Words and Phrases and their Compositionality. It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.
These papers proposed two methods for learning representations of words:
Continuous Bag-of-Words Model which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.
Continuous Skip-gram Model which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.
You'll use the skip-gram approach in this notebook. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This notebook also contains code to export the trained embeddings and visualize them in the TensorFlow Embedding Projector.
Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook
Skip-gram and Negative Sampling
While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of (target_word, context_word) where context_word appears in the neighboring context of target_word.
Consider the following sentence of 8 words.
The wide road shimmered in the hot sun.
The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a target_word that can be considered context word. Take a look at this table of skip-grams for target words based on different window sizes.
Note: For this tutorial, a window size of n implies n words on each side with a total window span of 2*n+1 words across a word.
The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words w<sub>1</sub>, w<sub>2</sub>, ... w<sub>T</sub>, the objective can be written as the average log probability
where c is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.
where v and v<sup>'<sup> are target and context vector representations of words and W is vocabulary size.
Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (10<sup>5</sup>-10<sup>7</sup>) terms.
The Noise Contrastive Estimation loss function is an efficient approximation for a full softmax. With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be simplified to use negative sampling.
The simplified negative sampling objective for a target word is to distinguish the context word from num_ns negative samples drawn from noise distribution P<sub>n</sub>(w) of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and num_ns negative samples.
A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the window_size neighborhood of the target_word. For the example sentence, these are few potential negative samples (when window_size is 2).
(hot, shimmered)
(wide, hot)
(wide, sun)
In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial.
Setup
End of explanation
"""
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
# Fixed seed for reproducible sampling; AUTOTUNE lets tf.data pick buffer sizes.
SEED = 42
AUTOTUNE = tf.data.experimental.AUTOTUNE
"""
Explanation: Please check your tensorflow version using the cell below.
End of explanation
"""
sentence = "The wide road shimmered in the hot sun"
# Lowercase, whitespace-split token list.
tokens = list(sentence.lower().split())
print(len(tokens))
"""
Explanation: Vectorize an example sentence
Consider the following sentence:
The wide road shimmered in the hot sun.
Tokenize the sentence:
End of explanation
"""
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
# First-occurrence order assigns each new token the next free index.
for token in tokens:
    if token not in vocab:
        vocab[token] = index
        index += 1
vocab_size = len(vocab)
print(vocab)
"""
Explanation: Create a vocabulary to save mappings from tokens to integer indices.
End of explanation
"""
# Invert the vocabulary: integer index -> token.
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
"""
Explanation: Create an inverse vocabulary to save mappings from integer indices to tokens.
End of explanation
"""
# Encode the sentence as its sequence of vocabulary indices.
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
"""
Explanation: Vectorize your sentence.
End of explanation
"""
window_size = 2
# All positive (target, context) pairs within +/- window_size tokens;
# negative_samples=0 because negatives are drawn separately below.
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
    example_sequence,
    vocabulary_size=vocab_size,
    window_size=window_size,
    negative_samples=0)
print(len(positive_skip_grams))
"""
Explanation: Generate skip-grams from one sentence
The tf.keras.preprocessing.sequence module provides useful functions that simplify data preparation for Word2Vec. You can use the tf.keras.preprocessing.sequence.skipgrams to generate skip-gram pairs from the example_sequence with a given window_size from tokens in the range [0, vocab_size).
Note: negative_samples is set to 0 here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
End of explanation
"""
# Decode a few skip-gram index pairs back to words for inspection.
for target, context in positive_skip_grams[:5]:
    print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
"""
Explanation: Take a look at few positive skip-grams.
End of explanation
"""
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
# The positive context word, shaped (1, 1) as the sampler's true class.
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
    true_classes=context_class, # class that should be sampled as 'positive'
    num_true=1, # each positive skip-gram has 1 positive context class
    num_sampled=num_ns, # number of negative context words to sample
    unique=True, # all the negative samples should be unique
    range_max=vocab_size, # pick index of the samples from [0, vocab_size]
    seed=SEED, # seed for reproducibility
    name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
"""
Explanation: Negative sampling for one skip-gram
The skipgrams function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the tf.random.log_uniform_candidate_sampler function to sample num_ns number of negative samples for a given target word in a window. You can call the funtion on one skip-grams's target word and pass the context word as true class to exclude it from being sampled.
Key point: num_ns (number of negative samples per positive context word) between [5, 20] is shown to work best for smaller datasets, while num_ns between [2,5] suffices for larger datasets.
End of explanation
"""
# Batch the 1 positive and num_ns negative context words into one example.
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
"""
Explanation: Construct one training example
For a given positive (target_word, context_word) skip-gram, you now also have num_ns negative sampled context words that do not appear in the window size neighborhood of target_word. Batch the 1 positive context_word and num_ns negative context words into one tensor. This produces a set of positive skip-grams (labelled as 1) and negative samples (labelled as 0) for each target word.
End of explanation
"""
# Inspect the assembled example: indices, decoded words, and labels.
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
"""
Explanation: Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
End of explanation
"""
# Shapes: target is scalar-like; context and label are (1 + num_ns,).
print(f"target :", target)
print(f"context :", context )
print(f"label :", label )
"""
Explanation: A tuple of (target, context, label) tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape (1,) while the context and label are of shape (1+num_ns,)
End of explanation
"""
# Zipf-based subsampling probabilities for the 10 most frequent word ranks.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
"""
Explanation: Summary
This picture summarizes the procedure of generating training example from a sentence.
Lab Task 1: Compile all steps into one function
Skip-gram Sampling table
A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occuring words (such as the, is, on) don't add much useful information for the model to learn from. Mikolov et al. suggest subsampling of frequent words as a helpful practice to improve embedding quality.
The tf.keras.preprocessing.sequence.skipgrams function accepts a sampling table argument to encode probabilities of sampling any token. You can use the tf.keras.preprocessing.sequence.make_sampling_table to generate a word-frequency rank based probabilistic sampling table and pass it to skipgrams function. Take a look at the sampling probabilities for a vocab_size of 10.
End of explanation
"""
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
  """Generate skip-gram training examples with negative sampling.

  Args:
    sequences: iterable of int-encoded sentences (lists/arrays of token ids).
    window_size: skip-gram window size around each target word.
    num_ns: number of negative context words sampled per positive pair.
    vocab_size: vocabulary size; token ids are assumed in [0, vocab_size).
    seed: seed for the negative-sampling candidate sampler.

  Returns:
    Tuple (targets, contexts, labels) of equal-length lists. Each context
    holds 1 positive followed by num_ns negative word ids; each label is
    [1] + [0]*num_ns.
  """
  # Elements of each training example are appended to these lists.
  targets, contexts, labels = [], [], []
  # TODO 1a (solution): build the word-frequency based sampling table so that
  # very frequent words (stopwords) are subsampled when drawing skip-grams.
  sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
  # Iterate over all sequences (sentences) in dataset.
  for sequence in tqdm.tqdm(sequences):
    # Generate positive skip-gram pairs for a sequence (sentence).
    positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
        sequence,
        vocabulary_size=vocab_size,
        sampling_table=sampling_table,
        window_size=window_size,
        negative_samples=0)
    # TODO 1b (solution): iterate over each positive skip-gram pair to produce
    # training examples with positive context word and negative samples.
    for target_word, context_word in positive_skip_grams:
      # Shape (1, 1): the true context class for the candidate sampler.
      context_class = tf.expand_dims(
          tf.constant([context_word], dtype="int64"), 1)
      # Draw num_ns negative candidates from a log-uniform (Zipf) distribution,
      # excluding the true class.
      negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
          true_classes=context_class,
          num_true=1,
          num_sampled=num_ns,
          unique=True,
          range_max=vocab_size,
          seed=seed,
          name="negative_sampling")
      # Build context and label vectors (for one target word)
      negative_sampling_candidates = tf.expand_dims(
          negative_sampling_candidates, 1)
      context = tf.concat([context_class, negative_sampling_candidates], 0)
      label = tf.constant([1] + [0]*num_ns, dtype="int64")
      # Append each element from the training example to global lists.
      targets.append(target_word)
      contexts.append(context)
      labels.append(label)
  return targets, contexts, labels
"""
Explanation: sampling_table[i] denotes the probability of sampling the i-th most common word in a dataset. The function assumes a Zipf's distribution of the word frequencies for sampling.
Key point: The tf.random.log_uniform_candidate_sampler already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective.
Generate training data
Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
End of explanation
"""
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
"""
Explanation: Lab Task 2: Prepare training data for Word2Vec
With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences!
Download text corpus
You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
End of explanation
"""
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
"""
Explanation: Read text from the file and take a look at the first few lines.
End of explanation
"""
# TODO 2a -- your code goes here
"""
Explanation: Use the non empty lines to construct a tf.data.TextLineDataset object for next steps.
End of explanation
"""
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
  """Standardize raw text: lowercase it and strip all punctuation."""
  punctuation_pattern = '[%s]' % re.escape(string.punctuation)
  lowered = tf.strings.lower(input_data)
  return tf.strings.regex_replace(lowered, punctuation_pattern, '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
"""
Explanation: Vectorize sentences from the corpus
You can use the TextVectorization layer to vectorize sentences from the corpus. Learn more about using this layer in this Text Classification tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a custom_standardization function that can be used in the TextVectorization layer.
End of explanation
"""
vectorize_layer.adapt(text_ds.batch(1024))
"""
Explanation: Call adapt on the text dataset to create vocabulary.
End of explanation
"""
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
"""
Explanation: Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with get_vocabulary(). This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
End of explanation
"""
def vectorize_text(text):
  """Run one string through vectorize_layer and return a 1-D id tensor."""
  batched = tf.expand_dims(text, -1)
  vectorized = vectorize_layer(batched)
  return tf.squeeze(vectorized)
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
"""
Explanation: The vectorize_layer can now be used to generate vectors for each element in the text_ds.
End of explanation
"""
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
"""
Explanation: Obtain sequences from the dataset
You now have a tf.data.Dataset of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples.
Note: Since the generate_training_data() defined earlier uses non-TF python/numpy functions, you could also use a tf.py_function or tf.numpy_function with tf.data.Dataset.map().
End of explanation
"""
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
"""
Explanation: Take a look at few examples from sequences.
End of explanation
"""
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
"""
Explanation: Generate training examples from sequences
sequences is now a list of int encoded sentences. Just call the generate_training_data() function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. Length of target, contexts and labels should be same, representing the total number of training examples.
End of explanation
"""
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
"""
Explanation: Configure the dataset for performance
To perform efficient batching for the potentially large number of training examples, use the tf.data.Dataset API. After this step, you would have a tf.data.Dataset object of (target_word, context_word), (label) elements to train your Word2Vec model!
End of explanation
"""
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
"""
Explanation: Add cache() and prefetch() to improve performance.
End of explanation
"""
class Word2Vec(Model):
  """Skip-gram negative-sampling Word2Vec model.

  Scores each (target, context) training pair with the dot product of the
  target and context embeddings; the flattened result is the per-example
  logits compared against labels of shape (1 + num_ns,).

  NOTE(review): relies on the module-level global `num_ns` for the context
  embedding's input_length -- confirm it is defined before instantiation.
  """
  def __init__(self, vocab_size, embedding_dim):
    super(Word2Vec, self).__init__()
    # Embedding used when a word appears as the *target* of a skip-gram.
    self.target_embedding = Embedding(vocab_size,
                                      embedding_dim,
                                      input_length=1,
                                      name="w2v_embedding", )
    # Embedding used when a word appears as a *context* word
    # (1 positive + num_ns negative candidates per example).
    self.context_embedding = Embedding(vocab_size,
                                       embedding_dim,
                                       input_length=num_ns+1)
    # Dot product of context and target embeddings yields the logits.
    self.dots = Dot(axes=(3,2))
    self.flatten = Flatten()
  def call(self, pair):
    """Compute logits for a (target, context) pair of integer tensors."""
    target, context = pair
    we = self.target_embedding(target)    # target-word embeddings
    ce = self.context_embedding(context)  # context-word embeddings
    dots = self.dots([ce, we])
    return self.flatten(dots)  # flatten to (batch, 1 + num_ns) logits
"""
Explanation: Lab Task 3: Model and Training
The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset.
Subclassed Word2Vec Model
Use the Keras Subclassing API to define your Word2Vec model with the following layers:
target_embedding: A tf.keras.layers.Embedding layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are (vocab_size * embedding_dim).
context_embedding: Another tf.keras.layers.Embedding layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in target_embedding, i.e. (vocab_size * embedding_dim).
dots: A tf.keras.layers.Dot layer that computes the dot product of target and context embeddings from a training pair.
flatten: A tf.keras.layers.Flatten layer to flatten the results of dots layer into logits.
With the sublassed model, you can define the call() function that accepts (target, context) pairs which can then be passed into their corresponding embedding layer. Reshape the context_embedding to perform a dot product with target_embedding and return the flattened result.
Key point: The target_embedding and context_embedding layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
End of explanation
"""
# TODO 3a -- your code goes here
"""
Explanation: Define loss function and compile model
For simplicity, you can use tf.keras.losses.CategoricalCrossEntropy as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:
python
def custom_loss(x_logit, y_true):
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)
It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the tf.keras.optimizers.Adam optimizer.
End of explanation
"""
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
"""
Explanation: Also define a callback to log training statistics for tensorboard.
End of explanation
"""
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
"""
Explanation: Train the model with dataset prepared above for some number of epochs.
End of explanation
"""
!tensorboard --bind_all --port=8081 --load_fast=false --logdir logs
"""
Explanation: Tensorboard now shows the Word2Vec model's accuracy and loss.
End of explanation
"""
# TODO 4a -- your code goes here
"""
Explanation: Run the following command in Cloud Shell:
<code>gcloud beta compute ssh --zone <instance-zone> <notebook-instance-name> --project <project-id> -- -L 8081:localhost:8081</code>
Make sure to replace <instance-zone>, <notebook-instance-name> and <project-id>.
In Cloud Shell, click Web Preview > Change Port and insert port number 8081. Click Change and Preview to open the TensorBoard.
To quit the TensorBoard, click Kernel > Interrupt kernel.
Lab Task 4: Embedding lookup and analysis
Obtain the weights from the model using get_layer() and get_weights(). The get_vocabulary() function provides the vocabulary to build a metadata file with one token per line.
End of explanation
"""
# Write one embedding vector per line to vectors.tsv and the matching token
# to metadata.tsv (the format expected by the TensorFlow Embedding Projector).
# Context managers ensure both files are closed even if a write fails.
with io.open('vectors.tsv', 'w', encoding='utf-8') as out_v, \
     io.open('metadata.tsv', 'w', encoding='utf-8') as out_m:
  for index, word in enumerate(vocab):
    if index == 0:
      continue  # skip 0, it's padding.
    vec = weights[index]
    out_v.write('\t'.join([str(x) for x in vec]) + "\n")
    out_m.write(word + "\n")
"""
Explanation: Create and save the vectors and metadata file.
End of explanation
"""
# Best-effort download when running in Colab. Outside Colab the import (or
# the download call) fails and we deliberately continue without downloading.
try:
  from google.colab import files
  files.download('vectors.tsv')
  files.download('metadata.tsv')
except Exception:
  pass
"""
Explanation: Download the vectors.tsv and metadata.tsv to analyze the obtained embeddings in the Embedding Projector.
End of explanation
"""
|
tgrammat/ML-Data_Challenges | Dato-tutorials/anomaly-detection/Anomaly Detection - Demo 2 [Moving Z-Score and Bayesian Changepoint Models].ipynb | apache-2.0 | import graphlab as gl
import matplotlib.pyplot as plt
fred_dcoilbrenteu = gl.SFrame.read_csv('./FRED-DCOILBRENTEU.csv')
fred_dcoilbrenteu
"""
Explanation: Anomaly Detection: Moving Z-Score and Bayesian Changepoints Model
Introductory Remarks
Anomalies are data points that are different from other observations in some way, typically measured against a model fit to the data. In contrast to ordinary descriptive statistics, we are interested here in finding where these anomalous data points are, rather than excluding them as outliers.
We assume the anomaly detection task is unsupervised, i.e. we don’t have training data with points labeled as anomalous. Each data point passed to an anomaly detection model is given a score indicating how different the point is relative to the rest of the dataset. The calculation of this score varies between models, but a higher score always indicates a point is more anomalous. Often a threshold is chosen to make a final classification of each point as typical or anomalous; this post-processing step is left to the user.
The GraphLab Create (GLC) Anomaly Detection toolkit currently includes three models for two different data contexts:
Local Outlier Factor, for detecting outliers in multivariate data that are assumed to be independently and identically distributed,
Moving Z-score, for scoring outliers in a univariate, sequential dataset, typically a time series, and
Bayesian Changepoints for identifying changes in the mean or variance of a sequential series.
In this short note, we demonstrate how the Moving Z-Score and Bayesian Changepoints models can be used to reveal anomalies in a time series object. As an example we are going to use the "Crude Oil Prices: Brent - Europe" time series, FRED-DCOILBRENTEU, as it is currently provided by the Quandl database of finance and economic data and the Federal Reserve Bank of St. Luis. This times series covers the daily closing prices of Crude Oil Brent - Europe (Dollars per Barrel, Not Seasonally Adjusted) starting from May 1987 to May 2016. It follows a pretty volatile behavior across the years, and we hope to find out where the most anomalous spot values are. For notes and definitions, please see the corresponding US Energy Information Agency (eia), Explanatory Notes.
The GLC Moving Z-Score Model
In a first step of our analysis, we are going to use the GLC Moving Z-Score implementation. This unsupervised learning model fits a moving average to a univariate time series and identifying that way points that are far from the fitted curve. The MovingZScoreModel works with either TimeSeries or SFrame inputs. A uniform sampling rate is assumed and the data window must be defined in terms of number of observations.
The moving Z-score for a data point $x_{t}$ is simply the value of $x_{t}$ standardized by subtracting the moving mean just prior to time $t$ and dividing by the moving standard deviation which is calculated for the same time interval. In particular, assuming that $w$ stands for the window_size in terms of the number of observations the moving Z-score is defined as:
\begin{equation}
z(x_{t}) = \frac{x_{t}-\bar{x}_{t}}{s_{t}},
\end{equation}
where the moving average is:
\begin{equation}
\bar{x}_{t} = (1/w)\,\sum_{i=t-w}^{t-1}x_{i},
\end{equation}
and the standard deviation for the same time interval:
\begin{equation}
s_{t} = \sqrt{(1/w)\,\sum_{i=t-w}^{t-1}(x_{i}-\bar{x}_{t})^{2}}.
\end{equation}
Notes:
The moving Z-score at points within the window_size observations of the beginning of a series are not defined, because there are insufficient points to compute the moving average and moving standard deviation. This is represented by missing values.
Missing values in the input dataset are assigned missing values (‘None’) for their anomaly scores as well.
If there is no variation in the values preceding a given observation, the moving Z-score can be infinite or undefined. If the given observation is equal to the moving average, the anomaly score is coded as 'nan'; if the observation is not equal to the moving average, the anomaly score is 'inf'.
The GLC Bayesian Changepoints Model
As a next step of our analysis we are going to use the GLC Bayesian Changepoints model and compare the results of these two methods. The Bayesian Changepoints implementation scores changepoint probability in a univariate sequential dataset, often a time series. Changepoints are abrupt changes in the mean or variance of a time series. For instance, during an economic recession, stock values might suddenly drop to a very low value. The time at which the stock value dropped is called a changepoint.
The Bayesian Changepoints model is an implementation of the Bayesian Online Changepoint Detection algorithm developed by Ryan Adams and David MacKay. This algorithm computes a probability distribution over the possible run lengths at each point in the data, where run length refers to the number of observations since the last changepoint. When the probability of a 0-length run spikes, there is most likely a change point at the current data point.
More specifically, the algorithm follows the procedure below:
Step 1: Observe new datum $x_{t}$ and evaluate the likelihood of seeing this value for each possible run length. This is a probability vector, with an element for all possible run lengths. A Gaussian distribution between each pair of changepoints is assumed.
\begin{equation}
L(r)= P(x|x_{r})
\end{equation}
Step 2: For each possible run length, $r>0$, at current time $t$, calculate the probability of growth. expected_runlength is a parameter describing the a-priori best guess of run length. The larger expected_runlength is, the stronger the evidence must be in the data to support a high changepoint probability.
\begin{equation}
P_{t}(runlength\equiv r) = P_{t-1}(runlength\equiv r-1)\ast L(r)\ast \left(1-\frac{1}{expected\_runlength}\right)
\end{equation}
Step 3: Calculate probability of change, or $r=0$.
\begin{equation}
P_{t}(runlength\equiv 0)= \sum_{r_{prev}}\left[P_{t-1}(runlength\equiv r_{prev})\ast L(0)\ast \left(\frac{1}{expected\_runlength}\right)\right]
\end{equation}
Step 4: Normalize the probability. For all run length probabilities at time $t$, divide by the sum of all run length probabilities.
\begin{equation}
P_{t}(runlength\equiv r_{i})=\frac{P_{t}(runlength\equiv r_{i})}{\sum_{r}P_{t}(runlength\equiv r)}
\end{equation}
For each incoming point, this process is repeated.
This per-point update is why the method is considered an online learning algorithm.
As described, the algorithm scores each point $x_{t}$ immediately, but if the user can afford to wait several observations, it is often more accurate to assign lagged changepoint scores. The number of observations to wait before scoring a point is set with the lag parameter.
Libraries and Necessary Data Transformation
First we fire up GraphLab Create, all the other necessary libraries for our study and load the FRED/DCOILBRENTEU data set in an SFrame.
End of explanation
"""
import time
import dateutil
def _unix_timestamp_to_datetime(x):
import datetime
import pytz
return dateutil.parser.parse(x)
# Convert the DATE strings to datetime objects so they can serve as an index.
fred_dcoilbrenteu['DATE'] = fred_dcoilbrenteu['DATE'].apply(_unix_timestamp_to_datetime)
# Re-wrap the SFrame as a GraphLab TimeSeries indexed on DATE.
fred_dcoilbrenteu = gl.TimeSeries(fred_dcoilbrenteu, index='DATE')
fred_dcoilbrenteu
"""
Explanation: Next we transform the DATE column in an appropriate timestamp format, and the fred_dcoilbrenteu SFrame in a TimeSeries object.
End of explanation
"""
%matplotlib inline
def plot_time_series(timestamp, values, title, **kwargs):
    """Plot a dollars-per-barrel series against time as a green line.

    Extra keyword arguments are forwarded to plt.plot_date (e.g. label=...).
    NOTE(review): mutates global rcParams (figure size before plotting, font
    size after), which also affects any subsequent plots in the session.
    """
    plt.rcParams['figure.figsize'] = 14, 7
    plt.plot_date(timestamp, values, fmt='g-', tz='utc', **kwargs)
    plt.title(title)
    plt.xlabel('Year')
    plt.ylabel('Dollars per Barrel')
    plt.rcParams.update({'font.size': 16})
plot_time_series(fred_dcoilbrenteu['DATE'], fred_dcoilbrenteu['VALUE'],\
'Crude Oil Prices: Brent - Europe [FRED/DCOILBRENTEU]')
"""
Explanation: We can plot the fred_dcoilbrenteu time series set as follows.
End of explanation
"""
window_size = 252  # average trading days per year
# Fit a Moving Z-score model on the VALUE column: each point is standardized
# against the moving mean/std of the preceding year of observations.
model_moving_zscore = gl.anomaly_detection.moving_zscore.create(fred_dcoilbrenteu,
                                                                window_size, feature='VALUE')
"""
Explanation: Training a Moving Z-Score Model
In this section we train a Moving Z-Score model to reveal where any anomalies exist in the fred_dcoilbrenteu time series.
End of explanation
"""
scores = model_moving_zscore.scores.to_sframe()
scores.print_rows(num_rows=10, max_row_width=100)
scores[252-10:252+10].print_rows(num_rows=60, max_row_width=100)
"""
Explanation: The primary output of the Moving Z-score model is the scores field. This TimeSeries object contains:
row id/time: ID of the corresponding row in the input dataset. Here the dataset is a TimeSeries object and the model returns the DATE timestamp. If it was an SFrame, this column would be filled with the row numbers of the input data.
anomaly score: absolute value of the moving Z-score. A score of 0 indicates that the value is identical to the moving average. The higher the score, the more likely a point is to be an anomaly.
VALUE: the recorded value of Dollars per Barrel of "Crude Oil Brent - Europe".
model update time: time that the model was updated. This is particularly useful for model updating.
End of explanation
"""
scores.sort('anomaly_score', ascending=False).print_rows(num_rows=30, max_row_width=100)
"""
Explanation: Of course, the first 252 rows of the scores output don't have a moving average or Z-score. This is because the moving window does not have sufficient data for those observations.
To reveal the, let's say, 30 most anomalous data points, we can sort the scores SFrame as follows.
End of explanation
"""
# Approximate the anomaly-score distribution and flag the top 1% as anomalies.
sketch = scores['anomaly_score'].sketch_summary()
threshold = sketch.quantile(0.99)
anomalies = scores[scores['anomaly_score'] > threshold]
anomalies.print_rows(num_rows=30, max_row_width=100)
"""
Explanation: Of cource, a lot more anomalous observations may exist in the fred_dcoilbrenteu time series. A good way to make a final decision on that, is to look at the approximate distribution of the anomaly scores with the SArray.sketch_summary() tool, then get a threshold for the anomaly score with the sketch summary's quantile method. Here we declare the top 1% of the data to be anomalies, characterizing that way 71 data points as "anomalous".
End of explanation
"""
%matplotlib inline
# Overlay the raw series, its moving average, and the flagged anomalies.
plot_time_series(fred_dcoilbrenteu['DATE'], fred_dcoilbrenteu['VALUE'],\
                 'Crude Oil Prices: Brent - Europe [FRED/DCOILBRENTEU]', label='FRED/DCOILBRENTEU')
plt.plot_date(scores['DATE'], scores['moving_average'], fmt='b-', tz='utc', lw=2, label='Moving Average')
plt.plot(anomalies['DATE'], anomalies['VALUE'], 'rx', markersize=12, markeredgewidth=1.3, label='Anomalies')
plt.legend(loc='upper left', prop={'size': 16})
plt.show()
"""
Explanation: In the figure below, we plot the original FRED/DCOILBRENTEU time series of "Dollars per Barrel of Crude Oil Brent - Europe", its Moving Average across the years, and the data points that we found to be anomalous.
End of explanation
"""
# Fit a Bayesian Changepoints model on the VALUE column.  Lagged scoring
# (lag=63) waits a quarter of observations for more accurate changepoints.
model_bayesian_changepoints = gl.anomaly_detection.bayesian_changepoints.\
                              create(fred_dcoilbrenteu,
                                     feature='VALUE',
                                     # avg trading days per year
                                     expected_runlength = 252,
                                     # avg trading days per fiscal quarter
                                     lag=63)
"""
Explanation: Training a Bayesian Changepoints Model
In this second part of our analysis we train a Bayesian Changepoints model to reveal where any anomalies exist in the fred_dcoilbrenteu time series.
End of explanation
"""
scores2 = model_bayesian_changepoints.scores.to_sframe()
scores2.print_rows(num_rows=10, max_row_width=100)
"""
Explanation: The primary output of the Moving Z-score model is the scores field. This TimeSeries object contains:
row id/time: ID of the corresponding row in the input dataset. Here the dataset is a TimeSeries object and the model returns the DATE timestamp. If it was an SFrame, this column would be filled with the row numbers of the input data.
changepoint_score: The probability that the given point is a changepoint. This value is in a range between 0 and 1.
VALUE: the recorded value of Dollars per Barrel of "Crude Oil Brent - Europe".
model update time: time that the model was updated. This is particularly useful for model updating.
End of explanation
"""
scores2.sort('changepoint_score', ascending=False).print_rows(num_rows=30, max_row_width=100)
"""
Explanation: To reveal the 30, lets say, more anomalous data points we can sort the scores SFrame as follows.
End of explanation
"""
scores2.tail(80).print_rows(num_rows=80, max_row_width=100)
"""
Explanation: One interesting thing is that if you look at the tail of scores, you will see a handful of missing values. These data points have insufficient data after them to compute lagged changepoint scores. The number of missing values in the tail of the dataset can be reduced by equally reducing the lag parameter in our learning model. However, the returned results will be less accurate. Alternatively, one can choose to update the model with new data.
End of explanation
"""
# Approximate the changepoint-score distribution and flag the top 1%.
sketch2 = scores2['changepoint_score'].sketch_summary()
threshold2 = sketch2.quantile(0.99)
changepoints = scores2[scores2['changepoint_score'] > threshold2]
changepoints.print_rows(num_rows=105, max_row_width=100)
"""
Explanation: Of cource, a lot more anomalous observations may exist in the fred_dcoilbrenteu time series. A good way to make a final decision on that, is to look at the approximate distribution of the changepoint scores with the SArray.sketch_summary() tool, then get a threshold for the changepoint score with the sketch summary's quantile method. Again, we declare the top 1% of the data to be anomalies, characterizing that way 75 data points as "anomalous".
End of explanation
"""
%matplotlib inline
plt.rcParams['figure.figsize'] = 14, 24
plt.figure(1)
# Panel 1: raw series + moving average, Moving Z-score anomalies (blue x).
plt.subplot(3,1,1)
plt.plot_date(fred_dcoilbrenteu['DATE'], fred_dcoilbrenteu['VALUE'],\
              fmt='g-', tz='utc', label='FRED/DCOILBRENTEU')
plt.plot_date(scores['DATE'], scores['moving_average'],\
              fmt='b-', tz='utc', lw=2, label='Moving Average')
plt.xlabel('Year')
plt.ylabel('Dollars per Barrel')
plt.title('Crude Oil Prices: Brent - Europe [FRED/DCOILBRENTEU]')
plt.rcParams.update({'font.size': 16})
plt.plot(anomalies['DATE'], anomalies['VALUE'],\
         'bx', markersize=12, markeredgewidth=1.3, label='Anomalies [Moving Z-Score]')
plt.legend(loc='upper left', prop={'size': 16})
# Panel 2: same series, Bayesian-changepoint anomalies (red x).
plt.subplot(3,1,2)
plt.plot_date(fred_dcoilbrenteu['DATE'], fred_dcoilbrenteu['VALUE'],\
              fmt='g-', tz='utc', label='FRED/DCOILBRENTEU')
plt.plot_date(scores['DATE'], scores['moving_average'],\
              fmt='b-', tz='utc', lw=2, label='Moving Average')
plt.xlabel('Year')
plt.ylabel('Dollars per Barrel')
plt.title('Crude Oil Prices: Brent - Europe [FRED/DCOILBRENTEU]')
plt.rcParams.update({'font.size': 16})
plt.plot(changepoints['DATE'], changepoints['VALUE'],\
         'rx', markersize=12, markeredgewidth=1.3, label='Anomalies [Bayesian Changepoints]')
plt.legend(loc='upper left', prop={'size': 16})
# Panel 3: the raw Bayesian changepoint probability over time.
plt.subplot(3,1,3)
plt.plot_date(scores2['DATE'], scores2['changepoint_score'],\
              fmt='r-', tz='utc', lw=2, label='Bayesian Changepoint Probability')
plt.rcParams.update({'font.size': 16})
plt.xlabel('Year')
plt.ylabel('Changepoint Probability')
plt.title('Crude Oil Prices: Brent - Europe [FRED/DCOILBRENTEU]')
plt.legend(loc='upper left', prop={'size': 16})
plt.show()
"""
Explanation: In the figure below, we plot the original FRED/DCOILBRENTEU time series of "Dollars per Barrel of Crude Oil Brent - Europe", its Moving Average across the years, and the data points that we found to be anomalous with both the Moving Z-Score and the Bayesian Changepoint model.
End of explanation
"""
|
fastai/fastai | nbs/20b_tutorial.distributed.ipynb | apache-2.0 | #|all_multicuda
"""
Explanation: Tutorial - Distributed training in a notebook!
Using Accelerate to launch a training script from your notebook
End of explanation
"""
#hide
from fastai.vision.all import *
from fastai.distributed import *
from fastai.vision.models.xresnet import *
from accelerate import notebook_launcher
from accelerate.utils import write_basic_config
"""
Explanation: Overview
In this tutorial we will see how to use Accelerate to launch a training function on a distributed system, from inside your notebook!
To keep it easy, this example will follow training PETs, showcasing how all it takes is 3 new lines of code to be on your way!
Setting up imports and building the DataLoaders
First, make sure that Accelerate is installed on your system by running:
bash
pip install accelerate -U
In your code, along with the normal from fastai.module.all import * imports two new ones need to be added:
```diff
+ from fastai.distributed import *
from fastai.vision.all import *
from fastai.vision.models.xresnet import *
from accelerate import notebook_launcher
```
The first brings in the Learner.distrib_ctx context manager. The second brings in Accelerate's notebook_launcher, the key function we will call to run what we want.
End of explanation
"""
#from accelerate.utils import write_basic_config
#write_basic_config()
"""
Explanation: We need to setup Accelerate to use all of our GPUs. We can do so quickly with write_basic_config ():
Note: Since this checks torch.cuda.device_count, you will need to restart your notebook and skip calling this again to continue. It only needs to be ran once!
End of explanation
"""
path = untar_data(URLs.PETS)
"""
Explanation: Next let's download some data to train on. You don't need to worry about using rank0_first, as since we're in our Jupyter Notebook it will only run on one process like normal:
End of explanation
"""
def get_y(o):
    """Label function: True when the item's first character is uppercase
    (presumably the Pets naming convention for cat breeds -- confirm)."""
    first = o[0]
    return first.isupper()
def train(path):
    """Build the PETS DataLoaders and fine-tune a resnet34 classifier.

    Both the DataLoaders and the Learner are created inside the function so
    that nothing touches CUDA before notebook_launcher spawns the workers.
    """
    dls = ImageDataLoaders.from_name_func(
        path, get_image_files(path), valid_pct=0.2,
        label_func=get_y, item_tfms=Resize(224))
    learn = vision_learner(dls, resnet34, metrics=error_rate).to_fp16()
    learn.fine_tune(1)
"""
Explanation: We wrap the creation of the DataLoaders, our vision_learner, and call to fine_tune inside of a train function.
Note: It is important to not build the DataLoaders outside of the function, as absolutely nothing can be loaded onto CUDA beforehand.
End of explanation
"""
def train(path):
    """Distributed version: fine-tune inside learn.distrib_ctx, then export.

    sync_bn=False is for compatibility with torchvision's resnet34;
    in_notebook=True tells fastai the launch comes from a notebook.
    NOTE(review): the accompanying text mentions guarding the export with
    `if not rank_distrib():`, but here export runs on every process --
    confirm this is intended.
    """
    dls = ImageDataLoaders.from_name_func(
        path, get_image_files(path), valid_pct=0.2,
        label_func=get_y, item_tfms=Resize(224))
    learn = vision_learner(dls, resnet34, metrics=error_rate).to_fp16()
    with learn.distrib_ctx(sync_bn=False, in_notebook=True):
        learn.fine_tune(1)
    learn.export("pets")
"""
Explanation: The last addition to the train function needed is to use our context manager before calling fine_tune and setting in_notebook to True:
Note: for this example sync_bn is disabled for compatibility purposes with torchvision's resnet34
End of explanation
"""
notebook_launcher(train, (path,), num_processes=2)
"""
Explanation: if not rank_distrib(): checks if you are on the main process or not, and in this case if you are you export your Learner only once.
Finally, just call notebook_launcher, passing in the training function, any arguments as a tuple, and the number of GPUs (processes) to use:
End of explanation
"""
imgs = get_image_files(path)
learn = load_learner(path/'pets')
learn.predict(imgs[0])
"""
Explanation: Afterwards we can import our exported Learner, save, or anything else we may want to do in our Jupyter Notebook outside of a distributed process
End of explanation
"""
|
Olsthoorn/TransientGroundwaterFlow | Syllabus_in_notebooks/Sec6_5_Dalem-pumptest-Hantush.ipynb | gpl-3.0 | from scipy.special import exp1
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Section 6.5.
The Dalem pumping test (semi-confined, Hantush type)
IHE, Transient groundwater
Olsthoorn, 2019-01-03
The most famous book on pumping test analyses is due to Kruseman and De Ridder (1970, 1994). Their book contains all known solutions suitable for the analyses of pumping tests on groundwater wells and some examples with data.
The Dalem pumping test, held in the Netherlands, is a test in a semi-confined setting, which should yield a value for the aquifers' transmissivity $kD$ [m2/d] and its storage coefficient $S$ [-] and the hydraulic resistance $c$ [d].
The situation in cross section is shown here (taken from Kruseman and De Ridder, 1994).
Hantush considered the transient flow due to a well with a constant extraction since $t=0$ placed in a uniform confined aquifer of infinite extent that is covered by a layer with uniform hydraulic resistance against vertical flow and a fixed head equal to zero maintained above this covering layer.
The test can be interpreted from the Theis or the Hantush point of view, i.e. without or with leakage from a layer with fixed head. Which of the two may be deduced from the data: will they fit onto the Theis type curve or, when not, do they match with one of the Hantush type curves. Other effects may also influence the data, like partial penetration of the screen in the aquifer, storage inside the well and delayed yield and, notably, any effects caused by non-linearity, such as non-constant aquifer thickness under the influence of the drawdown in water table aquifers. All such effects may play their role under various circumstances, but may initially be ignored, to be included only when the data show that it is necessary.
The data for the pumping test are in a small text file "Dalem_data.txt", which we'll open and read into this notebook shortly.
We will interpret the test using the Hantush solution for flow to a single well with fully penetrating screen in a uniform aquifer of infinite extent having as yet unknown transmissivity $kD$ and storage coefficient $S$.
$$ s(r, t) = \frac Q {4 \pi kD} W_h(u, \frac r \lambda),\,\,\,\, u = \frac {r^2 S} {4 kD t}, \,\,\,
\lambda = \sqrt{kD c}$$
The Hantush well function will be implemented first, as it is not available in scipy.special.
End of explanation
"""
def Wh(U, rho):
    '''Return the Hantush well function for a vector of u values and a single rho.

    Parameters
    ----------
    U : array_like of float
        Values of u = r**2 S / (4 kD t).
    rho : float
        Dimensionless distance r / lambda.

    Returns
    -------
    numpy.ndarray of float with the same shape as U.
    '''
    # Allocate an explicit float array: np.zeros_like(U) would inherit U's
    # dtype, silently truncating every result to 0 for integer input.
    W = np.zeros(np.shape(U), dtype=float)
    for i, u in enumerate(U):
        W[i] = wh(u, rho)
    return W
def wh(u, rho):
    '''Numerically evaluate the Hantush well function for scalar u and rho.

    Integrates exp(-y - rho**2 / (4 y)) / y from y = u up to a practical
    upper bound of 20, using the midpoint rule on a logarithmically
    spaced grid (the integrand is negligible beyond the upper bound).
    '''
    upper = 20  # sufficiently high
    edges = np.logspace(np.log10(u), np.log10(upper), 1000)
    midpoints = 0.5 * (edges[:-1] + edges[1:])
    widths = np.diff(edges)
    integrand = np.exp(-midpoints - rho**2 / (4 * midpoints)) / midpoints
    return np.sum(integrand * widths)
"""
Explanation: The Hantush well function
$$ W(u, \frac r \lambda) = \intop _u ^\infty \frac {e^{-y - \frac {\left( \frac r {\lambda} \right) ^2} {4 y} }} y dy $$
The implementation is readily done by numeric integration using the midpoint rule on a logarithmically spaced grid, with sufficient points to make sure the function is computed accurately enough.
End of explanation
"""
# Load the Dalem drawdown measurements and plot them on linear axes,
# one curve per observation well (wells are identified by their distance
# r to the pumped well, stored in the first column).
fname = './Dalem_data.txt'
with open(fname, 'r') as f:
    data = f.readlines() # read the data as a list of strings
hdr = data[0].split() # get the first line, i.e. the header
data = data[1:] # remove the header line from the data
# split each line (string) into its individual tokens
# each token is still a string not yet a number
toklist = [d.split() for d in data]
# convert this list of lines with string tokens into a list of lists with numbers
data = [] # start empty
for line in toklist:
    data.append([float(d) for d in line]) # convert this line
# when done, convert this list of lists of numbers into a numpy array
data = np.array(data)
#data # show what we've got
# get the piezometer distances from the first data column, the unique values
distances = np.unique(data[:,0])
plt.title('Dalem pumping test measured drawdowns')
plt.xlabel('t [min]')
plt.ylabel('dd [m]')
plt.grid()
# last two columns appear to hold time [min] and drawdown [m] -- TODO confirm
for r in distances:
    I = data[:,0] == r # boolean array telling which data belong to this observation well
    plt.plot(data[I, -2], data[I,-1], '.-', label='r={:.0f} m'.format(r))
plt.legend()
plt.show()
"""
Explanation: Read the data
End of explanation
"""
# Same drawdown curves, now with a logarithmic time axis.
plt.title('Dalem pumping test measured drawdowns')
plt.xlabel('t [min]')
plt.ylabel('dd [m]')
plt.xscale('log')
plt.grid()
for r in distances:
    I = data[:,0] == r  # rows belonging to this observation well
    plt.plot(data[I,-2], data[I,-1], '.-', label='r={:.0f} m'.format(r))
plt.legend()
plt.show()
"""
Explanation: Same, but using log scale
End of explanation
"""
# Same drawdown curves on double-log axes (log time, log drawdown).
plt.title('Dalem pumping test measured drawdowns')
plt.xlabel('t [min]')
plt.ylabel('dd [m]')
plt.xscale('log')
plt.yscale('log')
plt.grid()
for r in distances:
    I = data[:,0] == r  # rows belonging to this observation well
    plt.plot(data[I,-2], data[I,-1], '.-', label='r={:.0f} m'.format(r))
plt.legend()
plt.show()
"""
Explanation: Drawdown on double log scale
End of explanation
"""
# Plot drawdown against t/r^2 so all wells share one horizontal axis.
plt.title('Dalem pumping test measured drawdowns')
plt.xlabel('$t/r^2$ [min/m$^2$]')
plt.ylabel('dd [m]')
plt.xscale('log')
#plt.yscale('log')
plt.grid()
for r in distances:
    I = data[:,0] == r  # rows belonging to this observation well
    tr2 = data[I, -2] / r**2
    plt.plot(tr2, data[I,-1], '.-', label='r={:.0f} m'.format(r))
plt.legend()
plt.show()
"""
Explanation: Drawdown on double log scale using $t/r^2$ on x-axis
End of explanation
"""
# Overlay the (scaled) measurements on the Theis and Hantush type curves.
# A scales the drawdown vertically, B scales t/r^2 horizontally; per the
# text above, both were already optimized by hand for this dataset.
A = 30
B = 5.0e6
u = np.logspace(-4, 0, 41)
plt.title('Type curve and $A \times s$ vs $B \times t/r^2$, with $A$={}, $B$={}'.format(A, B))
plt.xlabel('$1/u$ and $B \, t/r^2$')
plt.ylabel('W(u) and $A \, s$')
plt.xscale('log')
plt.yscale('log')
plt.grid()
# the Theis type curve
plt.plot(1/u, exp1(u), 'k', lw=3, label='Theis')
# Hantush type curves for a range of rho = r / lambda
for rho in [0.01, 0.03, 0.1, 0.3, 3]:
    plt.plot(1/u, Wh(u, rho), label='rho={:.2f}'.format(rho))
# The measurements
for r in distances :
    I = data[:,0] == r
    t = data[I,-2] / (24 * 60)  # minutes -> days
    s = data[I,-1] # Q /(4 * np.pi * kD) * exp1(r**2 * S / (4 * kD * t))
    plt.plot(B * t/r**2, A * s, 'o', label='$r$= {:.3g} m'.format(r))
plt.legend()
plt.show()
"""
Explanation: Interpretation using the match on double log scales (Classical method)
The classical interpretation plots the measured drawdowns on double log paper (drawdown $s$ versus $t/r^2$) and compares them with the Theis type curve $W(u)$ versus $1/u$ also drawn on double log paper. Because $1/u = (4 kD t) / (r^2 S)$ it follows that on logarithmic scales $1/u$ and $t/r^2$ differ only by a constant factor, which represents a horizontal shift on the log scale. The drawdown $s$ differs only by the constant factor $Q/(4 \pi kD)$ from the well function $W(u)$, and so this implies a vertical shift on logarithmic scale. Hence the measured drawdown versus $t/r^2$ on double log scale looks exactly the same as the Theis type curve but it is only shifted a given distance along the horizontal axis and a given distance along the vertical axis. These two shifts yield the sought transmissivity and storage coefficient.
Below we draw the Theis type curve and the drawdown $s$ multiplied by a factor $A$ and the $t/r^2$ multiplied by a factor $B$, choosing $A$ and $B$ interactively untill the measured and the type curve match best.
In this worked out example, I already optmized the values of $A$ and $B$ by hand. Set them both to 1 and try optimizing them yourself.
End of explanation
"""
# Transmissivity follows from the vertical match factor: A = 4*pi*kD / Q.
Q = 761 # m3/d
kD = A * Q /4 /np.pi
print('kD = {:.0f} m2/d'.format(kD))
"""
Explanation: So $A s = W(u)$ and $s = \frac Q {4 \pi kD} W(u)$ and, therefore $A = \frac {4 \pi kD} {Q}$ and $ kD = \frac {A Q} {4 \pi}$
End of explanation
"""
# Storage coefficient from the horizontal match factor: 1/u = B * t/r^2.
S = 4 * kD / B
print('S = {:.2e} [-]'.format(S))
"""
Explanation: The storage coefficient then follows from
$\frac 1 u = B \frac t {r^2}$, that is, $\frac {4 kD t} {r^2 S} = B \frac t {r^2}$ so that $S = \frac {4 kD} B$
End of explanation
"""
# Hydraulic resistance c for the two matched piezometers; the (r, rho)
# pairs are read off the type-curve plot above, and c = (r/rho)^2 / kD.
for r, rho in [(30, 0.03), (90, 0.1)]:
    c = (r/rho)**2 / kD
    print('c = {:.0f}'.format(c))
"""
Explanation: The vertical resistance is obtained from observing which of the lines depending on $\rho$ the measurements of the individual piezometers follow. In this case, the $r=30$ m piezometer seems to follow the type curve for $\rho = 0.03$ and the 90 m curve seems to follow the $\rho = 0.1$ type curve.
$$\rho = r / \lambda$$
then yields
$$ \lambda = \sqrt{kD c} = \frac r \rho $$
and
$$ c = \frac {\left( \frac r \rho \right)^2} {kD} $$
End of explanation
"""
|
Soil-Carbon-Coalition/atlasdata | Mapping federal crop insurance in the U.S..ipynb | mit | #some usual imports, including some options for displaying large currency amounts with commas and only 2 decimals
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# show thousands separators for large currency amounts in displayed tables
pd.set_option('display.float_format', '{:,}'.format)
# and display at most 2 decimal places
pd.set_option('display.precision',2)
"""
Explanation: Mapping federal crop insurance in the U.S.
A Jupyter notebook (Python 3) by Peter Donovan, info@soilcarboncoalition.org
Open data is not just a thing or a tool. It's a behavior, based on beliefs. This notebook is a way of sharing my methods and assumptions, and if you use the same or similar tools (such as R instead of Python, for example) you can retread these steps. I hope this notebook may also serve as a guide for me as well as others who want to do similar things.
With crop insurance, as with any data set, looking at the data is a good way of learning about its particulars if not its intentions. Some knowledge of the context or domain of the data is usually required.
For background on federal crop insurance, the following may be a start:
Dennis Shields' 2015 report from the Congressional Research Service: https://fas.org/sgp/crs/misc/R40532.pdf
Environmental Working Group's material on crop insurance, which includes interactive maps showing rate of return (payouts compared to premiums) on some crops by county from 2001 through 2014: http://www.ewg.org/research/crop-insurance-lottery. The average federal subsidy for crop insurance premiums is about 60%.
The Natural Resources Defense Council has a 2013 paper on crop insurance, https://www.nrdc.org/sites/default/files/soil-matters-IP.pdf. This paper suggests that crop insurance could be reformed to reward farming that is low risk with environmental rewards.
A starting hypothesis: federally subsidized crop insurance, while it sustains the economic viability of many farm businesses, might also tend to replace soil health and function as the foundation of a viable agriculture.
To investigate the hypothesis, we'll start by compiling data.
First, we get data. Download and unzip the data file from the USDA Risk Management Agency website: http://www.rma.usda.gov/data/cause.html The complete data for each year is under the "Summary of Business with Month of Loss" header. So far I am using the 2014 through 2016 data. You can get the column headers from the same web page as a Word or pdf doc.
End of explanation
"""
# The raw RMA "cause of loss" file is pipe-delimited and has no header row.
df = pd.read_csv('/Users/Peter/Documents/atlas/RMA/colsom14.txt',sep='|',header=None)
df.shape #this counts rows, columns in our dataframe
"""
Explanation: From http://www.rma.usda.gov/data/cause we see that years 2010 through 2016 are available as zip archives in Summary of Business. With a slower connection it is better to download and extract the zip archives outside of this notebook. Each contains a text file such as colsom14.txt, which will be an example for this notebook.
Unzip the file and inspect it with a text editor. There are pipe characters separating the fields, and sometimes sequences of spaces before them or after them. There are no column headers, we'll add those next.
End of explanation
"""
# Column headers for the 2010-2014 files (from RMA's record-layout document).
the_columns_2014 = ['Crop Year Identifier','State Code','State Abbreviation ','County Code','County Name','Crop Code','Crop Name','Insurance Plan Code','Insurance Plan Name Abbreviation','Coverage Category','Stage Code','Cause of Loss Code','Cause of Loss Description','Month of Loss','Month of Loss Name','Policies Earning Premium','Policies Indemnified','Net Planted Acres','Liability','Total Premium','Subsidy','Determined Acres','Indemnity Amount','Loss Ratio']
# The 2015+ layout adds a 'Net Endorsed Acres' column.
the_columns_15_16 = ['Crop Year Identifier', 'State Code', 'State Abbreviation ',
       'County Code', 'County Name', 'Crop Code', 'Crop Name',
       'Insurance Plan Code', 'Insurance Plan Name Abbreviation',
       'Coverage Category', 'Stage Code', 'Cause of Loss Code',
       'Cause of Loss Description', 'Month of Loss', 'Month of Loss Name',
       'Policies Earning Premium', 'Policies Indemnified', 'Net Planted Acres',
       'Net Endorsed Acres', 'Liability', 'Total Premium', 'Subsidy',
       'Determined Acres', 'Indemnity Amount', 'Loss Ratio']
df.columns = the_columns_2014 #this adds our column headers
"""
Explanation: The column headers are supplied in a Word document (Record layout: Word) from the same web page. They differ for 2010-2014 and from 2015 forward. Format them as a python list of strings as follows, and add them to the dataframe.
End of explanation
"""
# Strip stray whitespace from the text columns (string columns only --
# numeric columns have no .strip()).
text_columns = ['County Name', 'Crop Name',
                'Insurance Plan Name Abbreviation', 'Cause of Loss Description']
for column in text_columns:
    df[column] = df[column].map(lambda value: value.strip())
# spot-check one row to confirm the whitespace is gone
print(list(df.loc[1187]))
"""
Explanation: There are spaces on either side of some of the fields. We can use str.strip() to remove them.
End of explanation
"""
# FIPS county codes are 5-character strings: a 2-digit state code followed
# by a 3-digit county code, both zero-padded.
df['State Code'] = df['State Code'].map(lambda code: str(code).zfill(2))
df['County Code'] = df['County Code'].map(lambda code: str(code).zfill(3))
# concatenate into the combined FIPS identifier and inspect one value
df['FIPS'] = df['State Code'] + df['County Code']
df['FIPS'][10] #to make sure we have a 5-digit string, not a number
"""
Explanation: FIPS code
The state and county location codes are numeric (int64). FIPS (Federal Information Processing Standard) codes for counties are 5-digit strings. We'll pad with zeros using zfill function. This will come in handy when it comes to mapping, as we will want to merge or join our data with county boundaries using the FIPS code.
End of explanation
"""
# Total indemnity per county, exported as a tsv keyed by FIPS code for d3.
counties = df.groupby(['FIPS','County Name'])
aggregated = counties.agg({'Indemnity Amount': np.sum})
# NOTE(review): the result of this sort is not assigned (sort_values is not
# in-place), so the line has no lasting effect here.
aggregated.sort_values('Indemnity Amount',ascending=False)
# NOTE(review): a single aggregated.reset_index(inplace=True) would move
# both index levels to columns in one call.
aggregated.reset_index(level=0, inplace=True)
aggregated.reset_index(level=0, inplace=True)
#run this twice to convert the two indexes to columns
#rename columns for convenience
aggregated.rename(columns={'County Name': 'name', 'FIPS': 'id', 'Indemnity Amount': 'indemnity'}, inplace=True)
#convert to $millions
aggregated['indemnity']=aggregated['indemnity']/1000000
#reorder columns and write to tab-separated tsv file for d3 mapping
aggregated = aggregated[['id','name','indemnity']]
aggregated.to_csv('/Users/Peter/Documents/atlas/RMA/indemnity2014.tsv', sep='\t', index=False)
"""
Explanation: Map indemnities by county
End of explanation
"""
# Total indemnities by cause of loss, two equivalent groupings.
# NOTE(review): the next line's result (and causes_2014.sort_values below)
# is not assigned -- these are display-only in the notebook.
df.groupby('Cause of Loss Description').agg({'Indemnity Amount':np.sum}).sort_values('Indemnity Amount',ascending=False)
causes_2014 = df.groupby('Cause of Loss Description')['Indemnity Amount'].sum()
causes_2014.sort_values(ascending=False)
#to generate a table of total indemnities by Cause of Loss, you can export a csv
causes_2014.to_csv('/Users/Peter/Documents/atlas/RMA/causes_2014.csv')
"""
Explanation: Causes of loss
Let's look at the causes of loss. NOTE: These procedures could be duplicated to aggregate indemnities by 'Crop Name' as well.
End of explanation
"""
# Select the rows for the two dominant loss causes.
cause = df['Cause of Loss Description']
rain = df[cause == 'Excess Moisture/Precip/Rain']
drought = df[cause == 'Drought']
print(rain.shape, drought.shape)
"""
Explanation: 'Excess Moisture/Precip/Rain' and 'Drought' are by far the most common causes. Let's filter the dataframe by these two, so we can potentially see which counties had indemnities for both causes, and how much.
End of explanation
"""
# Per-county indemnity totals for each cause, side by side; counties with
# only one of the two causes get NaN in the other column.
g_rain = rain.groupby(['FIPS','County Name']).agg({'Indemnity Amount':np.sum})
g_drought = drought.groupby(['FIPS','County Name']).agg({'Indemnity Amount':np.sum})
together=pd.concat([g_rain,g_drought],axis=1)
together.columns = ['moisture','drought']
together.head()
"""
Explanation: Now do a groupby on each dataframe by county, with sums of indemnity amounts.
End of explanation
"""
# Add combined total and moisture/drought ratio, keep counties where the
# two causes are within a factor of 4 of each other, and export for mapping.
together['total']=together.moisture + together.drought
together['ratio']=together.moisture / together.drought
# NOTE(review): head(20) result is not assigned -- display-only in the notebook
together.head(20)
mixed = together[(together.ratio < 4) & (together.ratio > .25)]
mixed.shape
# NOTE(review): a single reset_index(inplace=True) would reset both levels
mixed.reset_index(level=0, inplace=True)
mixed.reset_index(level=0, inplace=True)
#run this twice
mixed = mixed.rename(columns={'total':'indemnity'})
mixed.indemnity = mixed.indemnity/1000000
mixed.to_csv('/Users/Peter/Documents/atlas/RMA/moisture_plus_drought_2014.tsv', sep='\t', index=False)
"""
Explanation: Let's add two columns, a total, and a ratio of moisture to drought.
End of explanation
"""
|
BrownDwarf/ApJdataFrames | notebooks/Luhman2012.ipynb | mit | import warnings
warnings.filterwarnings("ignore")
from astropy.io import ascii
import pandas as pd
"""
Explanation: ApJdataFrames 008: Luhman2012
Title: THE DISK POPULATION OF THE UPPER SCORPIUS ASSOCIATION
Authors: K. L. Luhman and E. E. Mamajek
Data is from this paper:
http://iopscience.iop.org/0004-637X/758/1/31/article#apj443828t1
End of explanation
"""
# Download the machine-readable Table 1 of Luhman & Mamajek (2012) and
# take a quick look at its columns, first rows and length.
tbl1 = ascii.read("http://iopscience.iop.org/0004-637X/758/1/31/suppdata/apj443828t1_mrt.txt")
tbl1.columns
tbl1[0:5]
len(tbl1)
"""
Explanation: Table 1 - VOTable with all source properties
End of explanation
"""
# Query SIMBAD for every source name in Table 1, asking for object type
# and spectral type as extra fields.
# NOTE: Python 2 notebook -- print statements below are py2 syntax.
from astroquery.simbad import Simbad
import astropy.coordinates as coord
import astropy.units as u
customSimbad = Simbad()
customSimbad.add_votable_fields('otype', 'sptype')
query_list = tbl1["Name"].data.data
result = customSimbad.query_objects(query_list, verbose=True)
result[0:3]
print "There were {} sources queried, and {} sources found.".format(len(query_list), len(result))
if len(query_list) == len(result):
    print "Hooray! Everything matched"
else:
    print "Which ones were not found?"
def add_input_column_to_simbad_result(self, input_list, verbose=False):
    """
    Adds 'INPUT' column to the result of a Simbad query

    Parameters
    ----------
    input_list : sequence of strs
        names of objects from most recent query
    verbose : boolean, optional
        When `True`, verbose output is printed

    Returns
    -------
    table : `~astropy.table.Table`
        Query results table, with an extra 'INPUT' column holding the
        queried names that Simbad resolved, in their original order
    """
    # Simbad reports unresolved names in the raw error text, one per line,
    # with the failing identifier after the last ':' on each line.
    error_string = self.last_parsed_result.error_raw
    fails = []
    for error in error_string.split("\n"):
        start_loc = error.rfind(":") + 2
        fails.append(error[start_loc:])
    # Use a set for membership testing (the list scan was O(n*m)).
    failed = set(fails)
    successes = [s for s in input_list if s not in failed]
    if verbose:
        out_message = "There were {} successful Simbad matches and {} failures."
        # parenthesized print: valid in both Python 2 and Python 3
        print(out_message.format(len(successes), len(fails)))
    self.last_parsed_result.table["INPUT"] = successes
    return self.last_parsed_result.table
# Attach the queried names to the Simbad result, convert both tables to
# pandas and left-join them on the original source name.
result_fix = add_input_column_to_simbad_result(customSimbad, query_list, verbose=True)
tbl1_pd = tbl1.to_pandas()
result_pd = result_fix.to_pandas()
tbl1_plusSimbad = pd.merge(tbl1_pd, result_pd, how="left", left_on="Name", right_on="INPUT")
"""
Explanation: Cross match with SIMBAD
End of explanation
"""
# Inspect the merged table, create the output directory ('!' is the
# IPython shell escape) and save the result locally.
tbl1_plusSimbad.head()
! mkdir ../data/Luhman2012/
tbl1_plusSimbad.to_csv("../data/Luhman2012/tbl1_plusSimbad.csv", index=False)
"""
Explanation: Save the data table locally.
End of explanation
"""
|
ctzhu/Python_Data_Wrangling | Challenge01_key.ipynb | cc0-1.0 | df_temp = pd.read_csv('Temp_116760.csv', skiprows=1, index_col=0)
# quick look at the temperature table (its index is day-of-year)
df_temp.tail()
# precipitation table: parse its string index into datetimes
df_prcp = pd.read_csv('Prcp_116760.csv', index_col=0)
df_prcp.index = pd.to_datetime(df_prcp.index)
df_prcp.head()
# and I want the index to be of date-time, rather than just strings
df_prcp.index.dtype
"""
Explanation: Data Wrangling the Pandas
There are two datasets in CSV format, both from weather station 'USC00116760' in Petersburg, IL
Data ranges from 2015-01-01 to 2015-06-29
'Temp_116760.csv' stores temperature data; the index is day-of-year.
'Prcp_116760.csv' stores precipitation data; the index is date-time.
Now how can we read the data such that they appear like the followings?
Tip: Pandas will always try to align index
Tip: try to bring up the docstring of Pandas.read_csv
Tip: use Pandas.concat to join DataFrame together
End of explanation
"""
# Align by position instead of by index: rebuild the temperature frame
# with the precipitation frame's datetime index, then glue the two
# side by side (axis=1).
pd.concat((df_prcp,
           pd.DataFrame(data=df_temp.values,
                        index=df_prcp.index,
                        columns=df_temp.columns)),
          axis=1).head()
"""
Explanation: Try pandas.concat
End of explanation
"""
# Join on the day-of-year: the left key is computed from the datetime
# index, the right frame is matched on its (day-of-year) index.
pd.merge(left=df_prcp,
         right=df_temp,
         left_on=df_prcp.index.dayofyear,
         right_index=True,
         how='left').head()
"""
Explanation: Try pandas.merge
Why might merge be the better approach?
End of explanation
"""
# Keep the merged frame, then count TMAX observations per month, split
# by whether SNOW is missing.
df = pd.merge(left=df_prcp,
              right=df_temp,
              left_on=df_prcp.index.dayofyear,
              right_index=True,
              how='left')
df.pivot_table(values='TMAX',
               index=df.index.month,
               columns=df.SNOW.isnull(),
               aggfunc='count')
"""
Explanation: Using pivot_table to summarize data
How many snow days and non-snow days are there for each month?
Can you generate the following result, say, with the merged data?
Does the result make sense to you?
If not, why doesn't it, and how can we fix it?
End of explanation
"""
# Re-index onto the full calendar (2015-01-01 .. 2015-06-30) so days that
# are absent from the data show up as all-NaN rows, then label those rows
# 'Missing' in the monthly summary.
df3 = pd.DataFrame(index=pd.date_range('2015-01-01','2015-06-30'),
                   columns=df.columns)
df3.update(df)
df3.pivot_table(values='TMAX',
                index=df3.index.month,
                columns=np.where(df3.isnull().all(1),
                                 'Missing',
                                 df3.SNOW.isnull()),
                aggfunc=len)
"""
Explanation: Generate the CORRECT summary table for snowy days
It can be done with just 3 method calls
TIP: lookup the pandas.DataFrame.update() method.
TIP: lookup the pandas.date_range() method.
End of explanation
"""
|
bgruening/EDeN | examples/ExampleModel.ipynb | gpl-3.0 | #code for making artificial dataset
import random
def swap_two_characters(seq):
    '''Swap the characters at two randomly chosen positions of *seq*.'''
    chars = list(seq)
    i = random.randint(0, len(chars) - 1)
    j = random.randint(0, len(chars) - 1)
    chars[i], chars[j] = chars[j], chars[i]
    return ''.join(chars)
def swap_characters(seed, n):
    '''Return *seed* after *n* random pairwise character swaps.'''
    result = seed
    for _ in range(n):
        result = swap_two_characters(result)
    return result
def make_seed(start=0, end=26):
    '''Build a randomly shuffled string of the alphabet slice [start, end).

    NOTE: uses unichr, i.e. this is Python 2 code.
    '''
    alphabet = ''.join(str(unichr(97 + i)) for i in range(start, end))
    return swap_characters(alphabet, end - start)
def make_dataset(n_sequences=None, seed=None, n_swaps=None):
    '''Return a list holding *seed* followed by *n_sequences* variants of
    it, each obtained by *n_swaps* random character swaps.'''
    sequences = [seed]
    for _ in range(n_sequences):
        sequences.append(swap_characters(seed, n_swaps))
    return sequences
def random_capitalize(seqs, p=0.5):
    '''Uppercase each character of each sequence independently with
    probability *p*; return the new list of sequences.'''
    capitalized = []
    for seq in seqs:
        capitalized.append(''.join(ch.upper() if random.random() < p else ch
                                   for ch in seq))
    return capitalized
def make_artificial_dataset(sequence_length=None, n_sequences=None, n_swaps=None):
    """Build one class of sequences: a random seed plus n_sequences swap
    variants, split into train/test halves both before and after random
    capitalization.

    NOTE: Python 2 code -- it uses print statements and relies on integer
    division in len(seqs)/2.
    """
    seed = make_seed(start=0, end=sequence_length)
    print 'Seed: ',seed
    seqs = make_dataset(n_sequences=n_sequences, seed=seed, n_swaps=n_swaps)
    # halves of the original (all-lowercase) sequences
    train_seqs_orig=seqs[:len(seqs)/2]
    test_seqs_orig=seqs[len(seqs)/2:]
    # the same sequences with roughly half the characters upper-cased
    seqs = random_capitalize(seqs, p=0.5)
    print 'Sample with random capitalization:',seqs[:7]
    train_seqs=seqs[:len(seqs)/2]
    test_seqs=seqs[len(seqs)/2:]
    return train_seqs_orig, test_seqs_orig, train_seqs, test_seqs
#code to estimate predictive performance on categorical labeled sequences
def discriminative_estimate(train_pos_seqs, train_neg_seqs, test_pos_seqs, test_neg_seqs):
    """Fit a binary classifier on the training sequences and report its
    predictive performance on the test sequences (categorical labels).

    Relies on the module-level globals `complexity` and `n_iter_search`.
    """
    from eden.graph import Vectorizer
    vectorizer = Vectorizer(complexity=complexity)
    from eden.converter.graph.sequence import sequence_to_eden
    iterable_pos = sequence_to_eden(train_pos_seqs)
    iterable_neg = sequence_to_eden(train_neg_seqs)
    from eden.util import fit, estimate
    estimator = fit(iterable_pos,iterable_neg, vectorizer, n_iter_search=n_iter_search)
    # rebuild the inputs, now for the test set
    from eden.converter.graph.sequence import sequence_to_eden
    iterable_pos = sequence_to_eden(test_pos_seqs)
    iterable_neg = sequence_to_eden(test_neg_seqs)
    estimate(iterable_pos, iterable_neg, estimator, vectorizer)
#code to create real vector labels
def make_encoding(encoding_vector_dimension=3, sequence_length=None, noise_size=0.01):
    '''Draw a random vector code for each lowercase char; the uppercase
    twin receives the same code plus a small random perturbation.

    NOTE: uses unichr, i.e. this is Python 2 code.
    '''
    default_encoding = [0] * encoding_vector_dimension
    import numpy as np
    # one row of random codes per character in [start, end) = [0, sequence_length)
    lower_chars = [str(unichr(97 + i)) for i in range(0, sequence_length)]
    codes = np.random.rand(len(lower_chars), encoding_vector_dimension)
    encodings = {}
    for lower, code in zip(lower_chars, codes):
        encodings[lower] = list(code)
        # capitalized variant: same code, slightly perturbed
        noise = np.random.rand(encoding_vector_dimension) * noise_size
        encodings[lower.upper()] = list(code + noise)
    return encodings, default_encoding
def make_encodings(n_encodings=3, encoding_vector_dimension=3, sequence_length=None, noise_size=0.01):
    '''Produce *n_encodings* independent random encoding schemes; the
    default encoding is shared by all of them.'''
    schemes = []
    for _ in range(n_encodings):
        encoding, default_encoding = make_encoding(encoding_vector_dimension, sequence_length, noise_size=noise_size)
        schemes.append(encoding)
    return schemes, default_encoding
"""
Explanation: Sequence Modeling with EDeN
The case for real valued vector labels
Aim: Suppose you are given two sets of sequences. Each sequence is composed of characters in a finite alphabet. However there are similarity relationships between the characters. We want to build a predictive model that can discriminate between the two sets.
Artificial Dataset
Lets build an artificial case. We construct two classes in the following way: for each class we start from a specific but random seed sequence, and the full set is then generated every time by permuting the position of k pairs of characters chosen at random in the seed sequence.
To simulate the relationship between characters we do as follows: we select at random some characters and we capitalize them. For the machine, a capitalized character is completely different from its lowercase counterpart, but it is easier for humans to see them.
Assume the similarity between chars is given as a symmetric matrix. We can then perform a low dimensionality embedding of the similarity matrix (e.g. MDS in $\mathbb{R}^4$) and obtain some vector representation for each char such that their euclidean distance is proportional to their dissimilarity. Lets assume we are already given the vector representation. In our case we just take some random vectors as they will be roughly equally distant from each other. In order to simulate that the capitalized version of a char should be similar to its lowercase counterpart, we just add a small amount of noise to the vector representation of one of the two.
Auxiliary Code
End of explanation
"""
# Logging setup and experiment parameters for the artificial-data study.
from eden.util import configure_logging
import logging
configure_logging(logging.getLogger(),verbosity=2)
#problem parameters
random.seed(1)
sequence_length = 8 #sequences length
n_sequences = 50 #num sequences in positive and negative set
n_swaps = 2 #num pairs of chars that are swapped at random
n_iter_search = 30 #num parameter configurations that are evaluated in hyperparameter optimization
complexity = 2 #feature complexity for the vectorizer
n_encodings = 5 #num vector encoding schemes for chars
encoding_vector_dimension = 9 #vector dimension for char encoding
noise_size = 0.05 #amount of random noise
# build the positive and negative classes (Python 2 print statements)
print 'Positive examples:'
train_pos_seqs_orig, test_pos_seqs_orig, train_pos_seqs, test_pos_seqs = make_artificial_dataset(sequence_length,n_sequences,n_swaps)
print 'Negative examples:'
train_neg_seqs_orig, test_neg_seqs_orig, train_neg_seqs, test_neg_seqs = make_artificial_dataset(sequence_length,n_sequences,n_swaps)
"""
Explanation: Artificial data generation
End of explanation
"""
%%time
# Baseline: categorical labels, with and without the random capitalization
# that inflates the vocabulary (the %%time magic times the whole cell).
#lets estimate the predictive performance of a classifier over the original sequences
print 'Predictive performance on original sequences'
discriminative_estimate(train_pos_seqs_orig, train_neg_seqs_orig, test_pos_seqs_orig, test_neg_seqs_orig)
print '\n\n'
#lets estimate the predictive performance of a classifier over the capitalized sequences
print 'Predictive performance on sequences with random capitalization'
discriminative_estimate(train_pos_seqs, train_neg_seqs, test_pos_seqs, test_neg_seqs)
"""
Explanation: Discriminative model on categorical labels
End of explanation
"""
#lets make a vector encoding for the chars simply using a random encoding
#and a small amount of noise for the capitalized versions
#we can generate a few encodings and let the algorithm choose the best one.
encodings, default_encoding = make_encodings(n_encodings, encoding_vector_dimension, sequence_length, noise_size)
#lets define the 3 main machines: 1) pre_processor, 2) vectorizer, 3) estimator
#the pre_processor takes the raw format and makes graphs
def pre_processor( seqs, encoding=None, default_encoding=None, **args ):
    """Turn raw sequences into EDeN path graphs whose nodes carry the
    real-valued vector labels from *encoding*."""
    #convert sequences to path graphs
    from eden.converter.graph.sequence import sequence_to_eden
    graphs = sequence_to_eden(seqs)
    #relabel nodes with corresponding vector encoding
    from eden.modifier.graph.vertex_attributes import translate
    graphs = translate(graphs, label_map = encoding, default = default_encoding)
    return graphs
#the vectorizer takes graphs and makes sparse vectors
from eden.graph import Vectorizer
vectorizer = Vectorizer()
#the estimator takes a sparse data matrix and a target column vector and makes a predictive model
from sklearn.linear_model import SGDClassifier
estimator = SGDClassifier(class_weight='auto', shuffle=True)
#the model takes a pre_processor, a vectorizer, an estimator and returns the predictive model
from eden.model import ActiveLearningBinaryClassificationModel
model = ActiveLearningBinaryClassificationModel(pre_processor=pre_processor,
                                                estimator=estimator,
                                                vectorizer=vectorizer,
                                                fit_vectorizer=True )
#lets define hyper-parameters value ranges
from numpy.random import randint
from numpy.random import uniform
pre_processor_parameters={'encoding':encodings, 'default_encoding':[default_encoding]}
vectorizer_parameters={'complexity':[complexity],
                       'n':randint(3, 20, size=n_iter_search)}
estimator_parameters={'n_iter':randint(5, 100, size=n_iter_search),
                      'penalty':['l1','l2','elasticnet'],
                      'l1_ratio':uniform(0.1,0.9, size=n_iter_search),
                      'loss':['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],
                      'power_t':uniform(0.1, size=n_iter_search),
                      'alpha': [10**x for x in range(-8,0)],
                      'eta0': [10**x for x in range(-4,-1)],
                      'learning_rate': ["invscaling", "constant", "optimal"]}
"""
Explanation: Note: as expected the capitalization makes the predicitve task harder since it expands the vocabulary size and adds variations that look random
Discriminative model on real valued vector labels
End of explanation
"""
%%time
# Randomized hyperparameter search over pre_processor, vectorizer and
# estimator; the best model is saved to 'my_seq.model'.
# NOTE: Python 2 print statement below.
#optimize hyperparameters and fit a predictive model
#determine optimal parameter configuration
model.optimize(train_pos_seqs, train_neg_seqs,
               model_name='my_seq.model',
               n_active_learning_iterations=0,
               n_iter=n_iter_search, cv=3,
               pre_processor_parameters=pre_processor_parameters,
               vectorizer_parameters=vectorizer_parameters,
               estimator_parameters=estimator_parameters)
#print optimal parameter configuration
print model.get_parameters()
#evaluate predictive performance
apr, roc = model.estimate(test_pos_seqs, test_neg_seqs)
"""
Explanation: Model Auto Optimization
End of explanation
"""
|
smharper/openmc | examples/jupyter/mg-mode-part-iii.ipynb | mit | import os
import matplotlib.pyplot as plt
import numpy as np
import openmc
%matplotlib inline
"""
Explanation: This Notebook illustrates the use of the more advanced features of OpenMC's multi-group mode and the openmc.mgxs.Library class. During this process, this notebook will illustrate the following features:
Calculation of multi-group cross sections for a simplified BWR 8x8 assembly with isotropic and angle-dependent MGXS.
Automated creation and storage of MGXS with openmc.mgxs.Library
Fission rate comparison between continuous-energy and the two multi-group OpenMC cases.
To avoid focusing on unimportant details, the BWR assembly in this notebook is greatly simplified. The descriptions which follow will point out some areas of simplification.
Generate Input Files
End of explanation
"""
# Materials keyed by short descriptive names for easy lookup later
materials = {}
# Fuel (UO2)
materials['Fuel'] = openmc.Material(name='Fuel')
materials['Fuel'].set_density('g/cm3', 10.32)
materials['Fuel'].add_element('O', 2)
materials['Fuel'].add_element('U', 1, enrichment=3.)  # 3 w/o enriched
# Gadolinia bearing fuel (UO2 + 2% Gd by atom fraction as given)
materials['Gad'] = openmc.Material(name='Gad')
materials['Gad'].set_density('g/cm3', 10.23)
materials['Gad'].add_element('O', 2)
materials['Gad'].add_element('U', 1, enrichment=3.)
materials['Gad'].add_element('Gd', .02)
# Zircaloy cladding (treated as pure Zr)
materials['Zirc2'] = openmc.Material(name='Zirc2')
materials['Zirc2'].set_density('g/cm3', 6.55)
materials['Zirc2'].add_element('Zr', 1)
# Boiling Water at reduced density (~20% voided conditions)
materials['Water'] = openmc.Material(name='Water')
materials['Water'].set_density('g/cm3', 0.6)
materials['Water'].add_element('H', 2)
materials['Water'].add_element('O', 1)
# Boron Carbide for the Control Rods (70% of 2.52 g/cm3 theoretical density)
materials['B4C'] = openmc.Material(name='B4C')
materials['B4C'].set_density('g/cm3', 0.7 * 2.52)
materials['B4C'].add_element('B', 4)
materials['B4C'].add_element('C', 1)
# Steel (simplified to pure iron)
materials['Steel'] = openmc.Material(name='Steel')
materials['Steel'].set_density('g/cm3', 7.75)
materials['Steel'].add_element('Fe', 1)
"""
Explanation: We will be running a rodded 8x8 assembly with Gadolinia fuel pins. Let's start by creating the materials that we will use later.
Material Definition Simplifications:
This model will be run at room temperature so the NNDC ENDF-B/VII.1 data set can be used but the water density will be representative of a module with around 20% voiding. This water density will be non-physically used in all regions of the problem.
Steel is composed of more than just iron, but we will only treat it as such here.
End of explanation
"""
# Instantiate a Materials object
materials_file = openmc.Materials(materials.values())
# Export to "materials.xml"
materials_file.export_to_xml()
"""
Explanation: We can now create a Materials object that can be exported to an actual XML file.
End of explanation
"""
# Set constants for the problem and assembly dimensions (cm, OpenMC default)
fuel_rad = 0.53213    # fuel pellet radius
clad_rad = 0.61341    # clad outer radius
Np = 8                # pins per side of the fuel lattice
pin_pitch = 1.6256
# Total problem width: 8x8 lattice plus a 1-pitch-wide gap on each side
length = float(Np + 2) * pin_pitch
assembly_width = length - 2. * pin_pitch
# Control blade thickness; presumably half-thickness plus sheath -- TODO confirm
rod_thick = 0.47752 / 2. + 0.14224
rod_span = 7. * pin_pitch   # how far each blade wing extends along an edge
surfaces = {}
# Create boundary planes to surround the geometry
surfaces['Global x-'] = openmc.XPlane(0., boundary_type='reflective')
surfaces['Global x+'] = openmc.XPlane(length, boundary_type='reflective')
surfaces['Global y-'] = openmc.YPlane(0., boundary_type='reflective')
surfaces['Global y+'] = openmc.YPlane(length, boundary_type='reflective')
# Create cylinders for the fuel and clad
surfaces['Fuel Radius'] = openmc.ZCylinder(r=fuel_rad)
surfaces['Clad Radius'] = openmc.ZCylinder(r=clad_rad)
# Planes bounding the 8x8 lattice (one pitch in from the global boundary)
surfaces['Assembly x-'] = openmc.XPlane(pin_pitch)
surfaces['Assembly x+'] = openmc.XPlane(length - pin_pitch)
surfaces['Assembly y-'] = openmc.YPlane(pin_pitch)
surfaces['Assembly y+'] = openmc.YPlane(length - pin_pitch)
# Set surfaces for the control blades
surfaces['Top Blade y-'] = openmc.YPlane(length - rod_thick)
surfaces['Top Blade x-'] = openmc.XPlane(pin_pitch)
surfaces['Top Blade x+'] = openmc.XPlane(rod_span)
surfaces['Left Blade x+'] = openmc.XPlane(rod_thick)
surfaces['Left Blade y-'] = openmc.YPlane(length - rod_span)
surfaces['Left Blade y+'] = openmc.YPlane(9. * pin_pitch)
"""
Explanation: Now let's move on to the geometry. The first step is to define some constants which will be used to set our dimensions and then we can start creating the surfaces and regions for the problem, the 8x8 lattice, the rods and the control blade.
Before proceeding let's discuss some simplifications made to the problem geometry:
- To enable the use of an equal-width mesh for running the multi-group calculations, the intra-assembly gap was increased to the same size as the pitch of the 8x8 fuel lattice
- The can is neglected
- The pin-in-water geometry for the control blade is ignored and instead the blade is a solid block of B4C
- Rounded corners are ignored
- There is no cladding for the water rod
End of explanation
"""
# Set regions for geometry building
regions = {}
# Entire problem domain
regions['Global'] = \
    (+surfaces['Global x-'] & -surfaces['Global x+'] &
     +surfaces['Global y-'] & -surfaces['Global y+'])
# The 8x8 fuel lattice (global domain minus the one-pitch border)
regions['Assembly'] = \
    (+surfaces['Assembly x-'] & -surfaces['Assembly x+'] &
     +surfaces['Assembly y-'] & -surfaces['Assembly y+'])
# Radial pin-cell regions: pellet, clad annulus, and surrounding water
regions['Fuel'] = -surfaces['Fuel Radius']
regions['Clad'] = +surfaces['Fuel Radius'] & -surfaces['Clad Radius']
regions['Water'] = +surfaces['Clad Radius']
# Poisoned blade segment running along the top edge
regions['Top Blade'] = \
    (+surfaces['Top Blade y-'] & -surfaces['Global y+']) & \
    (+surfaces['Top Blade x-'] & -surfaces['Top Blade x+'])
# Unpoisoned steel piece at the top-left of the top blade
regions['Top Steel'] = \
    (+surfaces['Global x-'] & -surfaces['Top Blade x-']) & \
    (+surfaces['Top Blade y-'] & -surfaces['Global y+'])
# Poisoned blade segment running down the left edge
regions['Left Blade'] = \
    (+surfaces['Left Blade y-'] & -surfaces['Left Blade y+']) & \
    (+surfaces['Global x-'] & -surfaces['Left Blade x+'])
# Unpoisoned steel piece above the left blade
regions['Left Steel'] = \
    (+surfaces['Left Blade y+'] & -surfaces['Top Blade y-']) & \
    (+surfaces['Global x-'] & -surfaces['Left Blade x+'])
# The corner of the blade is the union of the two steel pieces
regions['Corner Blade'] = \
    regions['Left Steel'] | regions['Top Steel']
# Whatever remains of the global domain is filled with water
regions['Water Fill'] = \
    regions['Global'] & ~regions['Assembly'] & \
    ~regions['Top Blade'] & ~regions['Left Blade'] &\
    ~regions['Corner Blade']
"""
Explanation: With the surfaces defined, we can now construct regions with these surfaces before we use those to create cells
End of explanation
"""
universes = {}
cells = {}
# Build a pin-cell universe (fuel pellet + clad + water) for each fuel type
for name, mat, in zip(['Fuel Pin', 'Gd Pin'],
                      [materials['Fuel'], materials['Gad']]):
    universes[name] = openmc.Universe(name=name)
    # Central fuel pellet
    cells[name] = openmc.Cell(name=name)
    cells[name].fill = mat
    cells[name].region = regions['Fuel']
    universes[name].add_cell(cells[name])
    # Cladding annulus
    cells[name + ' Clad'] = openmc.Cell(name=name + ' Clad')
    cells[name + ' Clad'].fill = materials['Zirc2']
    cells[name + ' Clad'].region = regions['Clad']
    universes[name].add_cell(cells[name + ' Clad'])
    # Moderator outside the clad
    cells[name + ' Water'] = openmc.Cell(name=name + ' Water')
    cells[name + ' Water'].fill = materials['Water']
    cells[name + ' Water'].region = regions['Water']
    universes[name].add_cell(cells[name + ' Water'])
# All-water universe for the water-rod ("hole") lattice positions
universes['Hole'] = openmc.Universe(name='Hole')
cells['Hole'] = openmc.Cell(name='Hole')
cells['Hole'].fill = materials['Water']
universes['Hole'].add_cell(cells['Hole'])
"""
Explanation: We will begin building the 8x8 assembly. To do that we will have to build the cells and universe for each pin type (fuel, gadolinia-fuel, and water).
End of explanation
"""
# Create fuel assembly Lattice
universes['Assembly'] = openmc.RectLattice(name='Assembly')
universes['Assembly'].pitch = (pin_pitch, pin_pitch)
# The lattice origin is one pitch in from the global origin (the gap region)
universes['Assembly'].lower_left = [pin_pitch, pin_pitch]
# Shorthand aliases for the pin universes
f = universes['Fuel Pin']
g = universes['Gd Pin']
h = universes['Hole']
# 8x8 pin map: f = fuel, g = gadolinia fuel, h = water hole
lattices = [[f, f, f, f, f, f, f, f],
            [f, f, f, f, f, f, f, f],
            [f, f, f, g, f, g, f, f],
            [f, f, g, h, h, f, g, f],
            [f, f, f, h, h, f, f, f],
            [f, f, g, f, f, f, g, f],
            [f, f, f, g, f, g, f, f],
            [f, f, f, f, f, f, f, f]]
# Store the array of lattice universes
universes['Assembly'].universes = lattices
# Cell that places the lattice inside the assembly region
cells['Assembly'] = openmc.Cell(name='Assembly')
cells['Assembly'].fill = universes['Assembly']
cells['Assembly'].region = regions['Assembly']
"""
Explanation: Let's use this pin information to create our 8x8 assembly.
End of explanation
"""
# Control blade and exterior water cells (regions defined earlier)
# The top portion of the blade, poisoned with B4C
cells['Top Blade'] = openmc.Cell(name='Top Blade')
cells['Top Blade'].fill = materials['B4C']
cells['Top Blade'].region = regions['Top Blade']
# The left portion of the blade, poisoned with B4C
cells['Left Blade'] = openmc.Cell(name='Left Blade')
cells['Left Blade'].fill = materials['B4C']
cells['Left Blade'].region = regions['Left Blade']
# The top-left corner portion of the blade, with no poison
cells['Corner Blade'] = openmc.Cell(name='Corner Blade')
cells['Corner Blade'].fill = materials['Steel']
cells['Corner Blade'].region = regions['Corner Blade']
# Water surrounding all other cells and our assembly
cells['Water Fill'] = openmc.Cell(name='Water Fill')
cells['Water Fill'].fill = materials['Water']
cells['Water Fill'].region = regions['Water Fill']
"""
Explanation: So far we have the rods and water within the assembly, but we still need the control blade and the water which fills the rest of the space. We will create those cells now.
End of explanation
"""
# Create root Universe
universes['Root'] = openmc.Universe(name='root universe', universe_id=0)
universes['Root'].add_cells([cells['Assembly'], cells['Top Blade'],
cells['Corner Blade'], cells['Left Blade'],
cells['Water Fill']])
"""
Explanation: OpenMC requires that there is a "root" universe. Let us create our root universe and fill it with the cells just defined.
End of explanation
"""
# Sanity-check the geometry: plot a material-colored slice at the axial
# midplane (RGB tuples are fractions of 1)
universes['Root'].plot(origin=(length / 2., length / 2., 0.),
                       pixels=(500, 500), width=(length, length),
                       color_by='material',
                       colors={materials['Fuel']: (1., 0., 0.),      # red
                               materials['Gad']: (1., 1., 0.),       # yellow
                               materials['Zirc2']: (0.5, 0.5, 0.5),  # light grey
                               materials['Water']: (0.0, 0.0, 1.0),  # blue
                               materials['B4C']: (0.0, 0.0, 0.0),    # black
                               materials['Steel']: (0.4, 0.4, 0.4)}) # dark grey
"""
Explanation: What do you do after you create your model? Check it! We will use the plotting capabilities of the Python API to do this for us.
When doing so, we will coloring by material with fuel being red, gadolinia-fuel as yellow, zirc cladding as a light grey, water as blue, B4C as black and steel as a darker gray.
End of explanation
"""
# Create Geometry and set root universe
geometry = openmc.Geometry(universes['Root'])
# Export to "geometry.xml"
geometry.export_to_xml()
"""
Explanation: Looks pretty good to us!
We now must create a geometry that is assigned a root universe and export it to XML.
End of explanation
"""
# OpenMC simulation parameters
batches = 1000
inactive = 20     # discarded batches while the fission source converges
particles = 1000  # particles per batch
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
settings_file.output = {'tallies': False}  # skip writing tallies.out
settings_file.verbosity = 4                # header + results only
# Create an initial uniform spatial source distribution over fissionable zones.
# NOTE(review): the z bounds are degenerate (10 to 10), so the source lies in
# the z=10 plane; presumably fine for this z-infinite model -- confirm.
bounds = [pin_pitch, pin_pitch, 10, length - pin_pitch, length - pin_pitch, 10]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.Source(space=uniform_dist)
# Export to "settings.xml"
settings_file.export_to_xml()
"""
Explanation: With the geometry and materials finished, we now just need to define simulation parameters, including how to run the model and what we want to learn from the model (i.e., define the tallies). We will start with our simulation parameters in the next block.
This will include setting the run strategy, telling OpenMC not to bother creating a tallies.out file, and limiting the verbosity of our output to just the header and results to not clog up our notebook with results from each batch.
End of explanation
"""
# Instantiate a 2-group EnergyGroups object
groups = openmc.mgxs.EnergyGroups()
groups.group_edges = np.array([0., 0.625, 20.0e6])
"""
Explanation: Create an MGXS Library
Now we are ready to generate multi-group cross sections! First, let's define a 2-group structure using the built-in EnergyGroups class.
End of explanation
"""
# Initialize a 2-group Isotropic MGXS Library for OpenMC
iso_mgxs_lib = openmc.mgxs.Library(geometry)
iso_mgxs_lib.energy_groups = groups
"""
Explanation: Next, we will instantiate an openmc.mgxs.Library for the energy groups with our the problem geometry. This library will use the default setting of isotropically-weighting the multi-group cross sections.
End of explanation
"""
# Specify multi-group cross section types to compute
iso_mgxs_lib.mgxs_types = ['total', 'absorption', 'nu-fission', 'fission',
'nu-scatter matrix', 'multiplicity matrix', 'chi']
"""
Explanation: Now, we must specify to the Library which types of cross sections to compute. OpenMC's multi-group mode can accept isotropic flux-weighted cross sections or angle-dependent cross sections, as well as supporting anisotropic scattering represented by either Legendre polynomials, histogram, or tabular angular distributions.
Just like before, we will create the following multi-group cross sections needed to run an OpenMC simulation to verify the accuracy of our cross sections: "total", "absorption", "nu-fission", '"fission", "nu-scatter matrix", "multiplicity matrix", and "chi".
"multiplicity matrix" is needed to provide OpenMC's multi-group mode with additional information needed to accurately treat scattering multiplication (i.e., (n,xn) reactions)) explicitly.
End of explanation
"""
# Instantiate a tally Mesh with one bin per pin cell (8x8 lattice + gap = 10x10)
mesh = openmc.RegularMesh()
mesh.dimension = [10, 10]
mesh.lower_left = [0., 0.]
mesh.upper_right = [length, length]
# Specify a "mesh" domain type for the cross section tally filters
iso_mgxs_lib.domain_type = "mesh"
# Specify the mesh over which to compute multi-group cross sections
iso_mgxs_lib.domains = [mesh]
"""
Explanation: Now we must specify the type of domain over which we would like the Library to compute multi-group cross sections. The domain type corresponds to the type of tally filter to be used in the tallies created to compute multi-group cross sections. At the present time, the Library supports "material" "cell", "universe", and "mesh" domain types.
For the sake of example we will use a mesh to gather our cross sections. This mesh will be set up so there is one mesh bin for every pin cell.
End of explanation
"""
# Set the scattering format to histogram and then define the number of bins
# Avoid a warning that corrections don't make sense with histogram data
iso_mgxs_lib.correction = None
# Set the histogram data: 11 equal-width bins in mu (change-in-angle)
iso_mgxs_lib.scatter_format = 'histogram'
iso_mgxs_lib.histogram_bins = 11
"""
Explanation: Now we will set the scattering treatment that we wish to use.
In the mg-mode-part-ii notebook, the cross sections were generated with a typical P3 scattering expansion in mind. Now, however, we will use a more advanced technique: OpenMC will directly provide us a histogram of the change-in-angle (i.e., $\mu$) distribution.
Where as in the mg-mode-part-ii notebook, all that was required was to set the legendre_order attribute of mgxs_lib, here we have only slightly more work: we have to tell the Library that we want to use a histogram distribution (as it is not the default), and then tell it the number of bins.
For this problem we will use 11 bins.
End of explanation
"""
# Let's repeat all of the above for an angular MGXS library so we can gather
# that in the same continuous-energy calculation
angle_mgxs_lib = openmc.mgxs.Library(geometry)
angle_mgxs_lib.energy_groups = groups
angle_mgxs_lib.mgxs_types = ['total', 'absorption', 'nu-fission', 'fission',
                             'nu-scatter matrix', 'multiplicity matrix', 'chi']
angle_mgxs_lib.domain_type = "mesh"
angle_mgxs_lib.domains = [mesh]
angle_mgxs_lib.correction = None
angle_mgxs_lib.scatter_format = 'histogram'
angle_mgxs_lib.histogram_bins = 11
# Set the angular bins to 8: only azimuthal dependence is needed since the
# problem is symmetric in z (polar binning is left at its default)
angle_mgxs_lib.num_azimuthal = 8
"""
Explanation: Ok, we made our isotropic library with histogram-scattering!
Now why don't we go ahead and create a library to do the same, but with angle-dependent MGXS. That is, we will avoid making the isotropic flux weighting approximation and instead just store a cross section for every polar and azimuthal angle pair.
To do this with the Python API and OpenMC, all we have to do is set the number of polar and azimuthal bins. Here we only need to set the number of bins, the API will convert all of angular space into equal-width bins for us.
Since this problem is symmetric in the z-direction, we only need to concern ourselves with the azimuthal variation here. We will use eight angles.
Ok, we will repeat all the above steps for a new library object, but will also set the number of azimuthal bins at the end.
End of explanation
"""
# Check the libraries - if no errors are raised, then the library is satisfactory.
iso_mgxs_lib.check_library_for_openmc_mgxs()
angle_mgxs_lib.check_library_for_openmc_mgxs()
"""
Explanation: Now that our libraries have been setup, let's make sure they contain the types of cross sections which meet the needs of OpenMC's multi-group solver. Note that this step is done automatically when writing the Multi-Group Library file later in the process (as part of the mgxs_lib.write_mg_library()), but it is a good practice to also run this before spending all the time running OpenMC to generate the cross sections.
End of explanation
"""
# Construct all tallies needed for the multi-group cross section library
iso_mgxs_lib.build_library()
angle_mgxs_lib.build_library()
"""
Explanation: Lastly, we use our two Library objects to construct the tallies needed to compute all of the requested multi-group cross sections in each domain.
We expect a warning here telling us that the default Legendre order is not meaningful since we are using histogram scattering.
End of explanation
"""
# Create a "tallies.xml" file for the MGXS Library
tallies_file = openmc.Tallies()
iso_mgxs_lib.add_to_tallies_file(tallies_file, merge=True)
angle_mgxs_lib.add_to_tallies_file(tallies_file, merge=True)
"""
Explanation: The tallies within the libraries can now be exported to a "tallies.xml" input file for OpenMC.
End of explanation
"""
# Instantiate tally Filter over the same 10x10 pin-cell mesh
mesh_filter = openmc.MeshFilter(mesh)
# Instantiate the Tally scoring the fission rate in each mesh cell, used later
# to compare CE and MG results
tally = openmc.Tally(name='mesh tally')
tally.filters = [mesh_filter]
tally.scores = ['fission']
# Add tally to collection
tallies_file.append(tally, merge=True)
# Export all tallies to a "tallies.xml" file
tallies_file.export_to_xml()
"""
Explanation: In addition, we instantiate a fission rate mesh tally for eventual comparison of results.
End of explanation
"""
# Run OpenMC
openmc.run()
"""
Explanation: Time to run the calculation and get our results!
End of explanation
"""
# Move the StatePoint File so the upcoming MG runs don't overwrite it
ce_spfile = './statepoint_ce.h5'
os.rename('statepoint.' + str(batches) + '.h5', ce_spfile)
# Move the Summary file for the same reason
ce_sumfile = './summary_ce.h5'
os.rename('summary.h5', ce_sumfile)
"""
Explanation: To make the files available and not be over-written when running the multi-group calculation, we will now rename the statepoint and summary files.
End of explanation
"""
# Load the statepoint file, but not the summary file, as it is a different filename than expected.
sp = openmc.StatePoint(ce_spfile, autolink=False)
"""
Explanation: Tally Data Processing
Our simulation ran successfully and created statepoint and summary output files. Let's begin by loading the StatePoint file, but not automatically linking the summary file.
End of explanation
"""
su = openmc.Summary(ce_sumfile)
sp.link_with_summary(su)
"""
Explanation: In addition to the statepoint file, our simulation also created a summary file which encapsulates information about the materials and geometry. This is necessary for the openmc.Library to properly process the tally data. We first create a Summary object and link it with the statepoint. Normally this would not need to be performed, but since we have renamed our summary file to avoid conflicts with the Multi-Group calculation's summary file, we will load this in explicitly.
End of explanation
"""
# Initialize MGXS Library with OpenMC statepoint data
iso_mgxs_lib.load_from_statepoint(sp)
angle_mgxs_lib.load_from_statepoint(sp)
"""
Explanation: The statepoint is now ready to be analyzed. To create our libraries we simply have to load the tallies from the statepoint into each Library and our MGXS objects will compute the cross sections for us under-the-hood.
End of explanation
"""
# Allow the API to create our Library, materials, and geometry file.
# create_mg_mode() returns MG equivalents of the CE model's inputs.
iso_mgxs_file, materials_file, geometry_file = iso_mgxs_lib.create_mg_mode()
# Tell the materials file what we want to call the multi-group library
materials_file.cross_sections = 'mgxs.h5'
# Write our newly-created files to disk
iso_mgxs_file.export_to_hdf5('mgxs.h5')
materials_file.export_to_xml()
geometry_file.export_to_xml()
"""
Explanation: The next step will be to prepare the input for OpenMC to use our newly created multi-group data.
Isotropic Multi-Group OpenMC Calculation
We will now use the Library to produce the isotropic multi-group cross section data set for use by the OpenMC multi-group solver.
If the model to be run in multi-group mode is the same as the continuous-energy mode, the openmc.mgxs.Library class has the ability to directly create the multi-group geometry, materials, and multi-group library for us.
Note that this feature is only useful if the MG model is intended to replicate the CE geometry - it is not useful if the CE library is not the same geometry (like it would be for generating MGXS from a generic spectral region).
This method creates and assigns the materials automatically, including creating a geometry which is equivalent to our mesh cells for which the cross sections were derived.
End of explanation
"""
# Set the energy mode
settings_file.energy_mode = 'multi-group'
# Export to "settings.xml"
settings_file.export_to_xml()
"""
Explanation: Next, we can make the changes we need to the settings file.
These changes are limited to telling OpenMC to run a multi-group calculation and provide the location of our multi-group cross section file.
End of explanation
"""
# Create a "tallies.xml" file for the MGXS Library
tallies_file = openmc.Tallies()
# Add our fission rate mesh tally
tallies_file.append(tally)
# Export to "tallies.xml"
tallies_file.export_to_xml()
"""
Explanation: Let's clear up the tallies file so it doesn't include all the extra tallies for re-generating a multi-group library
End of explanation
"""
geometry_file.root_universe.plot(origin=(length / 2., length / 2., 0.),
pixels=(300, 300), width=(length, length),
color_by='material')
"""
Explanation: Before running the calculation let's look at our meshed model. It might not be interesting, but let's take a look anyways.
End of explanation
"""
# Execute the Isotropic MG OpenMC Run
openmc.run()
"""
Explanation: So, we see a 10x10 grid with a different color for every material, sounds good!
At this point, the problem is set up and we can run the multi-group calculation.
End of explanation
"""
# Move the StatePoint File
iso_mg_spfile = './statepoint_mg_iso.h5'
os.rename('statepoint.' + str(batches) + '.h5', iso_mg_spfile)
# Move the Summary file
iso_mg_sumfile = './summary_mg_iso.h5'
os.rename('summary.h5', iso_mg_sumfile)
"""
Explanation: Before we go the angle-dependent case, let's save the StatePoint and Summary files so they don't get over-written
End of explanation
"""
# Let's repeat for the angle-dependent case
angle_mgxs_lib.load_from_statepoint(sp)
angle_mgxs_file, materials_file, geometry_file = angle_mgxs_lib.create_mg_mode()
angle_mgxs_file.export_to_hdf5()
"""
Explanation: Angle-Dependent Multi-Group OpenMC Calculation
Let's now run the calculation with the angle-dependent multi-group cross sections. This process will be the exact same as above, except this time we will use the angle-dependent Library as our starting point.
We do not need to re-write the materials, geometry, or tallies file to disk since they are the same as for the isotropic case.
End of explanation
"""
# Execute the angle-dependent OpenMC Run
openmc.run()
"""
Explanation: At this point, the problem is set up and we can run the multi-group calculation.
End of explanation
"""
# Load the isotropic statepoint file; it was renamed earlier, so its summary
# must be linked manually
iso_mgsp = openmc.StatePoint(iso_mg_spfile, autolink=False)
iso_mgsum = openmc.Summary(iso_mg_sumfile)
iso_mgsp.link_with_summary(iso_mgsum)
# Load the angle-dependent statepoint file (default name, so summary autolinks)
angle_mgsp = openmc.StatePoint('statepoint.' + str(batches) + '.h5')
"""
Explanation: Results Comparison
In this section we will compare the eigenvalues and fission rate distributions of the continuous-energy, isotropic multi-group and angle-dependent multi-group cases.
We will begin by loading the multi-group statepoint files, first the isotropic, then angle-dependent. The angle-dependent was not renamed, so we can autolink its summary.
End of explanation
"""
# Combined k-effective estimates (values carry uncertainties; see
# .nominal_value usage below)
ce_keff = sp.k_combined
iso_mg_keff = iso_mgsp.k_combined
angle_mg_keff = angle_mgsp.k_combined
# Find eigenvalue bias in pcm (1 pcm = 1e-5 delta-k)
iso_bias = 1.0e5 * (ce_keff - iso_mg_keff)
angle_bias = 1.0e5 * (ce_keff - angle_mg_keff)
"""
Explanation: Eigenvalue Comparison
Next, we can load the eigenvalues for comparison and do that comparison
End of explanation
"""
print('Isotropic to CE Bias [pcm]: {0:1.1f}'.format(iso_bias.nominal_value))
print('Angle to CE Bias [pcm]: {0:1.1f}'.format(angle_bias.nominal_value))
"""
Explanation: Let's compare the eigenvalues in units of pcm
End of explanation
"""
# Plot normalized fission-rate maps from the three statepoints side-by-side
sp_files = [sp, iso_mgsp, angle_mgsp]
titles = ['Continuous-Energy', 'Isotropic Multi-Group',
          'Angle-Dependent Multi-Group']
fiss_rates = []
fig = plt.figure(figsize=(12, 6))
for i, (case, title) in enumerate(zip(sp_files, titles)):
    # Get our mesh tally information
    mesh_tally = case.get_tally(name='mesh tally')
    fiss_rates.append(mesh_tally.get_values(scores=['fission']))
    # Reshape the flat tally array to the 2-D mesh layout
    fiss_rates[-1].shape = mesh.dimension
    # Normalize the fission rates to a mean of 1 over the fueled (non-zero) cells
    fiss_rates[-1] /= np.mean(fiss_rates[-1][fiss_rates[-1] > 0.])
    # Set 0s to NaNs so they show as white
    fiss_rates[-1][fiss_rates[-1] == 0.] = np.nan
    # NOTE: the original rebound `fig` to the Axes returned by plt.subplot,
    # clobbering the Figure handle; the return value is simply discarded now.
    plt.subplot(1, len(titles), i + 1)
    # Plot only the fueled regions (drop the 1-cell gap border)
    plt.imshow(fiss_rates[-1][1:-1, 1:-1], cmap='jet', origin='lower',
               vmin=0.4, vmax=4.)
    plt.title(title + '\nFission Rates')
"""
Explanation: We see a large reduction in error by switching to the usage of angle-dependent multi-group cross sections!
Of course, this rodded and partially voided BWR problem was chosen specifically to exacerbate the angular variation of the reaction rates (and thus cross sections). Such improvements should not be expected in every case, especially if localized absorbers are not present.
It is important to note that both eigenvalues can be improved by the application of finer geometric or energetic discretizations, but this shows that the angle discretization may be a factor for consideration.
Fission Rate Distribution Comparison
Next we will visualize the mesh tally results obtained from our three cases.
This will be performed by first obtaining the one-group fission rate tally information from our state point files. After we have this information we will re-shape the data to match the original mesh laydown. We will then normalize, and finally create side-by-side plots of all.
End of explanation
"""
# Calculate and plot the ratios of MG to CE for each of the 2 MG cases
ratios = []
fig, axes = plt.subplots(figsize=(12, 6), nrows=1, ncols=2)
for i, (case, title, axis) in enumerate(zip(sp_files[1:], titles[1:], axes.flat)):
    # Get our ratio relative to the CE (in fiss_rates[0])
    ratios.append(np.divide(fiss_rates[i + 1], fiss_rates[0]))
    # Plot only the fueled regions (drop the 1-cell gap border)
    im = axis.imshow(ratios[-1][1:-1, 1:-1], cmap='bwr', origin='lower',
                     vmin = 0.9, vmax = 1.1)
    axis.set_title(title + '\nFission Rates Relative\nto Continuous-Energy')
# Add a single shared color bar to the right of both subplots
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
"""
Explanation: With this colormap, dark blue is the lowest power and dark red is the highest power.
We see general agreement between the fission rate distributions, but it looks like there may be less of a gradient near the rods in the continuous-energy and angle-dependent MGXS cases than in the isotropic MGXS case.
To better see the differences, let's plot the ratios of the fission powers for our two multi-group cases compared to the continuous-energy case.
End of explanation
"""
|
anhaidgroup/py_entitymatching | notebooks/guides/step_wise_em_guides/.ipynb_checkpoints/Selecting the Best Learning Matcher-checkpoint.ipynb | bsd-3-clause | # Import py_entitymatching package
import py_entitymatching as em
import os
import pandas as pd
# Set the seed value
seed = 0
!ls $datasets_dir
# Get the datasets directory
datasets_dir = em.get_install_path() + os.sep + 'datasets'
path_A = datasets_dir + os.sep + 'dblp_demo.csv'
path_B = datasets_dir + os.sep + 'acm_demo.csv'
path_labeled_data = datasets_dir + os.sep + 'labeled_data_demo.csv'
A = em.read_csv_metadata(path_A, key='id')
B = em.read_csv_metadata(path_B, key='id')
# Load the pre-labeled data
S = em.read_csv_metadata(path_labeled_data,
key='_id',
ltable=A, rtable=B,
fk_ltable='ltable_id', fk_rtable='rtable_id')
"""
Explanation: Introduction
This IPython notebook illustrates how to select the best learning based matcher. First, we need to import py_entitymatching package and other libraries as follows:
End of explanation
"""
# Split S into I and J: 50/50 development/evaluation split, seeded for
# reproducibility
IJ = em.split_train_test(S, train_proportion=0.5, random_state=0)
I = IJ['train']
J = IJ['test']
"""
Explanation: Then, split the labeled data into development set and evaluation set. Use the development set to select the best learning-based matcher
End of explanation
"""
# Create a set of ML-matchers, seeding the stochastic ones for reproducibility
dt = em.DTMatcher(name='DecisionTree', random_state=0)
svm = em.SVMMatcher(name='SVM', random_state=0)
rf = em.RFMatcher(name='RF', random_state=0)
lg = em.LogRegMatcher(name='LogReg', random_state=0)
ln = em.LinRegMatcher(name='LinReg')  # no random_state argument passed here
"""
Explanation: Selecting the Best learning-based matcher
This, typically involves the following steps:
1. Creating a set of learning-based matchers
2. Creating features
3. Extracting feature vectors
4. Selecting the best learning-based matcher using k-fold cross validation
5. Debugging the matcher (and possibly repeat the above steps)
Creating a set of learning-based matchers
First, we need to create a set of learning-based matchers. The following matchers are supported in Magellan: (1) decision tree, (2) random forest, (3) naive bayes, (4) svm, (5) logistic regression, and (6) linear regression.
End of explanation
"""
# Generate a set of features
F = em.get_features_for_matching(A, B, validate_inferred_attr_types=False)
"""
Explanation: Creating features
Next, we need to create a set of features for the development set. Magellan provides a way to automatically generate features based on the attributes in the input tables. For the purposes of this guide, we use the automatically generated features.
End of explanation
"""
F.feature_name
"""
Explanation: We observe that there were 20 features generated. As a first step, let's say that we decide to use only 'year' related features.
End of explanation
"""
# Convert the I into a set of feature vectors using F; attrs_after carries the
# gold 'label' column through to the output table
H = em.extract_feature_vecs(I,
                            feature_table=F,
                            attrs_after='label',
                            show_progress=False)
# Display first few rows
H.head()
# Check if the feature vectors contain missing values
# A return value of True means that there are missing values.
# The original `any(pd.notnull(H))` was wrong twice over: pd.notnull tests for
# NON-null entries, and `any()` over a DataFrame iterates its column labels
# rather than its cell values.
H.isnull().values.any()
"""
Explanation: Extracting feature vectors
In this step, we extract feature vectors using the development set and the created features.
End of explanation
"""
# Impute feature vectors with the mean of the column values.
# The id and label columns are excluded so only feature columns are imputed.
H = em.impute_table(H,
                    exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
                    strategy='mean')
"""
Explanation: We observe that the extracted feature vectors contain missing values. We have to impute the missing values for the learning-based matchers to fit the model correctly. For the purposes of this guide, we impute the missing value in a column with the mean of the values in that column.
End of explanation
"""
# Select the best ML matcher using CV: 5-fold cross-validation, ranking the
# candidate matchers by mean F1
result = em.select_matcher([dt, rf, svm, ln, lg], table=H,
                           exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
                           k=5,
                           target_attr='label', metric_to_select_matcher='f1', random_state=0)
# Summary statistics across matchers
result['cv_stats']
# Per-fold drill-down for each metric
result['drill_down_cv_stats']['precision']
result['drill_down_cv_stats']['recall']
result['drill_down_cv_stats']['f1']
"""
Explanation: Selecting the best matcher using cross-validation
Now, we select the best matcher using k-fold cross-validation. For the purposes of this guide, we use five-fold cross validation and use the 'F1' metric to select the best matcher.
End of explanation
"""
# Split H into P and Q: a further 50/50 split of the development feature
# vectors, used only for debugging the random forest
PQ = em.split_train_test(H, train_proportion=0.5, random_state=0)
P = PQ['train']
Q = PQ['test']
# Debug RF matcher using GUI (opens an interactive debugging window)
em.vis_debug_rf(rf, P, Q,
                exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
                target_attr='label')
# Add a feature to do Jaccard on title + authors and add it to F
# Create a feature declaratively: Jaccard similarity over whitespace-tokenized,
# lower-cased concatenations of title and authors
sim = em.get_sim_funs_for_matching()
tok = em.get_tokenizers_for_matching()
feature_string = """jaccard(wspace((ltuple['title'] + ' ' + ltuple['authors']).lower()),
wspace((rtuple['title'] + ' ' + rtuple['authors']).lower()))"""
feature = em.get_feature_fn(feature_string, sim, tok)
# Add feature to F
em.add_feature(F, 'jac_ws_title_authors', feature)
# Convert I into feature vectors using updated F
H = em.extract_feature_vecs(I,
                            feature_table=F,
                            attrs_after='label',
                            show_progress=False)
# Check whether the updated F improves X (Random Forest)
result = em.select_matcher([rf], table=H,
                           exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
                           k=5,
                           target_attr='label', metric_to_select_matcher='f1', random_state=0)
result['drill_down_cv_stats']['f1']
# Select the best matcher again using CV with the updated feature set
result = em.select_matcher([dt, rf, svm, ln, lg], table=H,
                           exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
                           k=5,
                           target_attr='label', metric_to_select_matcher='f1', random_state=0)
result['cv_stats']
result['drill_down_cv_stats']['f1']
"""
Explanation: Debug X (Random Forest)
End of explanation
"""
|
barjacks/foundations-homework | 07/.ipynb_checkpoints/07_Introduction_to_Pandas-checkpoint.ipynb | mit | # import pandas, but call it pd. Why? Because that's What People Do.
import pandas as pd
"""
Explanation: An Introduction to pandas
Pandas! They are adorable animals. You might think they are the worst animal ever but that is not true. You might sometimes think pandas is the worst library every, and that is only kind of true.
The important thing is use the right tool for the job. pandas is good for some stuff, SQL is good for some stuff, writing raw Python is good for some stuff. You'll figure it out as you go along.
Now let's start coding. Hopefully you did pip install pandas before you started up this notebook.
End of explanation
"""
# We're going to call this df, which means "data frame"
# It isn't in UTF-8 (I saved it from my mac!) so we need to set the encoding
df = pd.read_csv("NBA-Census-10.14.2013.csv", encoding ="mac_roman")
#this is a data frame (df)
"""
Explanation: When you import pandas, you use import pandas as pd. That means instead of typing pandas in your code you'll type pd.
You don't have to, but every other person on the planet will be doing it, so you might as well.
Now we're going to read in a file. Our file is called NBA-Census-10.14.2013.csv because we're sports moguls. pandas can read_ different types of files, so try to figure it out by typing pd.read_ and hitting tab for autocomplete.
End of explanation
"""
# Let's look at all of it
df
"""
Explanation: A dataframe is basically a spreadsheet, except it lives in the world of Python or the statistical programming language R. They can't call it a spreadsheet because then people would think those programmers used Excel, which would make them boring and normal and they'd have to wear a tie every day.
Selecting rows
Now let's look at our data, since that's what data is for
End of explanation
"""
# Look at the first few rows
df.head() #shows first 5 rows
"""
Explanation: If we scroll we can see all of it. But maybe we don't want to see all of it. Maybe we hate scrolling?
End of explanation
"""
# Let's look at MORE of the first few rows
df.head(10)
"""
Explanation: ...but maybe we want to see more than a measly five results?
End of explanation
"""
# Let's look at the final few rows
df.tail(4)
"""
Explanation: But maybe we want to make a basketball joke and see the final four?
End of explanation
"""
# Show the 6th through the 8th rows
df[5:8]
"""
Explanation: So yes, head and tail work kind of like the terminal commands. That's nice, I guess.
But maybe we're incredibly demanding (which we are) and we want, say, the 6th through the 8th row (which we do). Don't worry (which I know you were), we can do that, too.
End of explanation
"""
# Get the names of the columns, just because
#columns_we_want = ['Name', 'Age']
#df[columns_we_want]
# If we want to be "correct" we add .values on the end of it
df.columns
# Select only name and age
# Combining that with .head() to see not-so-many rows
columns_we_want = ['Name', 'Age']
df[columns_we_want].head()
# We can also do this all in one line, even though it starts looking ugly
# (unlike the cute bears pandas looks ugly pretty often)
df[['Name', 'Age',]].head()
"""
Explanation: It's kind of like an array, right? Except where in an array we'd say df[0] this time we need to give it two numbers, the start and the end.
Selecting columns
But jeez, my eyes don't want to go that far over the data. I only want to see, uh, name and age.
End of explanation
"""
df.head()
"""
Explanation: NOTE: That was not df['Name', 'Age'], it was df[['Name', 'Age']]. You'll definitely type it wrong all of the time. When things break with pandas it's probably because you forgot to put in a million brackets.
Describing your data
A powerful tool of pandas is being able to select a portion of your data, because who ordered all that data anyway.
End of explanation
"""
# Grab the POS column, and count the different values in it.
df['POS'].value_counts()
"""
Explanation: I want to know how many people are in each position. Luckily, pandas can tell me!
End of explanation
"""
#race
race_counts = df['Race'].value_counts()
race_counts
# Summary statistics for Age
df['Age'].describe()
df.describe()
# That's pretty good. Does it work for everything? How about the money?
df['2013 $'].describe()
# The describe output is limited because the money values are strings.
"""
Explanation: Now that was a little weird, yes - we used df['POS'] instead of df[['POS']] when viewing the data's details.
But now I'm curious about numbers: how old is everyone? Maybe we could, I don't know, get some statistics about age? Some statistics to describe age?
End of explanation
"""
# Doing more describing
df['Ht (In.)'].describe()
"""
Explanation: Unfortunately because that has dollar signs and commas it's thought of as a string. We'll fix it in a second, but let's try describing one more thing.
End of explanation
"""
# Take another look at our inches, but only the first few
df['Ht (In.)'].head()
# Divide those inches by 12
#number_of_inches = 300
#number_of_inches / 12
df['Ht (In.)'].head() / 12
# Let's divide ALL of them by 12
df['Ht (In.)'] / 12
# Can we get statistics on those?
height_in_feet = df['Ht (In.)'] / 12
height_in_feet.describe()
# Let's look at our original data again
df.head(3)
"""
Explanation: That's stupid, though, what's an inch even look like? What's 80 inches? I don't have a clue. If only there were some wa to manipulate our data.
Manipulating data
Oh wait there is, HA HA HA.
End of explanation
"""
# Store a new column
df['feet'] = df['Ht (In.)'] / 12
df.head()
"""
Explanation: Okay that was nice but unfortunately we can't do anything with it. It's just sitting there, separate from our data. If this were normal code we could do blahblah['feet'] = blahblah['Ht (In.)'] / 12, but since this is pandas, we can't. Right? Right?
End of explanation
"""
# Can't just use .replace
# Need to use this weird .str thing
# Can't just immediately replace the , either
# Need to use the .str thing before EVERY string method
# Describe still doesn't work.
# Let's convert it to an integer using .astype(int) before we describe it
# Maybe we can just make them millions?
# Unfortunately one is "n/a" which is going to break our code, so we can make n/a be 0
# Remove the .head() piece and save it back into the dataframe
"""
Explanation: That's cool, maybe we could do the same thing with their salary? Take out the $ and the , and convert it to an integer?
End of explanation
"""
# This is just the first few guys in the dataset. Can we order it?
# Let's try to sort them, ascending value
df.sort_values('feet')
"""
Explanation: The average basketball player makes 3.8 million dollars and is a little over six and a half feet tall.
But who cares about those guys? I don't care about those guys. They're boring. I want the real rich guys!
Sorting and sub-selecting
End of explanation
"""
# It isn't descending = True, unfortunately
df.sort_values('feet', ascending=False).head()
# We can use this to find the oldest guys in the league
df.sort_values('Age', ascending=False).head()
# Or the youngest, by taking out 'ascending=False'
df.sort_values('feet').head()
"""
Explanation: Those guys are making nothing! If only there were a way to sort from high to low, a.k.a. descending instead of ascending.
End of explanation
"""
# Get a big long list of True and False for every single row.
df['feet'] > 6.5
# We could use value counts if we wanted
above_or_below_six_five = df['feet'] > 6.5
above_or_below_six_five.value_counts()
# But we can also apply this to every single row to say whether YES we want it or NO we don't
# Instead of putting column names inside of the brackets, we instead
# put the True/False statements. It will only return the players above
# seven feet tall
df[df['feet'] > 6.5]
df['Race'] == 'Asian'
df[]
# Or only the guards
df['POS'] == 'G'.head()
#People below 6 feet
df['feet'] < 6.5
# Every column you want to query needs parentheses around it
#Guards that are higher than 6.5
#this is combination of both
df[(df['POS'] == 'G') & (df['feet'] < 6.5)].head()
#We can save stuff
centers = df[df['POS'] == 'C']
guards = df[df['POS'] == 'G']
centers['feet'].describe()
guards['feet'].describe()
# It might be easier to break down the booleans into separate variables
# We can save this stuff
# Maybe we can compare them to taller players?
"""
Explanation: But sometimes instead of just looking at them, I want to do stuff with them. Play some games with them! Dunk on them~ describe them! And we don't want to dunk on everyone, only the players above 7 feet tall.
First, we need to check out boolean things.
End of explanation
"""
!pip install matplotlib
# This will scream we don't have matplotlib.
df['feet'].hist()
"""
Explanation: Drawing pictures
Okay okay enough code and enough stupid numbers. I'm visual. I want graphics. Okay????? Okay.
End of explanation
"""
%matplotlib inline
df['feet'].hist()
# this will open up a weird window that won't do anything
import matplot.
# So instead you run this code
plt.style.use('fivethirtyeight')
df['feet'].hist()
"""
Explanation: matplotlib is a graphing library. It's the Python way to make graphs!
End of explanation
"""
# Import matplotlib
# What's available?
# Use ggplot
# Make a histogram
# Try some other styles
"""
Explanation: But that's ugly. There's a thing called ggplot for R that looks nice. We want to look nice. We want to look like ggplot.
End of explanation
"""
# Pass in all sorts of stuff!
# Most from http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.hist.html
# .range() is a matplotlib thing
"""
Explanation: That might look better with a little more customization. So let's customize it.
End of explanation
"""
# How does experience relate with the amount of money they're making?
# At least we can assume height and weight are related
# At least we can assume height and weight are related
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html
# We can also use plt separately
# It's SIMILAR but TOTALLY DIFFERENT
"""
Explanation: I want more graphics! Do tall people make more money?!?!
End of explanation
"""
|
ocelot-collab/ocelot | demos/ipython_tutorials/4_wake.ipynb | gpl-3.0 | # the output of plotting commands is displayed inline within frontends,
# directly below the code cell that produced it
%matplotlib inline
# this python library provides generic shallow (copy) and deep copy (deepcopy) operations
from copy import deepcopy
import time
# import from Ocelot main modules and functions
from ocelot import *
# import from Ocelot graphical modules
from ocelot.gui.accelerator import *
# load beam distribution
# this function convert Astra beam distribution to Ocelot format
# - ParticleArray. ParticleArray is designed for tracking.
# in order to work with converters we have to import
# specific module from ocelot.adaptors
from ocelot.adaptors.astra2ocelot import *
"""
Explanation: This notebook was created by Sergey Tomin (sergey.tomin@desy.de) and Igor Zagorodnov for Workshop: Designing future X-ray FELs. Source and license info is on GitHub. Updated September 2020.
Tutorial N4. Wakefields.
Wake table format
Wakefields of a Beam near a Single Plate in a Flat Dechirper
Crosscheck with analytics: Dipole kick
Chirper.
Influence of corrugated structure on the electron beam.
This example is based on the work: I. Zagorodnov, G. Feng, T. Limberg. Corrugated structure insertion for extending the SASE bandwidth up to 3% at the European XFEL.
Geometry of the corrugated structure. The blue ellipse represents an electron beam
propagating along the z axis.
<img src="4_corrugated_str.png" />
Wakefields
In order to take into account the impact of the wake field on the beam the longitudinal wake function
of point charge through the second order Taylor expansion is used.
In general case it uses 13 one-dimensional functions to represent the longitudinal component of the wake
function for arbitrary sets of the source and the wittness particles near to the reference axis.
The wake field impact on the beam is included as series of kicks.
The implementation of the wakefields follows closely the approach described
in:
* O. Zagorodnova, T. Limberg, Impedance budget database for the European XFEL,
in Proceedings of 2009 Particle Accelerator Conference,(Vancouver, Canada, 2009)
* M. Dohlus, K. Floettmann, C. Henning, Fast particle tracking with wake
fields, Report No. DESY 12-012, 2012.
This example will cover the following topics:
Initialization of the wakes and the places of their applying
tracking of second order with wakes
Requirements
beam_chirper.ast - input file, initial beam distribution in ASTRA format (was obtained from s2e simulation performed with ASTRA and CSRtrack).
wake_vert_1m.txt - wake table of the vertical corrugated structure (was calculated with ECHO)
wake_hor_1m.txt - wake table of the vertical corrugated structure (was calculated with ECHO)
Wake Table format
We use the same format of the wakes as implemented in ASTRA and the description of the format can be found in M. Dohlus, K. Floettmann, C. Henning, Fast particle tracking with wake
fields, Report No. DESY 12-012, 2012.
Second order Taylor expansion of the longitudinal wake ($w_z$) in the transverse coordinates
$$
w_z(x_s, y_s, x_o, y_o, s) =
\begin{bmatrix}
1 \
x_s\
y_s\
x_o \
y_o
\end{bmatrix}^T
\begin{bmatrix}
h_{00}(s) & h_{01}(s) & h_{02}(s) & h_{03}(s) & h_{04}(s) \
0 & h_{11}(s) & h_{12}(s) & h_{13}(s) & h_{14}(s)\
0 & h_{12}(s) & -h_{11}(s) & h_{23}(s) & h_{24}(s) \
0 & h_{13}(s) & h_{23}(s) & h_{33}(s) & h_{34}(s)\
0 & h_{14}(s) & h_{24}(s) & h_{34}(s) & -h_{33}(s)
\end{bmatrix}
\begin{bmatrix}
1 \
x_s\
y_s\
x_o \
y_o
\end{bmatrix} ;
$$
where $x_s$ and $y_s$ transverse coordinates of the source particle and $x_o$ and $y_o$ are transverse coordinates of the observer, $s$ is distance between source and observer. Thus to describe longitudinal wake we need 13 functions $h_{\alpha \beta}$.
The transverse components are uniquely related to the longitudinal wake by causality and Panofsky-Wenzel-Theorem.
For each of these coefficients, we use the representation in O. Zagorodnova, T. Limberg, Impedance budget database for the European XFEL
\begin{equation}
h(s) = w_0(s) + \frac{1}{C} + R c\delta(s) + c\frac{\partial}{\partial s}\left[L c \delta(s) + w_1 (s) \right]
\end{equation}
where $w_0(s)$, $w_1(s)$ are nonsingular functions, which can be tabulated easily, and constants $R$, $L$, and $C$ have the meaning of resistivity, inductance, and capacitance, correspondingly.
The functions $w_0(s)$, $w_1(s)$ can be represented by table, e.g. [$s_i$, $w_0^i$].
Now we can describe whole table how it is saved in a file.
$N_h$| $0$
--------------|-----------------
$N_{w_0}$ | $N_{w_1}$
$R,\: [Us]$ | $L,\: [Us^2]$
$C,\: [1/Us]$ | $10\alpha + \beta$
$s_1,\: [m]$ | $w_0(s_1),\: [U]$
$s_2,\: [m]$ |$w_0(s_2),\: [U]$
... | ...
$s_{N_{w_0}},\: [m]$ | $w_0(s_{N_{w_0}}),\: [U]$
$s_1,\: [m]$ | $w_1(s_1),\: [U]$
$s_2,\: [m]$ |$w_1(s_2),\: [U]$
... | ...
$s_{N_{w_1}},\: [m]$ | $w_1(s_{N_{w_1}}),\: [U]$
$N_{w_0}$ | $N_{w_1}$
$R,\: [Us]$ | $L,\: [Us^2]$
$C,\: [1/Us]$ | $10\alpha + \beta$
$s_1,\: [m]$ | $w_0(s_1),\: [U]$
... | ...
$N_{w_0}$ | $N_{w_1}$
$R,\: [Us]$ | $L,\: [Us^2]$
$C,\: [1/Us]$ | $10\alpha + \beta$
$s_1,\: [m]$ | $w_0(s_1),\: [U]$
... | ...
<img width=150/>|<img width=150/>
In the very first line, $N_h$ is number of $h_{\alpha\beta}(s)$ functions in the table. After that, a typical table repeated $N_h$ times describing every $h_{\alpha\beta}(s)$ function.
Every table starts with $N_{w_0}$ and $N_{w_1}$ which are number of points of $w_0(s_i)$ and $w_1(s_i)$ functions.
Next two lines are included $R$, $L$, $C$ and entry $10\alpha + \beta$ which describes the subscript of the auxiliary function $h_{\alpha\beta}(s)$. Next $N_{w_0}$ lines described function $w_0(s)$, and after that next $N_{w_1}$ lines described function $w_1(s)$.
And to describe next $h_{\alpha\beta}(s)$ we repeat procedure.
The unit $U$ is $V/(A\cdot s)$ for $\alpha\beta = 00$, $V/(A\cdot s \cdot m)$ for $\alpha\beta = 01, ... 04$ and $V/(A\cdot s \cdot m^2)$ for all other coefficients.
Import of modules
End of explanation
"""
D00m25 = Drift(l = 0.25)
D01m = Drift(l = 1)
D02m = Drift(l = 2)
# Create markers for defining places of the wakes applying
w1_start = Marker()
w1_stop = Marker()
w2_start = Marker()
w2_stop = Marker()
w3_start = Marker()
w3_stop = Marker()
w4_start = Marker()
w4_stop = Marker()
w5_start = Marker()
w5_stop = Marker()
w6_start = Marker()
w6_stop = Marker()
# quadrupoles
Q1 = Quadrupole(l = 0.5, k1 = 0.215)
# lattice
lattice = (D01m, w1_start, D02m, w1_stop, w2_start, D02m, w2_stop,
w3_start, D02m, w3_stop, D00m25, Q1, D00m25,
w4_start, D02m, w4_stop, w5_start, D02m, w5_stop,
w6_start, D02m, w6_stop, D01m)
# creation MagneticLattice
method = MethodTM()
method.global_method = SecondTM
lat = MagneticLattice(lattice, method=method)
# calculate twiss functions with initial twiss parameters
tws0 = Twiss()
tws0.E = 14 # in GeV
tws0.beta_x = 22.5995
tws0.beta_y = 22.5995
tws0.alpha_x = -1.4285
tws0.alpha_y = 1.4285
tws = twiss(lat, tws0, nPoints=None)
# plotting twiss parameters.
plot_opt_func(lat, tws, top_plot=["Dx"], fig_name="i1", legend=False)
plt.show()
"""
Explanation: Layout of the corrugated structure insertion. Create Ocelot lattice <img src="4_layout.png" />
End of explanation
"""
# load and convert ASTRA file to OCELOT beam distribution
# p_array_init = astraBeam2particleArray(filename='beam_chirper.ast')
# save ParticleArray to compresssed numpy array
# save_particle_array("chirper_beam.npz", p_array_init)
p_array_init = load_particle_array("chirper_beam.npz")
plt.plot(-p_array_init.tau()*1000, p_array_init.p(), "r.")
plt.grid(True)
plt.xlabel(r"$\tau$, mm")
plt.ylabel(r"$\frac{\Delta E}{E}$")
plt.show()
"""
Explanation: Load beam file
End of explanation
"""
from ocelot.cpbd.wake3D import *
# load wake tables of corrugated structures
wk_vert = WakeTable('wake_vert_1m.txt')
wk_hor = WakeTable('wake_hor_1m.txt')
# creation of wake object with parameters
wake_v1 = Wake()
# w_sampling - defines the number of the equidistant sampling points for the one-dimensional
# wake coefficients in the Taylor expansion of the 3D wake function.
wake_v1.w_sampling = 500
wake_v1.wake_table = wk_vert
wake_v1.step = 1 # step in Navigator.unit_step, dz = Navigator.unit_step * wake.step [m]
wake_h1 = Wake()
wake_h1.w_sampling = 500
wake_h1.wake_table = wk_hor
wake_h1.step = 1
wake_v2 = deepcopy(wake_v1)
wake_h2 = deepcopy(wake_h1)
wake_v3 = deepcopy(wake_v1)
wake_h3 = deepcopy(wake_h1)
"""
Explanation: Initialization of the wakes and the places of their applying
End of explanation
"""
navi = Navigator(lat)
# add physics proccesses
navi.add_physics_proc(wake_v1, w1_start, w1_stop)
navi.add_physics_proc(wake_h1, w2_start, w2_stop)
navi.add_physics_proc(wake_v2, w3_start, w3_stop)
navi.add_physics_proc(wake_h2, w4_start, w4_stop)
navi.add_physics_proc(wake_v3, w5_start, w5_stop)
navi.add_physics_proc(wake_h3, w6_start, w6_stop)
# defining unit step in [m]
navi.unit_step = 0.2
# deep copy of the initial beam distribution
p_array = deepcopy(p_array_init)
print("tracking with Wakes .... ")
start = time.time()
tws_track, p_array = track(lat, p_array, navi)
print("\n time exec:", time.time() - start, "sec")
"""
Explanation: Add the wakes in the lattice
Navigator defines step (dz) of tracking and which, if it exists, physical process will be applied on each step.
In order to add collective effects (Space charge, CSR or wake) method add_physics_proc() must be run.
Method:
* Navigator.add_physics_proc(physics_proc, elem1, elem2)
- physics_proc - physics process, can be CSR, SpaceCharge or Wake,
- elem1 and elem2 - first and last elements between which the physics process will be applied.
Also, unit_step must be defined in [m] (by default 1 m). unit_step is the minimal tracking step for any collective effect.
For each collective effect the number of unit_steps must be defined, so the step at which the physics process is applied will be
dz = unit_step*step [m]
End of explanation
"""
tau0 = p_array_init.tau()
p0 = p_array_init.p()
tau1 = p_array.tau()
p1 = p_array.p()
print(len(p1))
plt.figure(1)
plt.plot(-tau0*1000, p0, "r.", -tau1*1000, p1, "b.")
plt.legend(["before", "after"], loc=4)
plt.grid(True)
plt.xlabel(r"$\tau$, mm")
plt.ylabel(r"$\frac{\Delta E}{E}$")
plt.show()
"""
Explanation: Longitudinal beam distribution
End of explanation
"""
# by default the beam head on the left side
show_e_beam(p_array, figsize=(8,6))
plt.show()
# plotting twiss parameters.
plot_opt_func(lat, tws_track, top_plot=["Dx"], fig_name="i1", legend=False)
plt.show()
"""
Explanation: Beam distribution
End of explanation
"""
# create a simple lattice MagneticLattice
m1 = Marker()
m2 = Marker()
# quadrupoles
Q1 = Quadrupole(l = 0.5, k1 = 0.215)
lattice = (Drift(l=1), m1, Drift(l=1), m2, Drift(l=2), Q1, Drift(l=2))
method = MethodTM()
method.global_method = SecondTM
lat = MagneticLattice(lattice, method=method)
"""
Explanation: Wakefields of a Beam near a Single Plate in a Flat Dechirper
For some FEL applications, e.g. a two-color scheme, only one flat corrugated structure can be used to get a correlated transverse kick along the electron bunch. In that case, we can use analytical approach from I. Zagorodnov, G. Feng, T. Limberg. Corrugated structure insertion for extending the SASE bandwidth up to 3% at the European XFEL and K. Bane, G. Stupakov, and I. Zagorodnov, Wakefields of a Beam near a Single Plate in a Flat Dechirper to calculate described above the wakefield tables.
<div class="alert alert-block alert-warning">
<b>Note:</b> Due to the use of assumptions in the analytical approach, a transverse kick is infinite if the electron beam distance to the plate wall is zero. </div>
End of explanation
"""
# description of args can be also be shown with Shift+Tab
sigma = np.std(p_array.tau())
print("RMS long beam size: ", sigma * 1e6, " um")
wk_tv_kick = WakeTableDechirperOffAxis(b=500*1e-6, # distance from the plate in [m]
a=0.01, # half gap between plates in [m]
width=0.02, # width of the corrugated structure in [m]
t=0.25*1e-3, # longitudinal gap in [m]
p=0.5*1e-3, # period of corrugation in [m]
length=1, # length of the corrugated structure in [m]
sigma=12e-6, # characteristic (rms) longitudinal beam size in [m]
orient="horz") # "horz" or "vert" plate orientation
# creation of wake object with parameters
wake = Wake()
# w_sampling - defines the number of the equidistant sampling points for the one-dimensional
# wake coefficients in the Taylor expansion of the 3D wake function.
wake.w_sampling = 500
wake.wake_table = wk_tv_kick
wake.step = 1 # step in Navigator.unit_step, dz = Navigator.unit_step * wake.step [m]
navi = Navigator(lat)
# add physics proccesses
navi.add_physics_proc(wake, m1, m2)
"""
Explanation: Describe corrugated structure and add wake to the lattice
End of explanation
"""
# deep copy of the initial beam distribution
p_array = deepcopy(p_array_init)
print("tracking with Wakes .... ")
start = time.time()
tws_track, p_array = track(lat, p_array, navi)
print("\n time exec:", time.time() - start, "sec")
# by default the beam head on the left side
show_e_beam(p_array, figsize=(8,6))
plt.show()
"""
Explanation: Track the beam through the lattice
End of explanation
"""
# Geometry of the single-plate corrugated structure (values match the
# WakeTableDechirperOffAxis example above)
p = 0.5e-3 # period of corrugation [m]
t = 0.25e-3 # longitudinal gap [m]
b = 500e-6 # distance from the plate [m]
# alpha and s0yd follow the analytic dipole-wake formulas quoted in the
# markdown above (Bane, Stupakov, Zagorodnov)
alpha = 1 - 0.465 * np.sqrt(t/p) - 0.07 * t/p
print("alpha = ", alpha)
s0 = 8*b**2 * t / (9 * np.pi * alpha**2 * p**2)  # characteristic length s0yd [m]
print("s0 = ", s0*1e6, " [um]")
"""
Explanation: Crosscheck with analytics: Dipole kick
We are going to crosscheck our current implementation with formulas from K. Bane, G. Stupakov, and I. Zagorodnov, Wakefields of a Beam near a Single Plate in a Flat Dechirper:
$$
w_{yd}(s) = \frac{2}{b^3}s_{0yd} \left[ 1 - \left(1 + \sqrt{\frac{s}{s_{0yd}}}\right) e^{- \sqrt{s/s_{0yd}}} \right]
$$
with
$$
s_{0yd} = 8b^2t/(9\pi \alpha^2 p^2)
$$
Let's take the same parameters from example above:
Period, p = 0.5 mm
Longitudinal gap, t = 0.25 mm
Distance from the plate, b = 500 $\mu$m
$\alpha = 1 - 0.465 \sqrt{t/p} - 0.07 t/p$
End of explanation
"""
# sample points along the bunch, 0..100 um
s = np.linspace(0, 100, num=100) *1e-6
# analytic dipole wake w_yd(s) (see formula in the markdown above), scaled by
# Z0 * c / (4 pi) to convert to [V/(C m)]; uses globals b, s0, Z0, speed_of_light
w = lambda s: 2./b**3 * s0 * (1 - (1 + np.sqrt(s/s0)) * np.exp(- np.sqrt(s/s0))) * Z0 * speed_of_light / (4 * np.pi)
# unit conversion factors for plotting in MV/(nC m)
MV = 1e6
nC = 1e-9
plt.plot(s*1e6, np.array([w(si) for si in s])*nC/MV )
plt.xlabel("s [um]")
plt.ylabel("Wd [MV/(nC m)]")
plt.show()
"""
Explanation: Dipole wake
End of explanation
"""
def convolve_beam(current, wake):
    """
    Convolve a point-charge wake function with the beam current profile.

    :param current: ndarray of shape (n, 2); current[:, 0] - s in [m] on an
        equidistant grid, current[:, 1] - current in [A]. The beam head is on
        the left. The argument is NOT modified (the original implementation
        shifted current[:, 0] in place, surprising callers that reuse it).
    :param wake: wake function of a single argument, wake(s)
    :return: wake_kick ndarray; wake_kick[:, 0] - s in [m], wake_kick[:, 1] - V
    :raises ValueError: if the profile has fewer than two samples (the grid
        step would be undefined)
    """
    if len(current) < 2:
        raise ValueError("current profile must contain at least two samples")
    # work on a float copy so the caller's profile is not shifted in place
    profile = np.array(current, dtype=float, copy=True)
    profile[:, 0] -= profile[0, 0]          # shift s so the head is at s = 0
    s = profile[:, 0]
    step = (s[-1] - s[0]) / (len(s) - 1)    # equidistant grid spacing [m]
    q = profile[:, 1] / speed_of_light      # line charge density [C/m]
    w = np.array([wake(si) for si in s])    # wake sampled on the same grid
    kick = np.convolve(q, w) * step         # discrete convolution integral [V]
    s_new = np.cumsum(np.ones(len(kick))) * step
    return np.vstack((s_new, kick)).T
I = s_to_cur(p_array.tau(), 0.01 * np.std(p_array.tau()), np.sum(p_array.q_array), speed_of_light)
dipole_kick = convolve_beam(I, w)
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('s [mm]')
ax1.set_ylabel('Wake [MV]', color=color)
ax1.plot(dipole_kick[:, 0] * 1e3, dipole_kick[:, 1] * 1e-6, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('I [kA]', color=color)
ax2.plot(I[:, 0] * 1e3, I[:, 1] * 1e-3, color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.show()
length = 1 # corrugated structure in [m]
p_array = deepcopy(p_array_init)
z = p_array.tau()
ind_z_sort = np.argsort(z)
z_sort = z[ind_z_sort]
wd = np.interp(z_sort - z_sort[0], dipole_kick[:, 0], dipole_kick[:, 1])
delta_E_y = wd * 1e-9 * length
pc_ref = np.sqrt(p_array.E ** 2 / m_e_GeV ** 2 - 1) * m_e_GeV
delta_py = delta_E_y / pc_ref
p_array.rparticles[3][ind_z_sort] += delta_py
show_density(p_array.tau() * 1e3, p_array.py() * 1e3, ax=None, nbins_x=250, nbins_y=250,
interpolation="bilinear", xlabel="s [mm]", ylabel='py [mrad]', nfig=50,
title="Side view", figsize=None, grid=False)
plt.show()
p_array = deepcopy(p_array_init)
wk_tv_kick = WakeTableDechirperOffAxis(b=b, # distance from the plate in [m]
a=0.01, # half gap between plates in [m]
width=0.02, # width of the corrugated structure in [m]
t=t, # longitudinal gap in [m]
p=p, # period of corrugation in [m]
length=1, # length of the corrugated structure in [m]
sigma=12e-6, # characteristic (rms) longitudinal beam size in [m]
orient="horz") # "horz" or "vert" plate orientation
# creation of wake object with parameters
wake = Wake()
# w_sampling - defines the number of the equidistant sampling points for the one-dimensional
# wake coefficients in the Taylor expansion of the 3D wake function.
wake.w_sampling = 500
wake.wake_table = wk_tv_kick
wake.step = 1 # step in Navigator.unit_step, dz = Navigator.unit_step * wake.step [m]
wake.prepare(None)
wake.s_start = 0
wake.s_stop = 1
wake.apply(p_array, dz=1)
show_density(p_array.tau() * 1e6, p_array.py() * 1e3, ax=None, nbins_x=250, nbins_y=250,
interpolation="bilinear", xlabel="s [um]", ylabel='py [mrad]', nfig=60,
title="Side view", figsize=None, grid=False)
plt.show()
"""
Explanation: Convolution wake with the beam current
End of explanation
"""
|
ContinuumIO/nbpresent | notebooks/Importing revealjs themes.ipynb | bsd-3-clause | from os.path import join, basename, splitext, abspath
from glob import glob
import re
from pprint import pprint
from collections import defaultdict
from copy import deepcopy
import json
from uuid import uuid4
import colour
import jinja2
import yaml
from IPython.display import Javascript, display, Markdown
"""
Explanation: Building Themes from (S)CSS
nbpresent themes are a simple data structure with a lot of flexibility. They reflect a data-driven view of style, and make some use of references, as for colors. More work needs to be done, especially surrounding reuse!
In this notebook, we look at the themes from revealjs which has provided so many inspirations for nbpresent.
End of explanation
"""
var_re = r"^(\$.*)\s*:\s*(.*);"
"""
Explanation: Extract SCSS variables
The revealjs themes are written in SCSS on a base template. Great! With one little regular expression we can pull out variable names, and have a pretty good idea of what they mean.
End of explanation
"""
sizes = {
"38px": 6,
"36px": 5,
"30px": 4,
"1.0em": 3,
"1.00em": 3,
"1.3em": 3.5,
"1.55em": 3.75,
"1.6em": 4,
"2.11em": 5,
"2.5em": 5.25,
"3.77em": 7
}
fonts = {
"League Gothic": "Oswald",
"Palatino Linotype": "EB Garamond"
}
"""
Explanation: Provide alternates
All sizes in nbpresent are specified in rem, a device-aware measure that helps avoid surprises. reveal uses a selection of measures, mostly based on em. I figured these out just by trying it, but there may indeed be a more elegant and robust way to calculate these.
End of explanation
"""
def update_headings(rules, directives):
    """Merge *directives* into the style rule of every heading selector (h1..h7)."""
    for level in (1, 2, 3, 4, 5, 6, 7):
        rules["h" + str(level)].update(directives)
"""
Explanation: Headings
Headings end up being really important, but are pretty verbose. While it's theoretically possible to use traditional CSS selectors like h1, h2, h3{...}, for the time being we store them all verbosely.
End of explanation
"""
def pretty(theme):
    """Render *theme* as a fenced YAML code block in the notebook output."""
    # round-trip through JSON so the YAML dumper only sees plain containers
    as_yaml = yaml.safe_dump(yaml.safe_load(json.dumps(theme)))
    display(Markdown("""```yaml\n{}\n```""".format(as_yaml)))
"""
Explanation: Reading themes
While a bit verbose, they can be quite readable.
End of explanation
"""
def make_theme(scss, theme_id=None, theme=None):
    """Build an nbpresent theme dict from the variables of a reveal.js SCSS theme.

    scss     -- SCSS source text; only `$name: value;` declarations are read
                (matched with the module-level `var_re`).
    theme_id -- id for the new theme; a random uuid4 string when omitted.
    theme    -- optional baseline theme (e.g. the reveal base theme); it is
                deep-copied first, so the argument itself is never mutated.
    Returns the theme data structure (id, palette, backgrounds, rules, text-base).
    """
    theme = deepcopy(theme) or {}
    theme["id"] = theme_id = theme_id or str(uuid4())
    palette = theme["palette"] = theme.get("palette", {})
    backgrounds = theme["backgrounds"] = theme.get("backgrounds", {})
    rules = theme["rules"] = theme.get("rules", defaultdict(dict))
    # paragraphs and list items share the one base text style object
    base = theme["text-base"] = rules["p"] = rules["li"] = theme.get("text-base", {"font-size": 3})
    # collect every `$name: value;` declaration found in the SCSS source
    all_vars = dict([
        (name, val)
        for name, val in
        re.findall(var_re, scss, flags=re.M)
    ])
    # handle colors
    for name, val in all_vars.items():
        if "Color" in name and "selection" not in name and "Hover" not in name:
            # one level of indirection: a variable may reference another variable
            if val in all_vars:
                val = all_vars[val]
            # drop the leading "$" to form the palette id
            cid = name[1:]
            # NOTE(review): 256 * c yields 256 for a full-intensity channel;
            # 255 is the conventional RGB maximum -- confirm this is intended
            palette[cid] = {
                "id": cid,
                "rgb": [int(256 * c) for c in colour.Color(val).rgb]
            }
            if "background" in name:
                backgrounds[cid] = {
                    "id": cid,
                    "background-color": cid
                }
            elif "heading" in name:
                update_headings(rules, {"color": cid})
            elif "main" in name:
                base["color"] = cid
            elif "link" in name:
                rules["a"]["color"] = cid
            else:
                # unrecognized color variable: report it so the mapping can be extended
                print(theme_id, name, val)
        elif re.match(r".*Font$", name):
            # take the first font of the font stack and strip single quotes
            font = val.split(",")[0].replace("'", "")
            # substitute fonts via the module-level `fonts` alternates table
            if font in fonts:
                font = fonts[font]
            if "heading" in name:
                update_headings(rules, {"font-family": font})
            elif "main" in name:
                base["font-family"] = font
        elif "Size" in name:
            # translate the em/px measure to rem via the hand-built `sizes` table
            size = sizes[val]
            if "main" in name:
                base["font-size"] = size
            elif "heading" in name:
                # e.g. $heading1Size -> rules["h1"]
                h = "h{}".format(*re.findall(r'\d+', name))
                rules[h]["font-size"] = size
    return theme
"""
Explanation: Importing the variables
make_theme runs through all the variables it found, and tries to determine where they should go in the theme data model.
End of explanation
"""
#!git clone https://github.com/hakimel/reveal.js.git ../../revealjs
"""
Explanation: Whew!
Great, we could parse some themes... where can we get them? A handy approach would be to use git: try uncommenting and running the line below: this will make a directory next to nbpresent.
End of explanation
"""
reveal = abspath(join("..", "..", "revealjs"))
reveal_version = json.load(open(join(reveal, "package.json")))["version"]
reveal_version
"""
Explanation: Reveal details
Let's capture the version of the reveal from which we extracted these.
End of explanation
"""
settings = open(join(reveal, "css", "theme", "template", "settings.scss")).read()
base_theme = make_theme(settings)
pretty(base_theme)
"""
Explanation: The base theme
css/theme/template/settings.scss gives us the reveal baseline. If we could inherit themes, we'd do something clever with references, but for the time being, we can just use it as a data baseline.
End of explanation
"""
scss = dict([
(
"{}-by-revealjs-{}".format(splitext(basename(fname))[0],
reveal_version),
open(fname).read()
)
for fname in glob(join(reveal, "css", "theme", "source", "*.scss"))
])
len(scss)
"""
Explanation: Reading the SCSS
Since these aren't too big, let's build up a dictionary with:
- the name of the them, indexing...
- ...the SCSS source
End of explanation
"""
themes = dict([
(name, make_theme(s, name, base_theme))
for name, s in scss.items()])
list(map(pretty, themes.values()));
"""
Explanation: Put it all together
Let's use that dictionary to build some themes!
End of explanation
"""
Javascript(jinja2.Template("""
require(["nbextensions/nbpresent/js/nbpresent.min"], function(nbpresent){
nbpresent.initialized().then(function(nbpresent){
nbpresent.mode.tree.set(["themes", "theme"], {{ themes }});
});
});
""").render(themes=json.dumps(themes, indent=2)))
"""
Explanation: Load the data
This will load up all of the themes right into this presentation
End of explanation
"""
es6_tmpl = jinja2.Template("""/*
THIS IS GENERATED CODE BY /notebooks/Importing revealjs themes.ipynb
DO NOT EDIT DIRECTLY
Source content:
https://github.com/hakimel/reveal.js/tree/master/css/theme
All original themes copyright their respective creators:
beige Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
black Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
blood Author: Walther http://github.com/Walther
league Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
moon Author: Achim Staebler
night Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
serif Copyright (C) 2012-2013 Owen Versteeg, http://owenversteeg.com - it is MIT licensed.
simple Copyright (C) 2012 Owen Versteeg, https://github.com/StereotypicalApps. It is MIT licensed.
sky Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
solarized Author: Achim Staebler
white By Hakim El Hattab, http://hakim.se
*/
export const REVEAL_THEMES = {{ themes }};
""")
with open(join("..", "src", "es6", "theme", "theme", "reveal.es6"), "w") as es6:
es6.write(es6_tmpl.render(themes=json.dumps(themes, indent=2)))
"""
Explanation: Test it out!
the two cells below have already been added to a slide... try opening the per-slide theme view to see it looks with different themes!
h1 h1 h1
h2 h2 h2
h3 h3 h3
h4 h4 h4
Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae
bullets
bullets
Export the data
Oh, yeah: I did this to actually make this available in nbpresent!
End of explanation
"""
|
opalytics/opalytics-ticdat | examples/expert_section/ml_soda_promotion/soda_promotion.ipynb | bsd-2-clause | import pandas
df_hist = pandas.read_excel("soda_sales_historical_data.xlsx")
df_hist[:5]
df_hist.shape
"""
Explanation: Combining Machine Learning and Optimization
With Gurobi and sklearn
Machine Learning topics
Touching the elephant here, but ~~not there~~
Supervised Learning
* Algorithm selection and hyper-parametric optimization
* KFold assessment vs overfitting
* Separating training from prediction
~~Unsupervised Learning~~
~~Time Series Data~~
~~Deep Learning~~
Optimization topics
Exploratory programming to application deployment
Coping with the combinatorial explosion
Validating optimization with simulation
All Under the Banner of Python!
The Soda Promotion Problem
We have the challenge of designing the upcoming promotion campaign for a Soda Company. The intended objective is to bolster sales while at the same time obeying various business constraints.
The First Challenge
We need to predict impact of different price points on the expected sales for each type of soda.
To do this, we need to train a soda sales predictor from a historical data table.
Examine historical data
End of explanation
"""
from pandas import DataFrame, get_dummies
categorical_columns = ['Product','Easter Included','Super Bowl Included',
'Christmas Included', 'Other Holiday']
df_hist = get_dummies(df_hist, prefix={k:"dmy_%s"%k for k in categorical_columns},
columns = list(categorical_columns))
df_hist[:5]
"""
Explanation: Convert categorical columns to numeric
End of explanation
"""
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import BaggingRegressor
from sklearn import model_selection
experiments = {"Algorithm":["Ordinary Least Squares", "Regression Tree",
"Big Random Forest", "Random Forest",
"Bagging"],
"Objects" : [lambda : LinearRegression(),
lambda : DecisionTreeRegressor(),
lambda : RandomForestRegressor(n_estimators=100),
lambda : RandomForestRegressor(),
lambda : BaggingRegressor()],
"Predictions":[[] for _ in range(5)]}
actuals = []
"""
Explanation: Picking the right predictor algorithm is of utmost importance
Hence we examine our choices here in great detail.
End of explanation
"""
from sklearn.model_selection import train_test_split
[_.shape for _ in train_test_split(df_hist.drop("Sales", axis=1),
df_hist["Sales"], test_size=0.25)]
"""
Explanation: Resist the temptation to overfit!
Instead, split the samples into train, test subsections.
End of explanation
"""
# Run 4 independent train/test splits so each algorithm is scored on
# several out-of-sample partitions rather than a single (possibly lucky) split.
for _ in range(4):
    train_X, test_X, train_y, test_y = (
        train_test_split(df_hist.drop("Sales", axis=1),
                         df_hist["Sales"], test_size=0.25))
    for i, obj_factory in enumerate(experiments["Objects"]):
        obj = obj_factory()  # a fresh, untrained model for every split
        obj.fit(y=train_y, X=train_X)
        experiments["Predictions"][i] += list(obj.predict(test_X))
    actuals += list(test_y)
actuals = pandas.Series(actuals)
experiments["Predictions"] = list(map(pandas.Series, experiments["Predictions"]))
# Wrap in list(...) so the lengths actually display: in Python 3, map is lazy
# and the bare expression would just show a <map object>.
len(actuals), list(map(len, experiments["Predictions"]))
"""
Explanation: By repeatedly splitting, training, and testing, you can create a realistic simulation of prediction accuracy.
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
color=iter(cm.rainbow(np.linspace(0,1,len(experiments)+3)))
plt.figure(figsize=(12,7),dpi=300)
plt.plot(actuals,actuals,c=next(color),markersize=2,label='Data')
for _, row in DataFrame(experiments).iterrows():
plt.plot(actuals, row["Predictions"],'o',c=next(color),
markersize=2,label=row['Algorithm'])
plt.title('Scatter Plot Prediction v/s Data')
plt.grid(True)
plt.legend()
plt.show()
"""
Explanation: Now lets visualize our results
End of explanation
"""
color=iter(cm.rainbow(np.linspace(0,1,len(experiments)+3)))
next(color)
plt.figure(figsize=(13,8),dpi=300)
for index, row in DataFrame(experiments).iterrows():
relative_error = (row["Predictions"] - actuals) / (1 + abs(actuals))
plt.plot(np.sort(relative_error),'o',c=next(color),
markersize=2,label=row['Algorithm'])
plt.title('Relative Error Prediction v/s Data')
plt.ylabel('Relative Error')
plt.grid(True)
plt.legend()
plt.axis([0,len(actuals),-1,1])
plt.show()
"""
Explanation: More visualizations
End of explanation
"""
def boxplot(algorithm):
    """Draw a box plot of the relative prediction error for one algorithm."""
    idx = experiments["Algorithm"].index(algorithm)
    prediction = experiments["Predictions"][idx]
    relative_err = (prediction - actuals) / (1 + abs(actuals))
    plt.title(algorithm)
    plt.boxplot(relative_err)
    plt.show()
boxplot("Bagging")
boxplot("Big Random Forest")
"""
Explanation: Even more visualizations
End of explanation
"""
experiments["Results"] = []
for o in experiments["Objects"]:
experiments["Results"].append(
model_selection.cross_val_score(o(), y=df_hist['Sales'],
X=df_hist.drop("Sales", axis=1),
cv=5).mean())
DataFrame(experiments).drop(["Objects", "Predictions"],
axis=1).set_index("Algorithm")
"""
Explanation: Visualizations only take us so far
Let's use model_selection.cross_val_score to automate the process of train/test split based assessment. (K-Fold Cross Validation)
End of explanation
"""
fitted = (experiments["Objects"]
[experiments["Algorithm"].index("Big Random Forest")]().
fit(y=df_hist["Sales"], X=df_hist.drop("Sales", axis=1)))
"""
Explanation: Now use the complete historical table to create a predictor object with the best algorithm
End of explanation
"""
df_superbowl_original = pandas.read_excel("super_bowl_promotion_data.xlsx")
df_superbowl = get_dummies(df_superbowl_original,
prefix={k:"dmy_%s"%k for k in categorical_columns},
columns = list(categorical_columns))
assert "Sales" not in df_superbowl.columns
assert {"Sales"}.union(df_superbowl.columns).issubset(set(df_hist.columns))
len(df_superbowl)
"""
Explanation: We will make predictions for the "Sales"-less table of current data
End of explanation
"""
for fld in set(df_hist.columns).difference(df_superbowl.columns, {"Sales"}):
assert fld.startswith("dmy_")
df_superbowl[fld] = 0
"""
Explanation: Note that the current data table might have less categorical range than the historical data.
End of explanation
"""
df_superbowl = df_superbowl[list(df_hist.drop("Sales", axis=1).columns)]
predicted = fitted.predict(df_superbowl)
"""
Explanation: Take care!! sklearn has no concept of columns. We make sure that the df_superbowl columns are ordered consistently with the df_hist independent column sub-matrix.
End of explanation
"""
forecast_sales = df_superbowl_original[["Product", "Cost Per Unit"]].copy()
forecast_sales["Sales"] = predicted
forecast_sales.set_index(['Product','Cost Per Unit'], inplace=True)
soda_family = {'11 Down': 'Clear', 'AB Root Beer': 'Dark',
'Alpine Stream': 'Clear', 'Bright': 'Clear',
'Crisp Clear': 'Clear', 'DC Kola': 'Dark',
'Koala Kola': 'Dark', 'Mr. Popper': 'Dark',
'Popsi Kola': 'Dark'}
family = set(soda_family[j] for j in soda_family)
soda = set(j for j in soda_family)
max_prom = {f:2 for f in family}
max_investment = 750
product_prices = set(forecast_sales.index.values)
normal_price = {b:0 for b in soda}
for b,p in product_prices:
normal_price[b] = max(normal_price[b],p)
"""
Explanation: Prediction in hand, we commence optimization!
LaTeX summary of family of equations
$$
\begin{array}{ll}
\max & sales\
s.t. & X_{b,p}\in{0,1}\quad\forall (b,p)\in Prod\
& \sum\left(X_{b,p}:{(b,p)\in Prod} \right)=1\quad\forall b\in Soda\
& \sum\left(X_{b,p}:{(b,p)\in Prod, p\neq p_o, T(b)=t} \right) \leq max_t\quad\forall t\
& sales = \sum\left(f_{b,p} X_{b,p}:{(b,p)\in Prod}\right)\
& revenue = \sum\left(f_{b,p} p X_{b,p}:{(b,p)\in Prod} \right)\
& investment = \sum\left(\left(f_{b,p} - f_{b,p_o}\right)+ p_o X{b,p}:{(b,p)\in Prod} \right)\
& investment <= max_{investment}
\end{array}
$$
Putting the optimization input set together
End of explanation
"""
meaningful_discounts = 0
for b,p in product_prices:
if forecast_sales.Sales[b,p] > forecast_sales.Sales[b,normal_price[b]]:
meaningful_discounts += 1
meaningful_discounts, len(forecast_sales) - len(soda)
"""
Explanation: Note that not all estimated discounts yield a boost in sales.
End of explanation
"""
import gurobipy as gu
model = gu.Model()
select_price = model.addVars(product_prices,vtype=gu.GRB.BINARY,name='X')
sales = model.addVar(name='sales')
revenue = model.addVar(name='revenue')
investment = model.addVar(ub=max_investment, name='investment')
gusum = gu.quicksum
"""
Explanation: Building a MIP model
$$
\begin{array}{l}
X_{b,p}\in{0,1}\quad\forall (b,p)\in Prod\
0 \leq sales\
0 \leq revenue\
0 \leq investment \leq max_{investment}
\end{array}
$$
End of explanation
"""
model.addConstr(sales == select_price.prod(forecast_sales.Sales), name='sales')
model.addConstr(revenue == gusum(forecast_sales.Sales[b,p] * p *
select_price[b,p] for b,p in product_prices),
name='revenue')
model.addConstr(investment ==
gusum(max(0,forecast_sales.Sales[b,p] -
forecast_sales.Sales[b,normal_price[b]]) *
normal_price[b] * select_price[b,p]
for b,p in product_prices),
name='investment')
model.update()
"""
Explanation: $$
sales = \sum\left(f_{b,p} X_{b,p}:{(b,p)\in Prod}\right)\
revenue = \sum\left(f_{b,p} p X_{b,p}:{(b,p)\in Prod} \right)\
investment = \sum\left(\left(f_{b,p} - f_{b,p_o}\right)+ p_o X{b,p}:{(b,p)\in Prod} \right)
$$
End of explanation
"""
model.addConstrs((select_price.sum(b,'*') == 1 for b in soda), name='OnePrice')
model.addConstrs((gusum(select_price[b,p] for b,p in product_prices if
soda_family[b] == f and p != normal_price[b] )
<= max_prom[f] for f in family),
name='MaxProm')
model.update()
"""
Explanation: $$
\sum\left(X_{b,p}:{(b,p)\in Prod} \right)=1\quad\forall b\in Soda\
\sum\left(X_{b,p}:{(b,p)\in Prod, p\neq p_o, T(b)=t} \right) \leq max_t
$$
End of explanation
"""
model.setObjective(sales, sense=gu.GRB.MAXIMIZE)
model.optimize()
model.status == gu.GRB.OPTIMAL
"""
Explanation: Optimize and results
End of explanation
"""
sales.X, revenue.X, investment.X
price_selections = {"Product":[], "Price":[], "Is Discount":[], "Family":[]}
for b, p in product_prices:
if abs(select_price[b,p].X -1) < 0.0001: # i.e. almost one
price_selections["Product"].append(b)
price_selections["Price"].append(p)
price_selections["Is Discount"].append(p < normal_price[b])
price_selections["Family"].append(soda_family[b])
(DataFrame(price_selections).set_index("Product")
[["Price", "Is Discount", "Family"]].sort_values("Family"))
"""
Explanation: Only the paranoid survive
Carefully sanity check the solution.
End of explanation
"""
simulated_KPI = {'Sales':[],'Revenue':[],'Investment':[]}
Z = select_price
num_infeas = 0
for i in range(100):
np.random.seed(i)
fitted = RandomForestRegressor(n_estimators=100,
n_jobs=4).fit(y=df_hist["Sales"],
X=df_hist.drop("Sales", axis=1))
forecast = df_superbowl_original[['Product', 'Cost Per Unit']].copy()
forecast["Sales"] = fitted.predict(df_superbowl)
forecast = forecast.set_index(['Product','Cost Per Unit'])
sales, revenue, investment = 0, 0, 0
for b,p in product_prices:
sales += forecast.Sales[b,p] * Z[b,p].X
revenue += forecast.Sales[b,p] * p * Z[b,p].X
investment += (max(0,forecast.Sales[b,p] -
forecast.Sales[b,normal_price[b]]) *
normal_price[b] * Z[b,p].X)
if investment > max_investment:
num_infeas += 1
simulated_KPI['Sales'].append(sales)
simulated_KPI['Revenue'].append(revenue)
simulated_KPI['Investment'].append(investment)
data = {'Sales','Revenue','Investment'}
color=iter(cm.rainbow(np.linspace(0,1,3)))
for t in data:
plt.figure(figsize=(7,4),dpi=300)
plt.hist(simulated_KPI[t],50,normed=1,color=next(color), alpha=0.75)
plt.ylabel('Probability')
plt.xlabel(t)
plt.grid(True)
plt.show()
num_infeas
"""
Explanation: Create a range of predictions to simulate the behavior of our solution under a range of conditions.
End of explanation
"""
|
egrinstein/egrinstein.github.io | _posts/.ipynb_checkpoints/matplotlib-checkpoint.ipynb | mit | import matplotlib.pyplot as plt
%matplotlib inline
X = [0,1,2,3,4]
Fx = [x**2 for x in X]
fig = plt.plot(X,Fx)
plt.show(fig)
"""
Explanation: Matplotlib -- A Mostly Formal Introduction
Matplotlib is Python's most used library for scientific visualization. However, there are many ways to use it, and its syntax can be a little misleading. A good starting point is in https://matplotlib.org/faq/usage_faq.html. This post complements the aforementioned link by being a little more hands-on, without becoming an example-based tutorial. This tutorial is aimed at people who use Matplotlib, but are not sure of what they are doing. I am doing this as a reference to myself, to be honest.
First thing you have to remember is that there are two ways of using Matplotlib, the global way and the local, object oriented one. The latter is more readable and flexible, while the former is quicker to write.
With the global approach, you are always working with one implicit figure. Every time you order the library to draw something, it will do it in this elusive entity. In the local approach, you will create Figure objects and explicitely say you want to draw things in it. I will be focusing on the figure approach.
Figures are the top-level container for drawings. They have a specific size (in inches). Inside them lie Axes (not axis!) objects, which are the actual graphs we think as plots. So, a figure is basically useful so we can dispose many Axes however we want, in a 2x1 grid, for example. When you create an Axes object inside a picture, you specify where it will be placed. You select a rectangular slice of the parent Figure to be occupied by the Axes.
An empty picture is not even white. It is not even transparent, for all that matters. If you want a small rectangle around it, you have to create an Axes, and then draw it. If you want to plot a one variable function, you would have to plot every one of its points, and then connect neighbors by small lines. Luckily, there is the plot function, which is perfect for most of our functional needs. You give it your x (inputs) and your f(x) (outputs). It creates a figure with an Axes in it. It this Axes, it draws everything we need: x ticks, y ticks, a rectangle around it and, of course, the points connected by lines we know and love.
End of explanation
"""
fig,axes = plt.subplots(2,2)
F0 = [x**0 for x in X]
F1 = [x**1 for x in X]
F2 = [x**2 for x in X]
F3 = [x**3 for x in X]
axes[0,0].plot(X,F0)
axes[0,1].plot(X,F1)
axes[1,0].plot(X,F2)
axes[1,1].plot(X,F3)
plt.show(fig)
"""
Explanation: Even though we can dispose the Axes how we want inside the figure,
we usually do the same configuration over and over, creating equal-sized,
equally-spaced rectangular Axes. So there is a shorthand to create a Figure with this kind of display: the subplots function. This one creates a Figure with a set of Axes disposed in a rectangular fashion inside. You dictate the fashion: The first argument dictates how many lines the grid will have, the second the number of columns. The subplot will give you a figure and a matrix of Axes. You access each Axes by its row and column, and fill it with plots, for instance. Let's create a 2x2 subplot, and fill it with different functions:
End of explanation
"""
# add_subplot(nrows, ncols, index): each call may imagine a different
# grid over the same figure; index counts row-major starting at 1.
fig = plt.figure()
ax12 = fig.add_subplot(2,1,1) # fills the top half of the figure
ax3 = fig.add_subplot(2,2,3)  # these two split the bottom half,
ax4 = fig.add_subplot(2,2,4)  # one quarter of the figure each
ax12.plot(X,Fx)
ax3.plot(X,F0)
ax4.plot(X,F1)
plt.show(fig)
"""
Explanation: Another useful way to create grids of plots is by creating a figure and then adding subplots to it with the add_subplot function. With add_subplot, you specify the grid structure you are imagining, and it will return an Axes with those dimensions. This function takes 3 parameters: the number of rows you are imagining, the number of columns, and the number of the section of the grid you want to access. So, for example, if you want a 2x2 grid and you want the bottom left square, you should do an add_subplot(2,2,3) (it counts by row, then by column).
This way is better suited for making irregular grid. For example, suppose we want a graph to fill half the Figure and other two to fill the bottom. We would do:
End of explanation
"""
|
tclaudioe/Scientific-Computing | SC1v2/04b_BONUS_conjugate_gradient_method.ipynb | bsd-3-clause | import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve_triangular
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
# pip install memory_profiler
%load_ext memory_profiler
np.random.seed(0)
from ipywidgets import interact, IntSlider
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
def plot_matrices_with_values(ax,M,flag_values):
    """Show the square matrix M as a heat map on axes `ax`.

    If flag_values is True, each entry is also printed (two decimals, red)
    on top of its cell.
    """
    ax.matshow(M, cmap=plt.get_cmap('GnBu'))
    if flag_values:
        dim = M.shape[0]
        for row in np.arange(0, dim):
            for col in np.arange(0, dim):
                ax.text(row, col, '{:.2f}'.format(M[row,col]), va='center', ha='center', color='r')
"""
Explanation: <center>
<h1> ILI285 - Computación Científica I / INF285 - Computación Científica </h1>
<h2> Conjugate Gradient Method </h2>
<h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2>
<h2> Version: 1.16</h2>
</center>
Table of Contents
Introduction
Gradient Descent
Conjugate Gradient Method
Let's Play: Practical Exercises and Profiling
Acknowledgements
End of explanation
"""
def gradient_descent(A, b, x0, n_iter=10, tol=1e-10):
    """Solve A x = b for symmetric positive-definite A by gradient descent.

    Parameters
    ----------
    A : (n, n) ndarray, symmetric positive-definite matrix.
    b : (n,) ndarray, right-hand side.
    x0 : (n,) ndarray, initial guess.
    n_iter : int, number of rows in the returned iterate history.
    tol : float, stop early once ||b - A x|| drops below this value.

    Returns
    -------
    X : (n_iter, n) ndarray with the iterates x_0, x_1, ...; if the
        method converges early, the remaining rows repeat the converged
        solution so the array is always fully populated.
    """
    n = A.shape[0]
    # History of iterates; row k holds x_k.
    X = np.full((n_iter, n), np.nan)
    X[0] = x0
    for k in range(1, n_iter):
        r = b - np.dot(A, X[k-1])  # residual of the previous iterate
        if np.linalg.norm(r) < tol:  # converged: freeze the remaining rows
            X[k:] = X[k-1]
            return X
        # Exact line search along the steepest-descent direction r.
        alpha = np.dot(r, r)/np.dot(r, np.dot(A, r))
        X[k] = X[k-1] + alpha*r
    return X
"""
Explanation: <div id='intro' />
Introduction
Welcome to another edition of our Jupyter Notebooks. Here, we'll teach you how to solve $A\,x = b$ with $A$ being a symmetric positive-definite matrix, but the following methods have a key difference with the previous ones: these do not depend on a matrix factorization. The two methods that we'll see are called the Gradient Descent and the Conjugate Gradient Method. On the latter, we'll also see the benefits of preconditioning.
<div id='GDragon' />
Gradient Descent
This is an iterative method. If you remember the iterative methods in the previous Notebook, to find the next approximate solution $\mathbf{x}{k+1}$ you'd add a vector to the current approximate solution, $\mathbf{x}_k$, that is: $\mathbf{x}{k+1} = \mathbf{x}k + \text{vector}$. In this method, $\text{vector}$ is $\alpha{k}\,\mathbf{r}_k$, where $\mathbf{r}_k$ is the residue ($\mathbf{b} - A\,\mathbf{x}_k$) and $\alpha_k = \cfrac{(\mathbf{r}_k)^T\,\mathbf{r}_k}{(\mathbf{r}_k)^T\,A\,\mathbf{r}_k}$, starting with some initial guess $\mathbf{x}_0$. Let's look at the implementation below:
End of explanation
"""
"""
Randomly generates an nxn symmetric positive-
definite matrix A.
"""
def generate_spd_matrix(n):
    """Build a random n x n symmetric positive-definite matrix."""
    M = np.random.random((n,n))
    # Symmetrize: M + M^T is symmetric by construction.
    M += M.T
    # Overwrite the diagonal so each diagonal entry strictly exceeds the
    # sum of the off-diagonal entries in its row; a symmetric, strictly
    # diagonally dominant matrix with positive diagonal is positive-definite.
    deltas = 0.1*np.random.random(n)
    off_diag_sums = M.sum(axis=1) - np.diag(M)
    np.fill_diagonal(M, off_diag_sums + deltas)
    return M
"""
Explanation: Now let's try our algorithm! But first, let's borrow a function to generate a random symmetric positive-definite matrix, kindly provided by the previous notebook, and another one to calculate the vectorized euclidean metric.
End of explanation
"""
def show_small_example_GD(n_size=3, n_iter=10):
    """Run gradient descent on a random SPD system and compare against NumPy's direct solver."""
    np.random.seed(0)  # fixed seed so the widget output is reproducible
    A = generate_spd_matrix(n_size)
    b = np.ones(n_size)
    x0 = np.zeros(n_size)
    X = gradient_descent(A, b, x0, n_iter)
    sol = np.linalg.solve(A, b)
    print('Gradiente descent : ',X[-1])
    print('np solver : ',sol)
    print('norm(difference): \t',np.linalg.norm(X[-1] - sol)) # difference between gradient_descent's solution and Numpy's solver solution
# Interactive widget: vary the system size and the iteration budget.
interact(show_small_example_GD,n_size=(3,50,1),n_iter=(5,50,1))
"""
Explanation: We'll try our algorithm with some matrices of different sizes, and we'll compare it with the solution given by Numpy's solver.
End of explanation
"""
def conjugate_gradient(A, b, x0, full_output=False, tol=1e-16):
    """Solve A x = b with the Conjugate Gradient method.

    A must be symmetric positive-definite.  In exact arithmetic CG
    converges in at most n iterations, so the history arrays below have
    n+1 rows (the initial guess plus up to n updates).

    Parameters:
        A           : (n, n) symmetric positive-definite matrix.
        b           : (n,) right-hand side vector.
        x0          : (n,) initial guess.
        full_output : if True, return the whole iteration history
                      (X, D, R, alphas, betas, n_residuals); otherwise
                      return only the final approximate solution.
        tol         : early-stopping threshold on ||r_k||_2.
    """
    n = A.shape[0]
    X = np.full((n+1, n),np.nan) # Storing partial solutions x_i
    R = np.full((n+1, n),np.nan) # Storing residues r_i=b-A\,x_i
    D = np.full((n+1, n),np.nan) # Storing conjugate directions d_i
    alphas = np.full(n,np.nan) # Storing alpha's
    betas = np.full(n,np.nan) # Storing beta's
    X[0] = x0 # initial guess: x_0
    R[0] = b - np.dot(A, x0) # initial residue: r_0=b-A\,x_0
    D[0] = R[0] # initial direction: d_0
    n_residuals = np.full(n+1,np.nan) # norm of residuals over iteration: ||r_i||_2
    n_residuals[0] = np.linalg.norm(R[0]) # initializing residual: ||r_0||_2
    x_sol=x0 # first approximation of solution
    for k in np.arange(n):
        if np.linalg.norm(R[k])<=tol: # The algorithm converged early
            if full_output:
                # Trim the history arrays to the iterations actually performed.
                return X[:k+1], D[:k+1], R[:k+1], alphas[:k+1], betas[:k+1], n_residuals[:k+1]
            else:
                return x_sol
        # This is the 'first' version of the algorithm
        # Step length minimizing the quadratic along direction d_k.
        alphas[k] = np.dot(D[k], R[k]) / np.dot(D[k], np.dot(A, D[k]))
        X[k+1] = X[k] + alphas[k]*D[k]
        # Residual update reuses A d_k instead of recomputing b - A x_{k+1}.
        R[k+1] = R[k] - alphas[k]*np.dot(A, D[k])
        n_residuals[k+1] = np.linalg.norm(R[k+1])
        # beta chosen so that d_{k+1} is A-conjugate to d_k.
        betas[k] = np.dot(D[k],np.dot(A,R[k+1]))/np.dot(D[k],np.dot(A,D[k]))
        D[k+1] = R[k+1] - betas[k]*D[k]
        x_sol=X[k+1]
    if full_output:
        return X, D, R, alphas, betas, n_residuals
    else:
        return x_sol
def compute_A_orthogonality(V,A='identity'):
    """A-inner products between the first m-1 rows of V.

    Returns an (m-1, m-1) matrix whose (i, j) entry is V[i]^T A V[j];
    only the first m-1 rows of V are compared.  When A is omitted (the
    sentinel string 'identity'), the identity matrix is used, so this
    reduces to the ordinary dot product.
    """
    m, n = V.shape
    if isinstance(A, str):
        A = np.eye(n)
    pairs = np.empty((m-1, m-1))
    for row in range(m-1):
        for col in range(m-1):
            pairs[row, col] = np.dot(V[row], np.dot(A, V[col]))
    return pairs
def show_small_example_CG(n_size=2,flag_image=False,flag_image_values=True):
    """Run CG on a random SPD system; either print the full history or plot it.

    With flag_image=True, shows (1) pairwise dot products of residuals
    (checking orthogonality), (2) A-inner products of directions
    (checking A-conjugacy), (3) residual-norm decay, (4) alphas/betas.
    """
    np.random.seed(0)  # reproducible example for the widget
    A = generate_spd_matrix(n_size)
    b = np.ones(n_size)
    x0 = np.zeros(n_size)
    X, D, R, alphas, betas, n_residuals = conjugate_gradient(A, b, x0, True)
    if flag_image:
        outR=compute_A_orthogonality(R)      # residual orthogonality r_i^T r_j
        outD=compute_A_orthogonality(D,A)    # direction A-conjugacy d_i^T A d_j
        M=8
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(2*M,M))
        # log10 of |.|+1e-16 so exact zeros still render on the color scale.
        plot_matrices_with_values(ax1,np.log10(np.abs(outR)+1e-16),flag_image_values)
        ax1.set_title(r'$\log_{10}(|\mathbf{r}_i^T \, \mathbf{r}_j|+10^{-16})$',pad=20)
        plot_matrices_with_values(ax2,np.log10(np.abs(outD)+1e-16),flag_image_values)
        ax2.set_title(r'$\log_{10}(|\mathbf{d}_i^T\,A\,\mathbf{d}_j|+10^{-16})$',pad=20)
        plt.sca(ax3)
        plt.semilogy(n_residuals,'.')
        plt.grid(True)
        plt.ylabel(r'$||\mathbf{r}_i||$')
        plt.xlabel(r'$i$')
        plt.title('n= %d'%n_size)
        plt.sca(ax4)
        plt.plot(alphas,'.',label=r'$\alpha_i$',markersize=10)
        plt.plot(betas,'.',label=r'$\beta_i$',markersize=10)
        plt.grid(True)
        plt.legend()
        plt.xlabel(r'$i$')
        plt.show()
    else:
        print('n_residuals:')
        print(n_residuals)
        print('alphas:')
        print(alphas)
        print('betas:')
        print(betas)
        print('R:')
        print(R)
        print('X:')
        print(X)
        print('D:')
        print(D)
interact(show_small_example_CG,n_size=(2,50,1),flag_image=False,flag_image_values=True)
def plot_iterative_solution(A,b,X,R,D,n=0,elev=30,azim=310):
    """3D visualization of the first n CG steps for a 3x3 system.

    Left panel (ax1): vectors at true scale.  Right panel (ax2): the
    same vectors normalized, since late residuals become too tiny to see.
    Colors: residuals blue, iterates red, directions green, steps
    magenta, initial guess black.
    """
    # NOTE(review): L is defined but never used in this function.
    L=lambda x: np.dot(x,np.dot(A,x))-np.dot(b,x)
    fig=plt.figure(figsize=(20,10))
    ax1 = fig.add_subplot(121, projection='3d')
    ax2 = fig.add_subplot(122, projection='3d')
    # Plotting the residual vectors
    for v in R[:n+1]:
        # ax1 shows the actual values and ax2 the normalized values.
        # We normalize just for plotting purposes, otherwise the last
        # vectors look too tiny.
        ax1.quiver(0, 0, 0, v[0], v[1], v[2],color='blue')
        ax2.quiver(0, 0, 0, v[0]/np.linalg.norm(v), v[1]/np.linalg.norm(v), v[2]/np.linalg.norm(v),color='blue')
    # Plotting the approximate-solution (iterate) vectors
    for v in X[1:n+1]:
        ax1.quiver(0, 0, 0, v[0], v[1], v[2],color='red')
        ax2.quiver(0, 0, 0, v[0]/np.linalg.norm(v), v[1]/np.linalg.norm(v), v[2]/np.linalg.norm(v),color='red')
    # Plotting the direction vectors
    for v in D[:n]:
        ax1.quiver(0, 0, 0, v[0], v[1], v[2],color='green',linewidth=10,alpha=0.5)
        ax2.quiver(0, 0, 0, v[0]/np.linalg.norm(v), v[1]/np.linalg.norm(v),
                   v[2]/np.linalg.norm(v),color='green',linewidth=10,alpha=0.5)
    # plotting evolution of solution: initial guess, then each step x_{k-1} -> x_k
    v = X[0]
    ax1.quiver(0, 0, 0, v[0], v[1], v[2], color='black', linestyle='dashed')
    ax2.quiver(0, 0, 0, v[0]/np.linalg.norm(v), v[1]/np.linalg.norm(v), v[2]/np.linalg.norm(v),color='black',linestyle='dashed')
    for k in np.arange(1,n+1):
        v = X[k]-X[k-1]
        vp= X[k-1]
        ax1.quiver(vp[0], vp[1], vp[2], v[0], v[1], v[2], color='magenta',linewidth=10,alpha=0.5)
        v = X[k]/np.linalg.norm(X[k])-X[k-1]/np.linalg.norm(X[k-1])
        vp= X[k-1]/np.linalg.norm(X[k-1])
        ax2.quiver(vp[0], vp[1], vp[2], v[0], v[1], v[2],color='magenta',linewidth=10,alpha=0.5)
    # Axis limits: ax1 bounds all plotted data (always including the origin);
    # ax2 is the unit cube since its vectors are normalized.
    ax1.set_xlim(min(0,np.min(X[:,0]),np.min(R[:,0])),max(0,np.max(X[:,0]),np.max(R[:,0])))
    ax1.set_ylim(min(0,np.min(X[:,1]),np.min(R[:,1])),max(0,np.max(X[:,1]),np.max(R[:,1])))
    ax1.set_zlim(min(0,np.min(X[:,2]),np.min(R[:,2])),max(0,np.max(X[:,2]),np.max(R[:,2])))
    ax2.set_xlim(-1,1)
    ax2.set_ylim(-1,1)
    ax2.set_zlim(-1,1)
    ax1.view_init(elev,azim)
    ax2.view_init(elev,azim)
    plt.title('r-blue, x-red, d-green, x-mag, x0-black')
    plt.show()
# Setting a standard name for the variables.
# A small 3x3 SPD system so every vector can be drawn in 3D below.
np.random.seed(0)
A = generate_spd_matrix(3)
b = np.ones(3)
x0 = np.ones(3)
X, D, R, alphas, betas, n_residuals = conjugate_gradient(A, b, x0, True)
# For plotting with widgets
n_widget = IntSlider(min=0, max=b.shape[0], step=1, value=0)
elev_widget = IntSlider(min=-180, max=180, step=10, value=-180)
azim_widget = IntSlider(min=0, max=360, step=10, value=30)
# Bind the fixed problem data; the sliders only control n and the view angles.
solution_evolution = lambda n,elev,azim: plot_iterative_solution(A,b,X,R,D,n,elev,azim)
interact(solution_evolution,n=n_widget,elev=elev_widget,azim=azim_widget)
"""
Explanation: As we can see, we're getting ok solutions with 15 iterations, even for larger matrices.
A variant of this method is currently used in training neural networks and in Data Science in general, the main difference is that they call the \alpha parameter 'learning rate' and keep it constant.
Another important reason is that sometimes in Data Science they need to solve a nonlinear system of equations rather than a linear one, the good thing is that to solve nonlinear system of equations we do it by a sequence of linear system of equations!
Now, we will discuss a younger sibling, the Conjugate Gradient Method, which is the prefered when the associated matrix is symmetric and positive definite.
<div id='CGM' />
Conjugate Gradient Method
This method works by succesively eliminating the $n$ orthogonal components of the error, one by one. The method arrives at the solution with the following finite loop:
End of explanation
"""
def relative_error(X, r_sol):
    """Relative forward error of each approximate solution in X.

    Parameters
    ----------
    X : (n_steps, n) ndarray, one approximate solution per row.
    r_sol : (n,) ndarray, the reference ("real") solution.

    Returns
    -------
    (n_steps,) ndarray with ||X[i] - r_sol|| / ||r_sol|| for each row.
    """
    # Vectorized: broadcasting subtracts r_sol from every row, and a
    # single norm call with axis=1 replaces the per-row Python loop.
    return np.linalg.norm(X - r_sol, axis=1) / np.linalg.norm(r_sol)
"""
Explanation: The science behind this algorithm is in the classnotes and in the textbook (Numerical Analysis, 2nd Edition, Timothy Sauer). Now let's try it!
Here are some questions to think about:
* What are the advantages and disadvantages of each method: gradient_descent and conjugate_gradient?
* In which cases can the Conjugate Gradient Method converge in less than $n$ iterations?
* What will happen if you use the Gradient Descent or Conjugate Gradient Method with non-symmetric, non-positive-definite matrices?
<div id='LP' />
Let's Play: Practical Exercises and Profiling
First of all, define a function to calculate the progress of the relative error for a given method, that is, input the array of approximate solutions X and the real solution provided by Numpy's solver r_sol and return an array with the relative error for each step.
End of explanation
"""
def show_output_for_non_symmetric_and_npd(np_seed=0):
    """Show how GD and CG behave on a generic random matrix.

    The matrix is (almost surely) neither symmetric nor positive-definite,
    so neither method is expected to converge — this cell illustrates the
    failure mode when the methods' assumptions are violated.
    """
    np.random.seed(np_seed)
    n = 10
    A = 10 * np.random.random((n,n))
    b = 10 * np.random.random(n)
    x0 = np.zeros(n)
    X1 = gradient_descent(A, b, x0, n)
    X2, D, R, alphas, betas, n_residuals = conjugate_gradient(A, b, x0, True)
    r_sol = np.linalg.solve(A, b)  # reference solution from the direct solver
    E1 = relative_error(X1, r_sol)
    E2 = relative_error(X2, r_sol)
    iterations1 = np.linspace(1, n, n)
    iterations2 = np.linspace(1, X2.shape[0], X2.shape[0])
    plt.figure(figsize=(10,5))
    plt.xlabel('Iteration')
    plt.ylabel('Relative Error')
    plt.title('Evolution of the Relative Forward Error for each method')
    plt.semilogy(iterations1, E1, 'rd', markersize=8, label='GD') # Red diamonds are for Gradient Descent
    plt.semilogy(iterations2, E2, 'b.', markersize=8, label='CG') # Blue dots are for Conjugate Gradient
    plt.grid(True)
    plt.legend(loc='best')
    plt.show()
interact(show_output_for_non_symmetric_and_npd,np_seed=(0,100,1))
"""
Explanation: Trying the two methods with a small non-symmetric, non-positive-definite matrix and plotting the forward error for all the methods.
End of explanation
"""
def show_output_for_symmetric_and_pd(np_seed=0,n=100):
    """Compare GD and CG convergence on a random SPD system of size n.

    Both methods' assumptions hold here, so the relative error should
    drop rapidly; the plot is clipped to the first 40 iterations where
    the interesting behavior happens.
    """
    np.random.seed(np_seed)
    A = generate_spd_matrix(n)
    b = np.random.random(n)
    x0 = np.zeros(n)
    X1 = gradient_descent(A, b, x0, n)
    X2, D, R, alphas, betas, n_residuals = conjugate_gradient(A, b, x0, True)
    r_sol = np.linalg.solve(A, b)  # reference solution from the direct solver
    E1 = relative_error(X1, r_sol)
    E2 = relative_error(X2, r_sol)
    iterations1 = np.linspace(1, n, n)
    iterations2 = np.linspace(1, X2.shape[0], X2.shape[0])
    plt.figure(figsize=(10,5))
    plt.xlabel('Iteration')
    plt.ylabel('Relative Error')
    plt.title('Evolution of the Relative Forward Error for each method')
    plt.semilogy(iterations1, E1, 'rd', markersize=8, label='GD') # Red diamonds are for Gradient Descent
    plt.semilogy(iterations2, E2, 'b.', markersize=8, label='CG') # Blue dots are for Conjugate Gradient
    plt.grid(True)
    plt.legend(loc='best')
    plt.xlim([0,40])  # zoom in on the iterations where convergence occurs
    plt.show()
interact(show_output_for_symmetric_and_pd,np_seed=(0,100,1),n=(10,1000,10))
"""
Explanation: As you can see, if the matrix doesn't meet the requirements for these methods, the results can be quite terrible.
Let's try again, this time using an appropriate matrix.
End of explanation
"""
A = generate_spd_matrix(100)
b = np.ones(100)
x0 = np.random.random(100)
%timeit gradient_descent(A, b, x0, n_iter=100, tol=1e-5)
%timeit conjugate_gradient(A, b, x0, tol=1e-5)
# Commented because it is taking too long, we need to review this!
# %memit gradient_descent(A, b, x0, n_iter=100, tol=1e-5)
# %memit conjugate_gradient(A, b, x0, tol=1e-5)
"""
Explanation: Amazing! We started with a huge relative error and reduced it to practically zero in just under 10 iterations (the algorithms all have 100 iterations but we're showing you the first 40).
We can clearly see that the Conjugate Gradient Method converges faster than the Gradient Descent method, even for larger matrices.
We can see that, reached a certain size for the matrix, the amount of iterations needed to reach a small error remains more or less the same. We encourage you to try other kinds of matrices to see how the algorithms behave, and experiment with the code. Now let's move on to profiling.
Of course, you win some, you lose some. Accelerating the convergence of the algorithm means you have to spend more of other resources. We'll use the functions %timeit and %memit to see how the algorithms behave.
End of explanation
"""
|
marko911/deep-learning | tv-script-generation/dlnd_tv_script_generation.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
# Load the raw Simpsons script text (Moe's Tavern scenes only).
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
# (the first 81 characters are a header baked into the data file).
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
# Which slice of lines to preview at the bottom of this cell.
view_sentence_range = (100, 110)

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np

print('Dataset Stats')
# Unique-word count via a throwaway dict keyed by word.
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines, sentences by single newlines.
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))

sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))

print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
from collections import Counter
from string import punctuation
# NOTE: the original cell also ran `print(tf.__version__)`, but TensorFlow is
# not imported anywhere before this point in the notebook, so that line raised
# a NameError. The TF version is printed by the version-check cell instead.
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    Word ids are assigned by descending frequency starting at 0, so every id
    is a valid row index into an embedding matrix of shape
    (vocab_size, embed_dim) and a valid index into the softmax output.
    (Previously ids started at 1, which made the largest id equal to
    vocab_size -- one past the end of the embedding table built later with
    vocab_size = len(int_to_vocab).)

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    counts = Counter(text)
    # Most frequent word first -> gets the smallest id.
    vocab = sorted(counts, key=counts.get, reverse=True)
    vocab_to_int = {word: i for i, word in enumerate(vocab)}
    int_to_vocab = dict(enumerate(vocab))
    return (vocab_to_int, int_to_vocab)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Pair each punctuation symbol with a readable token name; the '||'
    # delimiters guarantee a token can never collide with a real word.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ['Period', 'Comma', 'Quotation_Mark', 'Semicolon',
             'Exclamation_Mark', 'Question_Mark', 'Left_Parentheses',
             'Right_Parentheses', 'Dash', 'Return']
    return {symbol: '||{}||'.format(name)
            for symbol, name in zip(symbols, names)}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuation marks like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol into its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
# using the token_lookup and create_lookup_tables functions defined above,
# then persist the result (see helper.preprocess_and_save_data).
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests

# Restore the preprocessed dataset saved earlier, so the notebook can be
# resumed from this point without re-running the preprocessing cells.
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
print(tf.__version__)

# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))

# Check for a GPU; training works on CPU, just slowly, so only warn.
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Both shapes are (batch_size, seq_length); they are left dynamic so the
    # same graph can later be reused for generation with a different batch.
    input_ph = tf.placeholder(tf.int32, [None, None], name='input')
    target_ph = tf.placeholder(tf.int32, [None, None], name='target')
    lr_ph = tf.placeholder(tf.float32, name='learning_rate')
    return input_ph, target_ph, lr_ph
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def build_rnn_cell(size):
    """Return a single BasicLSTMCell with `size` hidden units."""
    return tf.contrib.rnn.BasicLSTMCell(size, state_is_tuple=True)
def get_init_cell(batch_size, rnn_size, keep_prob=0.7):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :param keep_prob: Dropout keep probability. Kept for API compatibility;
                      dropout is currently NOT applied to the cells.
    :return: Tuple (cell, initialize state)
    """
    num_layers = 2
    # Stack two LSTM layers; each layer must get its own cell instance.
    cell = tf.contrib.rnn.MultiRNNCell(
        [build_rnn_cell(rnn_size) for _ in range(num_layers)])
    # Zero state, exposed under a fixed name so it can be fetched again after
    # the graph is reloaded from a checkpoint.
    initial_state = cell.zero_state(batch_size, tf.float32)
    initial_state = tf.identity(initial_state, name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initalize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, uniformly initialized in [-1, 1).
    embedding_matrix = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    # Replace each word id with its embed_dim-dimensional vector.
    return tf.nn.embedding_lookup(embedding_matrix, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell over the (dynamic-length) input sequence.
    rnn_output, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the state so it can be retrieved from a reloaded graph.
    return rnn_output, tf.identity(state, name='final_state')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Embed the word ids, run them through the RNN, then project each time
    # step onto the vocabulary with a linear (no activation) layer.
    embed_input = get_embed(input_data, vocab_size, embed_dim)
    rnn_output, final_state = build_rnn(cell, embed_input)
    logits = tf.contrib.layers.fully_connected(
        rnn_output,
        vocab_size,
        activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
        biases_initializer=tf.zeros_initializer())
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array of shape
             (n_batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    keep = n_batches * words_per_batch

    # Targets are the inputs shifted one word to the right; words that do not
    # fill a complete batch are dropped.
    xs = np.array(int_text[:keep]).reshape(batch_size, -1)
    ys = np.array(int_text[1:keep + 1]).reshape(batch_size, -1)

    # Chop the (batch_size, n_batches * seq_length) grids into consecutive
    # seq_length-wide windows, one (input, target) pair per batch.
    batches = [
        (xs[:, i * seq_length:(i + 1) * seq_length],
         ys[:, i * seq_length:(i + 1) * seq_length])
        for i in range(n_batches)
    ]
    return np.array(batches)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2 3], [ 7 8 9]],
# Batch of targets
[[ 2 3 4], [ 8 9 10]]
],
# Second Batch
[
# Batch of Input
[[ 4 5 6], [10 11 12]],
# Batch of targets
[[ 5 6 7], [11 12 13]]
]
]
```
End of explanation
"""
# Training hyperparameters (hand-tuned).
# Number of Epochs
num_epochs = 60
# Batch Size
batch_size = 20
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq

train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    # Batch size is taken from the runtime shape, so the same graph can later
    # be used for generation with a batch of 1.
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)

    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')

    # Loss function: weighted cross-entropy over every time step
    # (all weights are 1, i.e. every position counts equally).
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)

    # Gradient Clipping: clip each gradient element to [-1, 1] to keep the
    # RNN from exploding.
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
    train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)

with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())

    for epoch_i in range(num_epochs):
        # Reset the RNN state at the start of every epoch; the first batch is
        # only used here to give the zero-state op its batch dimension.
        state = sess.run(initial_state, {input_text: batches[0][0]})

        for batch_i, (x, y) in enumerate(batches):
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,
                lr: learning_rate}
            # Carry the final state over to the next (consecutive) batch.
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)

            # Show every <show_every_n_batches> batches
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))

    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint: seq_length and save_dir are needed later
# to restore the trained model for script generation.
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests

# Reload the preprocessed vocabulary and the training parameters saved above;
# int_text itself is not needed for generation.
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Fetch the four named tensors in the order callers expect them.
    tensor_names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample an index according to the given distribution, then map it to the
    # word at that position of the dictionary's insertion order.
    words = list(int_to_vocab.values())
    chosen = np.random.choice(len(words), p=probabilities)
    return words[chosen]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
# Number of words to generate.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

    # Sentences generation setup
    gen_sentences = [prime_word + ':']
    # Feed a dummy word id just to give the zero-state op a batch of 1.
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: at most the last seq_length generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])

        # Get Prediction
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})

        # NOTE(review): probs is produced from logits of shape
        # (batch, seq, vocab); indexing with dyn_seq_length-1 selects along
        # the FIRST (batch) axis -- verify this actually picks the last time
        # step's distribution as intended (probabilities[0][dyn_seq_length-1]
        # would be the per-time-step form).
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)

        gen_sentences.append(pred_word)

    # Remove tokens: turn '||Period||'-style tokens back into punctuation.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        # NOTE(review): `ending` is computed but never used below.
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')

    print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
jhillairet/scikit-rf | doc/source/examples/metrology/Multiline TRL.ipynb | bsd-3-clause | %matplotlib inline
import skrf
from skrf.media import CPW, Coaxial
import numpy as np
import matplotlib.pyplot as plt
skrf.stylely()
"""
Explanation: Multiline TRL
Multiline TRL is a two-port VNA calibration utilizing at least two transmission lines with different physical lengths and at least one reflective standard that is identical on both ports. The electrical parameters of the lines don't need to be known, but the transmission lines should have identical construction (same propagation constant and characteristic impedance). The reflect standard reflection coefficient doesn't need to be known exactly, phase needs to be known with 90 degree accuracy.
If the measured phase differences of the lines is a multiple of 180 degrees the calibration is singular. The calibration accuracy is worse the closer the line measurement phases are to the singularities, the best accuracy is obtained in the two lines case when the phase difference is 90 degrees. Multiple lines can be used to extend the frequency range where the calibration is accurate.
This example demonstrates how to use skrf's NIST-style Multiline calibration (NISTMultilineTRL). First a simple application is presented, followed by a full simulation to demonstrate the improvements in calibration accuracy vs the number of lines. All data is used in the demonstration is generated by skrf, and the code for this is given at the end of the example.
Simple Multiline TRL
Setup
End of explanation
"""
# Load all measurement data into a dictionary
data = skrf.read_all_networks('multiline_trl_data/')
# Pull out measurements by name into an ordered list
measured_names = ['thru','reflect','linep3mm','line2p3mm']
measured = [data[k] for k in measured_names]
# Switch terms
gamma_f,gamma_r = data['gamma_f'],data['gamma_r']
# DUT
dut_meas = data['DUT']
# 50 ohm termination
res_50ohm_meas = data['res_50ohm']
"""
Explanation: Load data into skrf
End of explanation
"""
# define the line lengths in meters (including thru)
l = [0, 0.3e-3, 2.3e-3]
# Do the calibration
cal = skrf.NISTMultilineTRL(
measured = measured, # Measured standards
Grefls = [-1], # Reflection coefficient of the reflect, -1 for short
l = l, # Lengths of the lines
er_est = 7, # Estimate of transmission line effective permittivity
switch_terms = (gamma_f, gamma_r) # Switch terms
)
# Correct the DUT using the above calibration
corrected = cal.apply_cal(dut_meas)
corrected.plot_s_db()
"""
Explanation: Simple Multiline TRL
End of explanation
"""
# Run NIST Multiline TRL calibration with different combinations of lines
# Put through and reflect to their own list ...
mtr = measured[:2]
# and lines on their own
mlines = measured[2:]
line_len = l[1:]
cals = []
duts = []
line_combinations = [[0], [1], [0,1]]
for used_lines in line_combinations:
m = mtr + [mlines[i] for i in used_lines]
# Add thru length to list of line lengths
l = [l[0]] + [line_len[i] for i in used_lines]
# Do the calibration
cal = skrf.NISTMultilineTRL(
measured = m, # Measured standards
Grefls = [-1], # Reflection coefficient of the reflect, -1 for short
l = l, # Lengths of the lines
er_est = 7, # Estimate of transmission line effective permittivity
switch_terms = (gamma_f, gamma_r) # Switch terms
)
# Correct the DUT using the above calibration
corrected = cal.apply_cal(dut_meas)
corrected.name = 'DUT, lines {}'.format(used_lines)
duts.append(corrected)
cals.append(cal)
"""
Explanation: Compare calibrations with different combinations of lines
Here we loop through different line combinations to demonstrate the difference in the calibration accuracy.
End of explanation
"""
# Overlay |S21| of the DUT corrected with each line combination.
plt.figure()
plt.title('DUT S21')
for dut in duts:
    dut.plot_s_db(m=1, n=0)
"""
Explanation: Transmission of the corrected DUT
Plot the corrected DUT calibrated with different combination of calibration lines.
End of explanation
"""
# Overlay |S11| of the DUT corrected with each line combination.
plt.figure()
plt.title('DUT S11')
for dut in duts:
    dut.plot_s_db(m=0, n=0)
"""
Explanation: S11 of corrected DUT with different amount of calibration lines
S11 shows bigger changes.
With one short line low frequencies are very noisy
With only the long line the calibration is very inaccurate at frequencies where the phase difference of the thru and line is close to a multiple of 180 degrees
With both lines calibration accuracy is good everywhere
End of explanation
"""
# NOTE(review): `dut` here is the loop variable left over from the plotting
# cell above (the last corrected DUT); only its frequency axis is used.
f_ghz = dut.frequency.f_scaled

# Normalized standard deviation of each calibration vs frequency.
plt.figure()
plt.title('Calibration normalized standard deviation')
for e, cal in enumerate(cals):
    plt.plot(f_ghz, cal.nstd, label='Lines: {}'.format(line_combinations[e]))
plt.ylim([0,20])
plt.legend(loc='upper right')
dut.frequency.labelXAxis()
"""
Explanation: Normalized standard deviation of different calibrations
Normalized standard deviation can be used to measure the accuracy of the calibration. Lower number means calibration is less sensitive to the measurement noise.
TRL calibration with one 90 degrees long line has normalized standard deviation of 1.
TRL calibration with one 180 degree long lossless line is singular and has infinite normalized standard deviation.
With multiple lines normalized standard deviation less than one is possible.
Note that the nstd is normalized such that it doesn't consider the actual measurement noise. It's calculated only from the solved propagation constant and line lengths. The threshold of how large it can be depends on the DUT being measured, measurement noise and the required accuracy of the measurement. If there are large spikes, such as are visible in the long line case below, that's a sign that the calibration is very close to singular at that frequency and the measurement accuracy is going to be poor.
End of explanation
"""
# Define calibration standard media
freq = dut.frequency
cpw = CPW(freq, z0=55, w=40e-6, s=25e-6, ep_r=12.9,
         t=5e-6, rho=2e-8)

# Get the cal with the both lines
cal = cals[-1]

# Calculate CPW complex permittivity from the propagation constant:
# gamma = (2*pi*f/c) * sqrt(er_eff)  =>  er_eff = -(c*gamma/(2*pi*f))^2
c = 299792458
er_eff = -(c*cpw.gamma/(2*np.pi*freq.f))**2

# Compare the permittivity solved by the calibration to the model's.
plt.figure()
plt.title('CPW effective permittivity (real part)')
plt.plot(f_ghz, cal.er_eff.real, label='Solved er_eff')
plt.plot(f_ghz, er_eff.real, label='Actual er_eff')
plt.xlabel('Frequency (GHz)')
plt.legend(loc='lower right')
"""
Explanation: Calculate effective complex relative permittivity of transmission lines used in the calibration
Effective complex relative permittivity $\epsilon_{r,eff}$ of a transmission line is related to the propagation constant $\gamma$ as:
$\gamma = \frac{2\pi f}{c}\sqrt{\epsilon_{r,eff}}$, where $c$ equals the speed of light and $f$ is frequency.
In general it's a complex value with the imaginary part indicating losses.
CPW line effective permittivity can be approximated as average of substrate and air permittivities.
End of explanation
"""
# Compare the reflect standard solved by the calibration against the
# delay-short actually used to generate the data (see simulation below).
plt.figure()
plt.title('Solved and actual reflection coefficient of the reflect standard')
cal.apply_cal(measured[1]).plot_s_deg(n=0, m=0, label='Solved')
cpw.delay_short(10e-6, 'm').plot_s_deg(n=0, m=0, label='Actual')
"""
Explanation: TRL calibration accuracy is the best when line length difference is 90 degrees. Solved propagation constant and effective permittivity however are more accurate the bigger the line length difference is. At low frequencies the estimate is noisier due to the line phase difference being small.
Plot the phase of the solved reflection coefficient
Since we know the ideals in this simulation we can re-define them here, and compare the determined reflect to the actual reflect. (see below for simulation details)
End of explanation
"""
# Same calibration as before, but with the reference planes moved.
cal_shift = skrf.NISTMultilineTRL(
    measured = measured,  # Measured standards
    Grefls = [-1],        # Reflection coefficient of the reflect, -1 for short
    l = l,                # Lengths of the lines
    er_est = 7,           # Estimate of transmission line effective permittivity
    switch_terms = (gamma_f, gamma_r), # Switch terms
    # Shift reference planes towards VNA by this amount (in m) on both ports
    ref_plane = -50e-6
    )

# Correct the DUT using the above calibration
corrected_thru = cal.apply_cal(measured[0])
corrected_thru_shifted = cal_shift.apply_cal(measured[0])

# The shifted calibration should add extra phase to the corrected thru.
corrected_thru.plot_s_deg(m=1, n=0, label='Thru phase')
corrected_thru_shifted.plot_s_deg(m=1, n=0, label='Reference plane shifted thru phase')
"""
Explanation: Reference plane shift
Because propagation constant of the media is solved during the calibration it's possible to shift the reference plane by a specified distance.
The reference plane shift can be specified with ref_plane argument. The shift should be specified in meters, negative lengths is towards the VNA. By default the same shift is applied to both ports. Unequal shift on the two ports is supported by passing a two element list.
End of explanation
"""
# Calibration renormalized from the line impedance (55 ohm) to 50 ohm.
cal_ref = skrf.NISTMultilineTRL(
    measured = measured,  # Measured standards
    Grefls = [-1],        # Reflection coefficient of the reflect, -1 for short
    l = l,                # Lengths of the lines
    er_est = 7,           # Estimate of transmission line effective permittivity
    switch_terms = (gamma_f, gamma_r), # Switch terms
    z0_line = 55,         # Line actual characteristic impedance
    z0_ref = 50           # Calibration reference impedance
    )

# With a 50 ohm reference, the 50 ohm termination should look well matched.
cal.apply_cal(res_50ohm_meas).s11.plot_s_db(label='50 $\Omega$ termination |$S_{11}$|, Z_ref = line')
cal_ref.apply_cal(res_50ohm_meas).s11.plot_s_db(label='50 $\Omega$ termination |$S_{11}$|, Z_ref = 50 $\Omega$')
"""
Explanation: Calibration reference impedance renormalization
The reference impedance of the calibration is by default the transmission line characteristic impedance. If we know the actual characteristic impedance of the lines we can give it to the calibration routine with the z0_line argument to renormalize the measured S-parameters to a fixed reference z0_ref.
If the conductance per unit length (G) is much lower than the capacitive reactance per unit length ($j\omega C_0$), the characteristic impedance of the transmission line can be written in terms of the propagation constant $\gamma$ and capacitance per unit length $C_0$:
$Z_0 = \gamma/(j 2 \pi f C_0)$
If $C_0$ is known it can be given to the calibration routine with c0 parameter to renormalize the calibration reference impedance to z0_ref (defaults to 50 ohms) assuming G = 0.
If the line is lossy the characteristic impedance is complex valued and giving a single c0 instead of a fixed z0_line is usually more accurate.
In this case we know that the line characteristic impedance is actually 55 ohms. To renormalize the calibration from 55 ohms to 50 ohms we need to give z0_line=55 argument to the calibration routine.
End of explanation
"""
# Frequency axis for the whole simulation: 1-100 GHz, 201 points.
freq = skrf.F(1,100,201)

# CPW media used for DUT and the calibration standards
cpw = CPW(freq, z0=55, w=40e-6, s=25e-6, ep_r=12.9,
         t=5e-6, rho=2e-8)

# 1.0 mm coaxial media for calibration error boxes
coax1mm = Coaxial(freq, z0=50, Dint=0.44e-3, Dout=1.0e-3, sigma=1e8)

f_ghz = cpw.frequency.f*1e-9
"""
Explanation: After renormalization the 50 ohm termination measurement shows good matching. It's not perfectly matched due to the noise in the measurements.
Simulation to generate the input data
Here is how we made the data used above.
Create frequency and Media
End of explanation
"""
# Error boxes X (port 1) and Y (port 2): long, mismatched coax lines.
X = coax1mm.line(1, 'm', z0=58, name='X', embed=True)
Y = coax1mm.line(1.1, 'm', z0=40, name='Y', embed=True)

plt.figure()
plt.title('Error networks')
X.plot_s_db()
Y.plot_s_db()

# Realistic looking switch terms
gamma_f = coax1mm.delay_load(0.2, 21e-3, 'm', z0=60, embed=True)
gamma_r = coax1mm.delay_load(0.25, 16e-3, 'm', z0=56, embed=True)

plt.figure()
plt.title('Switch terms')
gamma_f.plot_s_db()
gamma_r.plot_s_db()
"""
Explanation: Make realistic looking error networks.
Propagation constant determination is iterative and doesn't work as well when the error networks are randomly generated
End of explanation
"""
# Lengths of the lines used in the calibration, units are in meters
line_len = [0.3e-3, 2.3e-3]
# (note: the comprehension variable `l` shadows the module-level length list)
lines = [cpw.line(l, 'm') for l in line_len]

# Attenuator with mismatched feed lines
dut_feed = cpw.line(1.5e-3, 'm', z0=60, embed=True)
dut = dut_feed**cpw.attenuator(-10)**dut_feed

res_50ohm = cpw.resistor(50) ** cpw.short(nports=2) ** cpw.resistor(50)

# Through and non-ideal short
# Real reflection coefficient is solved during the calibration
short = cpw.delay_short(10e-6, 'm')

actuals = [
    cpw.thru(),
    skrf.two_port_reflect(short, short),
    ]

actuals.extend(lines)

# Measured: embed every standard between the error boxes X and Y.
measured = [X**k**Y for k in actuals]

# Switch termination
measured = [skrf.terminate(m, gamma_f, gamma_r) for m in measured]

# Add little noise to the measurements
for m in measured:
    m.add_noise_polar(0.001, 0.1)

names = ['thru', 'reflect', 'linep3mm', 'line2p3mm']
for k,name in enumerate(names):
    measured[k].name=name

# Noiseless DUT so that all the noise will be from the calibration
dut_meas = skrf.terminate(X**dut**Y, gamma_f, gamma_r)
dut_meas.name = 'DUT'
res_50ohm_meas = skrf.terminate(X**res_50ohm**Y, gamma_f, gamma_r)
res_50ohm_meas.name = 'res_50ohm'

# Put through and reflect to their own list ...
mtr = measured[:2]

# and lines on their own
mlines = measured[2:]

# write data to disk (disabled by default; flip write_data to regenerate)
write_data = False

if write_data:
    [k.write_touchstone(dir='multiline_trl_data/') for k in measured]
    gamma_f.write_touchstone('multiline_trl_data/gamma_f.s1p')
    gamma_r.write_touchstone('multiline_trl_data/gamma_r.s1p')
    dut_meas.write_touchstone(dir='multiline_trl_data/')
    res_50ohm_meas.write_touchstone(dir='multiline_trl_data/')
"""
Explanation: Generate Fictitious measurements
End of explanation
"""
|
LimeeZ/phys292-2015-work | assignments/assignment11/OptimizationEx01.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
"""
Explanation: Optimization Exercise 1
Imports
End of explanation
"""
def hat(x, a, b):
    """Return the hat potential V(x) = -a*x**2 + b*x**4."""
    return b * x**4 - a * x**2
assert hat(0.0, 1.0, 1.0)==0.0
assert hat(0.0, 1.0, 1.0)==0.0
assert hat(1.0, 10.0, 1.0)==-9.0
"""
Explanation: Hat potential
The following potential is often used in Physics and other fields to describe symmetry breaking and is often known as the "hat potential":
$$ V(x) = -a x^2 + b x^4 $$
Write a function hat(x,a,b) that returns the value of this function:
End of explanation
"""
a = 5.0
b = 1.0
x1 = np.arange(-3,3,0.1)
plt.plot(x1, hat(x1, 5,1))
assert True # leave this to grade the plot
"""
Explanation: Plot this function over the range $x\in\left[-3,3\right]$ with $b=1.0$ and $a=5.0$:
End of explanation
"""
def hat(x):
b = 1
a = 5
v = -a*x**2 + b*x**4
return v
xmin1 = opt.minimize(hat,-1.5)['x'][0]
xmin2 = opt.minimize(hat,1.5)['x'][0]
xmins = np.array([xmin1,xmin2])
print(xmin1)
print(xmin2)
x1 = np.arange(-3,3,0.1)
plt.plot(x1, hat(x1))
plt.scatter(xmins,hat(xmins), c = 'r',marker = 'o')
plt.grid(True)
plt.title('Hat Potential')
plt.xlabel('Range')
plt.ylabel('Potential')
assert True # leave this for grading the plot
"""
Explanation: Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$.
Use scipy.optimize.minimize to find the minima. You will have to think carefully about how to get this function to find both minima.
Print the x values of the minima.
Plot the function as a blue line.
On the same axes, show the minima as red circles.
Customize your visualization to make it beautiful and effective.
End of explanation
"""
|
rvperry/phys202-2015-work | assignments/assignment10/ODEsEx01.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
"""
Explanation: Ordinary Differential Equations Exercise 1
Imports
End of explanation
"""
def solve_euler(derivs, y0, x):
    """Solve a 1d ODE using Euler's method.

    Parameters
    ----------
    derivs : function
        The derivative of the diff-eq; called below as derivs(x, y) with
        floats (x first -- the original docstring said deriv(y, x), which
        did not match the call).
    y0 : float
        The initial condition y[0] = y(x[0]).
    x : np.ndarray, list, tuple
        The array of times at which to solve the diff-eq; assumed uniformly
        spaced (the step h is taken from the first two points).
    Returns
    -------
    y : np.ndarray
        Array of solutions y[i] = y(x[i])
    """
    h=x[1]-x[0]
    y=np.empty(len(x))
    y[0]=y0
    for i in range(len(x)-1):
        # Forward Euler update: y_{n+1} = y_n + h * f(x_n, y_n)
        y[i+1]=y[i]+derivs(x[i],y[i])*h
    return y
# Smoke test with a constant derivative (argument order is irrelevant here).
solve_euler(lambda y, x: 1, 0, [0,1,2])
assert np.allclose(solve_euler(lambda y, x: 1, 0, [0,1,2]), [0,1,2])
"""
Explanation: Euler's method
Euler's method is the simplest numerical approach for solving a first order ODE numerically. Given the differential equation
$$ \frac{dy}{dx} = f(y(x), x) $$
with the initial condition:
$$ y(x_0)=y_0 $$
Euler's method performs updates using the equations:
$$ y_{n+1} = y_n + h f(y_n,x_n) $$
$$ h = x_{n+1} - x_n $$
Write a function solve_euler that implements the Euler method for a 1d ODE and follows the specification described in the docstring:
End of explanation
"""
def solve_midpoint(derivs, y0, x):
    """Solve a 1d ODE using the Midpoint method.

    Parameters
    ----------
    derivs : function
        The derivative of the diff-eq; see the argument-order note in the
        loop below.
    y0 : float
        The initial condition y[0] = y(x[0]).
    x : np.ndarray, list, tuple
        The array of times at which to solve the diff-eq; assumed uniformly
        spaced.
    Returns
    -------
    y : np.ndarray
        Array of solutions y[i] = y(x[i])
    """
    h=x[1]-x[0]
    y=np.empty(len(x))
    y[0]=y0
    for i in range(len(x)-1):
        # NOTE(review): the inner call passes (x[i], y[i]) but the outer call
        # passes (y_mid, x_mid) -- the argument orders disagree.  This only
        # gives correct results when derivs is insensitive to the order (e.g.
        # constant, as in the assert below); confirm against the derivs
        # function used elsewhere in this notebook.
        y[i+1]=y[i]+h*derivs(y[i]+.5*h*derivs(x[i],y[i]),x[i]+.5*h)
    return y
assert np.allclose(solve_midpoint(lambda y, x: 1, 0, [0,1,2]), [0,1,2])
"""
Explanation: The midpoint method is another numerical method for solving the above differential equation. In general it is more accurate than the Euler method. It uses the update equation:
$$ y_{n+1} = y_n + h f\left(y_n+\frac{h}{2}f(y_n,x_n),x_n+\frac{h}{2}\right) $$
Write a function solve_midpoint that implements the midpoint method for a 1d ODE and follows the specification described in the docstring:
End of explanation
"""
def solve_exact(x):
    """Compute the exact solution to dy/dx = x + 2y with y(0) = 0.

    The closed form is y(x) = 0.25*exp(2x) - 0.5x - 0.25.

    Parameters
    ----------
    x : np.ndarray
        Array of x values to compute the solution at.
    Returns
    -------
    y : np.ndarray
        Array of solutions at y[i] = y(x[i]).
    """
    x = np.asarray(x, dtype=float)
    # Vectorized closed form replaces the original element-wise loop.
    # The unreachable `raise NotImplementedError()` that followed the
    # original return statement (dead code) has been removed.
    return .25*np.exp(2*x) - .5*x - .25
assert np.allclose(solve_exact(np.array([0,1,2])),np.array([0., 1.09726402, 12.39953751]))
"""
Explanation: You are now going to solve the following differential equation:
$$
\frac{dy}{dx} = x + 2y
$$
which has the analytical solution:
$$
y(x) = 0.25 e^{2x} - 0.5 x - 0.25
$$
First, write a solve_exact function that compute the exact solution and follows the specification described in the docstring:
End of explanation
"""
odeint?
# Compare the three numerical solvers against the exact solution on [0, 1].
x=np.linspace(0,1,11)
def derivs(x,y):
    # dy/dx = x + 2y
    d=x+2*y
    return d
E=solve_euler(derivs,0,x)
M=solve_midpoint(derivs, 0, x)
# NOTE(review): scipy's odeint calls its function as func(y, t); derivs here
# takes (x, y), so odeint effectively integrates dy/dx = y + 2x -- confirm
# the intended argument order.
O=odeint(derivs,0,x)
X=solve_exact(x)
print('Euler=',E)
print('Midpoint=',M)
print('Odeint=',O)
print('Exact=',X)
# Top panel: the four solutions; bottom panel: error vs. the exact solution.
plt.figure(figsize=(10,10))
plt.subplot(2,1,1)
plt.plot(x,E,label='Euler')
plt.plot(x,M,label='Midpoint')
plt.plot(x,O,label='Odeint')
plt.plot(x,X,label='Exact')
plt.xlabel('x')
plt.ylabel('y')
plt.title('ODE Y vs X')
plt.legend()
plt.subplot(2,1,2)
plt.plot(x,E-X,label='Euler')
plt.plot(x,M-X,label='Midpoint')
plt.plot(x,O-X,label='Odeint')
plt.legend()
assert True # leave this for grading the plots
"""
Explanation: In the following cell you are going to solve the above ODE using four different algorithms:
Euler's method
Midpoint method
odeint
Exact
Here are the details:
Generate an array of x values with $N=11$ points over the interval $[0,1]$ ($h=0.1$).
Define the derivs function for the above differential equation.
Using the solve_euler, solve_midpoint, odeint and solve_exact functions to compute
the solutions using the 4 approaches.
Visualize the solutions on a sigle figure with two subplots:
Plot the $y(x)$ versus $x$ for each of the 4 approaches.
Plot $\left|y(x)-y_{exact}(x)\right|$ versus $x$ for each of the 3 numerical approaches.
Your visualization should have legends, labeled axes, titles and be customized for beauty and effectiveness.
While your final plot will use $N=10$ points, first try making $N$ larger and smaller to see how that affects the errors of the different approaches.
End of explanation
"""
|
phungkh/phys202-2015-work | assignments/assignment11/OptimizationEx01.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
"""
Explanation: Optimization Exercise 1
Imports
End of explanation
"""
def hat(x, a, b):
    """Evaluate the hat potential V(x) = -a*x**2 + b*x**4."""
    quartic_term = b * x**4
    quadratic_term = a * x**2
    return quartic_term - quadratic_term
assert hat(0.0, 1.0, 1.0)==0.0
assert hat(0.0, 1.0, 1.0)==0.0
assert hat(1.0, 10.0, 1.0)==-9.0
"""
Explanation: Hat potential
The following potential is often used in Physics and other fields to describe symmetry breaking and is often known as the "hat potential":
$$ V(x) = -a x^2 + b x^4 $$
Write a function hat(x,a,b) that returns the value of this function:
End of explanation
"""
# Plot the hat potential over [-3, 3] with a=5, b=1.
a = 5.0
b = 1.0
x = np.linspace(-3,3,1000)
plt.plot(x, hat(x,a,b))
assert True # leave this to grade the plot
"""
Explanation: Plot this function over the range $x\in\left[-3,3\right]$ with $b=1.0$ and $a=5.0$:
End of explanation
"""
# Find both minima by starting the optimizer on either side of x = 0.
min1 = opt.minimize(hat, x0 =-1.7,args=(a,b))
min2=opt.minimize(hat, x0 =1.7, args=(a,b))
print(min1,min2)
print('Our minimas are x=-1.58113883 and x=1.58113882')
plt.figure(figsize=(7,5))
plt.plot(x,hat(x,a,b), color = 'b',label='hat potential')
plt.box(False)
plt.title('Hat Potential')
# NOTE(review): both scatter calls evaluate hat at x=-1.58113883; the
# potential is symmetric so the y-value is identical for min2, but the
# intent was presumably hat(1.58113883, ...).
plt.scatter(x=-1.58113883,y=hat(x=-1.58113883,a=5,b=1), color='r', label='min1')
plt.scatter(x=1.58113883,y=hat(x=-1.58113883,a=5,b=1), color='r',label='min2')
plt.legend()
assert True # leave this for grading the plot
"""
Explanation: Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$.
Use scipy.optimize.minimize to find the minima. You will have to think carefully about how to get this function to find both minima.
Print the x values of the minima.
Plot the function as a blue line.
On the same axes, show the minima as red circles.
Customize your visualization to make it beautiful and effective.
End of explanation
"""
|
SunPower/PVMismatch | pvmismatch/contrib/xlsio/example_workflow/example_workflow.ipynb | bsd-3-clause | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from pvmismatch.pvmismatch_lib import (pvcell, pvconstants, pvmodule,
pvstring, pvsystem)
from pvmismatch.contrib import xlsio
"""
Explanation: Experimenting with shadow patterns and cell temperatures on a PV system
Pvmmxlsio was made for easy manual experimentation with different shadow patterns and temperature inputs on PVMismatch PV system models.
Importing necessary modules
End of explanation
"""
# PV system geometry and cell parameters for the PVMismatch model.
str_len = 6 # number of modules in a string
str_num = 3 # number of (parallel connected) strings in the array
v_bypass = np.float64(-0.5) # [V] trigger voltage of bypass diode
cell_area = np.float64(246.49) # [cm^2] cell area
Isc0_T0 = 9.68 # [A] reference short circuit current
ncols_per_substr=[2]*3 # 3 bypass diodes with 2 series connected cell-columns
nrows=10 # number of cell rows in the module
"""
Explanation: PVMismatch
Defining cell, module and PV system parameters:
End of explanation
"""
# Assemble module -> string -> system, with every cell at 1 sun.
pv_mod_pattern = pvmodule.standard_cellpos_pat(nrows=nrows,
    ncols_per_substr=ncols_per_substr)
pv_mod = pvmodule.PVmodule(cell_pos=pv_mod_pattern, pvcells=None,
    pvconst=None, Vbypass=v_bypass, cellArea=cell_area)
pv_cells = pv_mod.pvcells
for c in pv_cells:
    c.update(Isc0_T0 = Isc0_T0) # updating short circuit currents
pv_mod.setSuns(cells=list(range(0, len(pv_cells))), Ee=[1]*len(pv_cells))
pv_str = pvstring.PVstring(numberMods=str_len, pvmods=[pv_mod]*str_len)
pv_sys = pvsystem.PVsystem(numberStrs=str_num, pvstrs=[pv_str]*str_num,
    numberMods=[str_len]*str_num,
    pvmods=[pv_mod]*str_len)
"""
Explanation: Building a PV system in PVMismatch:
End of explanation
"""
# Export the system layout to a human-readable xls and report max power.
output_xls_name=r'ExcelLayoutFromPVMM.xlsx' # the mod. & sys. layout in xls
xlsio.system_layout_to_xls(output_xls_name, pv_sys, write_bpd_act=False)
print('PV power with 1 suns: ', pv_sys.calcSystem()[2].max(), ' [W]')
plot = pv_sys.plotSys()
plt.show(block=False)
"""
Explanation: PVMismatch xls I/O
Creating a human-readable xls of the PV system layout with the PV cell indexes, irradiances and temperatures with pvmmxlsio.
Also calculating the PV system power with PVMismatch using the default 1000 W/m2 irradiance.
End of explanation
"""
# Read each prepared irradiance pattern, recompute system power, and write
# the bypass-diode activation back into the same workbook.
for i in list(range(1,7)):
    input_xls_name=r'ExcelLayoutFromPVMM_input{}.xlsx'.format(i)
    xlsio.set_input_from_xls(input_xls_name, pv_sys, str_num, str_len)
    print('PV power with irradiances read in from {}: '.format(input_xls_name),
          pv_sys.calcSystem()[2].max(), ' [W]')
    # updating the input excel with the bypass diode activation
    xlsio.system_layout_to_xls(output_xls_name=input_xls_name, pv_sys=pv_sys,
                               write_bpd_act=True)
    pv_sys.plotSys()
    plt.show(block=False)
"""
Explanation: Now let's change the Irradiance in the xls files and save them with a different name.
I have already done it, see ExcelLayoutFromPVMM_input_1-6.xlsx files.
The 3 yellow columns are the 3 PV strings we defined earlier.
Each cell in the xls file is a PV cell
Labels for PV modules (in the xls files):
Rows: m_cr, where m is the number of the given module within the string and cr is the cell row within the module
Columns: s_b_cc, where s is the number of the string in the PV system, b is the number of the bypass diode in the module and cc is the number of the PV cell column within a PV module substring. Therefore a cell with the labels "2_7" (m_cr) and "1_2_1" (s_b_cc) is a PV cell in the 8th cell row of the second column of the 3rd bypass diode of the 3rd module of the 2nd string (everything is zero-indexed).
Now we can read in the irradiance patterns we made and calculate the PV system power with PVMismatch:
End of explanation
"""
|
google-research/google-research | group_agnostic_fairness/data_utils/CreateUCISyntheticDataset.ipynb | apache-2.0 | from __future__ import division
import pandas as pd
import numpy as np
import json
import collections
import os
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_context('paper',font_scale=1.5)
dataset_base_dir = './group_agnostic_fairness/data/uci_adult/'
def sample_data(data_df, num=None, restrictions=None):
  """A recursive function that samples data according to the restrictions.

  Sampling is done with replacement.
  The restrictions determine how the data should be sampled.
  For example,
  column_types = ['sex:Female','income:>50K']
  percentages = [[0.33,0.6]]
  Returns an object of type Restriction, which can be used to sample a dataset
  in which fraction of females is 0.33, and female-base-rate (females with income >50k) is 0.66.
  Data for non-female group is sampled uniformly at random,
  hence will have similar base-rate as in the original distribution.
  Args:
    data_df: A pandas DataFrame containing the dataset that should be sampled.
    num: The total number of samples needed. Defaults to len(data_df).
    restrictions: A list of restrictions. Each restriction contains a column
      name, a list of types (a value for a bucket), and a list of percentages.
  Returns:
    A pandas DataFrame of size 'num' that holds up to each of the restrictions.
  """
  if not num:
    num = data_df.shape[0]
  if not restrictions:
    return data_df.sample(n=num, replace=True)
  num_other = num
  other_subset = np.full(data_df.shape[0], True)
  subsamples = []
  for tp, percent in zip(restrictions[0].types, restrictions[0].percentages):
    subset = data_df[restrictions[0].column_name] == tp
    num_samples = int(num * percent)
    # Recurse with the remaining restrictions applied inside this bucket.
    subsamples.append(
        sample_data(data_df[subset], num_samples, restrictions[1:]))
    num_other -= num_samples
    # FIX: use ~ for logical negation; unary minus (-subset) on a boolean
    # Series/array raises TypeError in modern numpy/pandas.
    other_subset = other_subset & (~subset)
  if num_other > 0:
    # Rows matching none of the buckets are sampled uniformly at random.
    subsamples.append(
        sample_data(data_df[other_subset], num_other, restrictions=None))
  # Concat and shuffle
  sample_df = pd.concat(subsamples, ignore_index=True).sample(frac=1.0)
  return sample_df
def sample_data_and_flip_class_label(data_df, frac, flip_dict):
  """Samples the specified fraction of rows and flips their class label.

  Args:
    data_df: A pandas DataFrame to corrupt.
    frac: Fraction of rows (in [0, 1]) whose labels should be flipped.
    flip_dict: A dictionary of the form {current_value:new_value}.
      Example {'<=50K':'>50K','>50K':'<=50K'}
  Returns:
    A shuffled pandas DataFrame with the specified fraction of column values
    flipped.
  """
  # Draw the rows to corrupt and rewrite their labels.
  flipped_df = data_df.sample(frac=frac).replace(to_replace=flip_dict)
  # Rows that were not drawn keep their original labels.
  untouched_df = data_df.drop(flipped_df.index).sample(frac=1.0)
  # Concat and shuffle
  return pd.concat([untouched_df, flipped_df]).sample(frac=1.0)
Restriction = collections.namedtuple(
    'Restriction', ['column_name', 'types', 'percentages'])
def create_restrictions(column_types, percentages):
  """Creates a list of restrictions.

  For example,
  column_types = ['sex:Female','income:>50K']
  percentages = [[0.33,0.6]]
  Returns an object of type Restriction, which can be used to sample a dataset
  in which fraction of females is 0.33, and female-base-rate (females with income >50k) is 0.66.
  Data for non-female group is sampled uniformly at random, hence will have similar base-rate as in the original distribution.
  Args:
    column_types: A list of column type strings. They must be of the form
      '(column name):(type name)'.
    percentages: A list of percentages for each column type.
  Returns:
    A list of restrictions
  """
  # Group the bucket names and their percentages per column.  Multiple specs
  # for the same column merge into one Restriction; first-seen column order
  # is preserved (dicts iterate in insertion order).
  types_by_column = collections.defaultdict(list)
  pcts_by_column = collections.defaultdict(list)
  if column_types and percentages:
    for spec, pct in zip(column_types, percentages):
      column, bucket = spec.split(':')
      types_by_column[column].append(bucket)
      pcts_by_column[column].append(pct)
  return [
      Restriction(column, types_by_column[column], pcts_by_column[column])
      for column in types_by_column
  ]
"""
Explanation: Copyright 2020 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Overview
Generate synthetic datasets for various settings by sampling the original UCI_Adult dataset:
Pre-requisite: Download the Adult train and test data files can be downloaded from: https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test and save them in the ./group_agnostic_fairness/data/uci_adult folder.
This notebook contains code to create synthetic datasets over the original uci_adult dataset. We provide code to control various parameters like "base-rate", "group-size", and "label-noise" and generate synthetic dataset by over-sampling / under-samping original uci_adult training dataset.
End of explanation
"""
# Load the raw UCI Adult training data (no header row in the file).
TRAIN_FILE = os.path.join(dataset_base_dir,'adult.data')
feature_names = [
    "age", "workclass", "fnlwgt", "education", "education-num",
    "marital-status", "occupation", "relationship", "race", "sex",
    "capital-gain", "capital-loss", "hours-per-week", "native-country", "income"
]
# NOTE(review): the `as TRAIN_FILE` rebinds the path name to the file handle;
# harmless here, but TRAIN_FILE is no longer a path after this block.
with open(TRAIN_FILE, "r") as TRAIN_FILE:
  train_df = pd.read_csv(TRAIN_FILE,sep=',',names=feature_names)
train_df.head()
"""
Explanation: Load Training Dataset
End of explanation
"""
# Report group sizes and base rates of the ORIGINAL distribution.
print('Female group-size in original train-data: {}'.format(len(train_df[(train_df.sex=='Female')])/len(train_df)))
print('Male group-size in original data: {}'.format(len(train_df[(train_df.sex!='Female')])/len(train_df)))
print('Female base-rate in original train-data: {}'.format(len(train_df[(train_df.sex=='Female') & (train_df.income == '>50K')])/len(train_df[(train_df.sex=='Female')])))
print('Male base-rate in original data: {}'.format(len(train_df[(train_df.sex!='Female') & (train_df.income == '>50K')])/len(train_df[(train_df.sex!='Female')])))
sns.catplot(data=train_df,x='income',hue='sex',kind='count',size=3)
plt.show()
"""
Explanation: Sample data to have a specified group-size: Testing if the data is sampled as expected:
Original data distribution
End of explanation
"""
# Sanity-check sampling with a group-size restriction (40% Female).
column_types = ['sex:Female']
percentages = [0.4]
restrictions = create_restrictions(column_types,percentages)
print('Sampling data with restrictions: {}'.format(restrictions))
sampled_train_df = sample_data(train_df, restrictions=restrictions)
# Base-rate remains similar as in original distribution, group-size changes
print('Female group-size in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex=='Female')])/len(sampled_train_df)))
print('Male group-size in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex!='Female')])/len(sampled_train_df)))
print('Female base-rate in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex=='Female') & (sampled_train_df.income == '>50K')])/len(sampled_train_df[(sampled_train_df.sex=='Female')])))
print('Male base-rate in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex!='Female') & (sampled_train_df.income == '>50K')])/len(sampled_train_df[(sampled_train_df.sex!='Female')])))
# Group sizes changes, base-rate remain the same.
sns.catplot(data=sampled_train_df,x='income',hue='sex',hue_order=['Male','Female'],order=['<=50K','>50K'],kind='count',size=3)
plt.show()
"""
Explanation: Synthetic data distribution
End of explanation
"""
# Original distribution again, for comparison with the base-rate experiment.
print('Female group-size in synthetic data: {}'.format(len(train_df[(train_df.sex=='Female')])/len(train_df)))
print('Male group-size in synthetic data: {}'.format(len(train_df[(train_df.sex!='Female')])/len(train_df)))
print('Female base-rate in original train-data: {}'.format(len(train_df[(train_df.sex=='Female') & (train_df.income == '>50K')])/len(train_df[(train_df.sex=='Female')])))
print('Male base-rate in original data: {}'.format(len(train_df[(train_df.sex!='Female') & (train_df.income == '>50K')])/len(train_df[(train_df.sex!='Female')])))
sns.catplot(data=train_df,x='income',hue='sex',kind='count',size=3)
plt.show()
"""
Explanation: Sample data to have a specified group base-rate: Testing if the data is sampled as expected:
Original data distribution
End of explanation
"""
# Sanity-check sampling with group-size (33% Female) AND base-rate (30%).
column_types = ['sex:Female','income:>50K']
percentages = [0.33,0.30]
restrictions = create_restrictions(column_types,percentages)
print('Sampling data with restrictions: {}'.format(restrictions))
sampled_train_df = sample_data(train_df, restrictions=restrictions)
print('Female group-size in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex=='Female')])/len(sampled_train_df)))
print('Male group-size in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex!='Female')])/len(sampled_train_df)))
print('Female base-rate in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex=='Female') & (sampled_train_df.income == '>50K')])/len(sampled_train_df[(sampled_train_df.sex=='Female')])))
print('Male base-rate in synthetic data: {}'.format(len(sampled_train_df[(sampled_train_df.sex!='Female') & (sampled_train_df.income == '>50K')])/len(sampled_train_df[(sampled_train_df.sex!='Female')])))
# Group sizes remain the same, base-rate changes.
sns.catplot(data=sampled_train_df,x='income',hue='sex',hue_order=['Male','Female'],order=['<=50K','>50K'],kind='count',size=3)
plt.show()
# Generate 10 dataset versions per label-noise fraction and write them to csv.
fractions = [0.1, 0.2, 0.3, 0.4, 0.5]
for frac in fractions:
  for version in np.arange(10):
    sampled_train_df = sample_data_and_flip_class_label(
        train_df, frac=frac, flip_dict={'<=50K':'>50K','>50K':'<=50K'})
    # FIX: os.path.join(...) was missing its closing parenthesis, which made
    # this cell a SyntaxError.
    output_file_path = os.path.join(
        dataset_base_dir,
        'income_flip_labels{}_version{}_train.csv'.format(frac, version))
    # The with-statement closes the file; the old trailing close() was redundant.
    with open(output_file_path, mode="w") as output_file:
      sampled_train_df.to_csv(output_file, index=False,
                              columns=feature_names, header=False)
"""
Explanation: Sample data with specified label-noise
End of explanation
"""
# Generate 10 dataset versions per Female group-size and write them to csv.
column_types = ['sex:Female']
_percentages = [[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9]]
for percentages in _percentages:
  for version in np.arange(10):
    restrictions = create_restrictions(column_types, percentages)
    sampled_train_df = sample_data(train_df, restrictions=restrictions)
    # FIX: os.path.join(...) was missing its closing parenthesis, which made
    # this cell a SyntaxError.
    output_file_path = os.path.join(
        dataset_base_dir,
        'female_groupsize{}_version{}_train.csv'.format(percentages[0], version))
    # The with-statement closes the file; the old trailing close() was redundant.
    with open(output_file_path, mode="w") as output_file:
      sampled_train_df.to_csv(output_file, index=False,
                              columns=feature_names, header=False)
"""
Explanation: Sample data with specified group-sizes
End of explanation
"""
# Generate 10 dataset versions per Female base-rate and write them to csv.
column_types = ['sex:Female','income:>50K']
_percentages = [[0.33,0.1],[0.33,0.2],[0.33,0.3],[0.33,0.4],[0.33,0.5],[0.33,0.6],[0.33,0.7],[0.33,0.8],[0.33,0.9]]
for percentages in _percentages:
  for version in np.arange(10):
    restrictions = create_restrictions(column_types, percentages)
    sampled_train_df = sample_data(train_df, restrictions=restrictions)
    # FIX: os.path.join(...) was missing its closing parenthesis, which made
    # this cell a SyntaxError.
    output_file_path = os.path.join(
        dataset_base_dir,
        'female_baserate{}_version{}_train.csv'.format(percentages[1], version))
    # The with-statement closes the file; the old trailing close() was redundant.
    with open(output_file_path, mode="w") as output_file:
      sampled_train_df.to_csv(output_file, index=False,
                              columns=feature_names, header=False)
"""
Explanation: Sample data with specified base-rates
End of explanation
"""
|
balarsen/pymc_learning | DirichletProcess/Sunspot_example.ipynb | bsd-3-clause | # pymc3.distributions.DensityDist?
import matplotlib.pyplot as plt
import matplotlib as mpl
from pymc3 import Model, Normal, Slice
from pymc3 import sample
from pymc3 import traceplot
from pymc3.distributions import Interpolated
from theano import as_op
import theano.tensor as tt
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
import numpy as np
import pymc3 as pm
import scipy as sp
import seaborn as sns
from statsmodels.datasets import get_rdataset
from theano import tensor as tt
%matplotlib inline
%load_ext version_information
%version_information pymc3, statsmodels, pandas
"""
Explanation: The Dirichlet process mixture model is incredibly flexible in terms of the family of parametric component distributions $\{f_\theta \mid \theta \in \Theta\}$. We illustrate this flexibility below by using Poisson component distributions to estimate the density of sunspots per year.
End of explanation
"""
# Load yearly sunspot counts (R's sunspot.year dataset) via statsmodels.
sunspot_df = get_rdataset('sunspot.year', cache=True).data
sunspot_df.head()
sunspot_df.plot(x='time')
"""
Explanation: Generating data
End of explanation
"""
SEED = 8675309 # from random.org
np.random.seed(SEED)
K = 50  # truncation level of the stick-breaking approximation
N = sunspot_df.shape[0]
def stick_breaking(beta):
    # Stick-breaking construction: w_i = beta_i * prod_{j<i}(1 - beta_j).
    portion_remaining = tt.concatenate([[1], tt.extra_ops.cumprod(1 - beta)[:-1]])
    return beta * portion_remaining
# Truncated Dirichlet-process mixture of K Poisson components over the counts.
with pm.Model() as model:
    alpha = pm.Gamma('alpha', 1., 1.)
    beta = pm.Beta('beta', 1, alpha, shape=K)
    w = pm.Deterministic('w', stick_breaking(beta))
    mu = pm.Uniform('mu', 0., 300., shape=K)
    obs = pm.Mixture('obs', w, pm.Poisson.dist(mu), observed=sunspot_df['sunspot.year'])
# Metropolis sampling; njobs=6 runs six chains in parallel.
with model:
    step = pm.Metropolis()
    trace = pm.sample(10000, step=step, tune=90000, random_seed=SEED, njobs=6)
"""
Explanation: Model specification
Our initial beliefs about the parameters are quite informative (sd=1) and a bit off the true values.
We will use the model:
$\alpha \sim Gamma(1,1) \\
\beta_1,...,\beta_K \sim Beta(1,\alpha) \\
w_i = \beta_i \prod_{j=1}^{i-1}(1-\beta_j) \\
\lambda_1,...,\lambda_K \sim U(0,300) \\
x \mid w_i,\lambda_i \sim \sum_{i=1}^{K} w_i \, Poisson(\lambda_i)$
End of explanation
"""
# Trace of the DP concentration parameter alpha.
pm.traceplot(trace, varnames=['alpha']);
"""
Explanation: For the sunspot model, the posterior distribution of αα is concentrated between 0.6 and 1.2, indicating that we should expect more components to contribute non-negligible amounts to the mixture than for the Old Faithful waiting time model.
End of explanation
"""
# Posterior expected mixture weight per component.
fig, ax = plt.subplots(figsize=(8, 6))
plot_w = np.arange(K) + 1
ax.bar(plot_w - 0.5, trace['w'].mean(axis=0), width=1., lw=0);
ax.set_xlim(0.5, K);
ax.set_xlabel('Component');
ax.set_ylabel('Posterior expected mixture weight');
"""
Explanation: Indeed, we see that between ten and fifteen mixture components have appreciable posterior expected weight.
End of explanation
"""
# Posterior density estimate: per-sample Poisson-mixture pmfs, their mean,
# and a 95% credible band, plotted over the data histogram.
# NOTE(review): `normed=True` is the deprecated pre-matplotlib-3 spelling of
# `density=True`.
x_plot = np.arange(250)
post_pmf_contribs = sp.stats.poisson.pmf(np.atleast_3d(x_plot),
                                         trace['mu'][:, np.newaxis, :])
post_pmfs = (trace['w'][:, np.newaxis, :] * post_pmf_contribs).sum(axis=-1)
post_pmf_low, post_pmf_high = np.percentile(post_pmfs, [2.5, 97.5], axis=0)
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(sunspot_df['sunspot.year'].values, bins=40, normed=True, lw=0, alpha=0.75);
ax.fill_between(x_plot, post_pmf_low, post_pmf_high,
                color='gray', alpha=0.45)
ax.plot(x_plot, post_pmfs[0],
        c='gray', label='Posterior sample densities');
ax.plot(x_plot, post_pmfs[::200].T, c='gray');
ax.plot(x_plot, post_pmfs.mean(axis=0),
        c='k', label='Posterior expected density');
ax.set_xlabel('Yearly sunspot count');
ax.set_yticklabels([]);
ax.legend(loc=1);
"""
Explanation: We now calculate and plot the fitted density estimate.
End of explanation
"""
# Decompose the posterior expected density into its weighted components.
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(sunspot_df['sunspot.year'].values, bins=40, normed=True, lw=0, alpha=0.75);
ax.plot(x_plot, post_pmfs.mean(axis=0),
        c='k', label='Posterior expected density');
ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pmf_contribs).mean(axis=0)[:, 0],
        '--', c='k', label='Posterior expected\nmixture components\n(weighted)');
ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pmf_contribs).mean(axis=0),
        '--', c='k');
ax.set_xlabel('Yearly sunspot count');
ax.set_yticklabels([]);
ax.legend(loc=1);
"""
Explanation: Again, we can decompose the posterior expected density into weighted mixture densities.
End of explanation
"""
|
lmcinnes/hdbscan | notebooks/Performance data generation .ipynb | bsd-3-clause | import sklearn.datasets
import numpy as np
import pandas as pd
import subprocess
import time
"""
Explanation: Performance timings data generation
We need to generate data comparing performance of the reference implementation of HDBSCAN and various historical versions of the hdbscan library. We need to do this varying over dataset size so we can get an idea of scaling, and we also need to consider various dimension sizes. To get all this done we'll need some handy modules: sklearn.datasets to generate fake data for clustering; numpy and pandas for easy manipulation of vectors and dataframes of results; and subprocess and time so we can actually fork off and time the actual Java refeence implementation.
End of explanation
"""
def get_reference_timings(data, filename='tmp_data.csv',
                          jarfile='/Users/leland/Source/HDBSCAN_Star/HDBSCAN_Star.jar',
                          min_points=5, min_cluster_size=5):
    """Run the Java reference HDBSCAN* on `data` and collect timings.

    Returns a (wall_clock_seconds, internal_timings) tuple, where
    internal_timings is a pandas Series of the tool's self-reported
    millisecond timings parsed from its stdout.
    """
    # Create the required csv file
    # NOTE(review): the data is always written to the literal 'tmp_data.csv',
    # while the jar reads `filename` -- a non-default filename would make the
    # jar read a different (stale or missing) file; confirm intent.
    pd.DataFrame(data).to_csv('tmp_data.csv', header=False, index=False)
    # Run the clustering via a subprocess call and grab the output as it
    # has timing information to be parsed
    start_time = time.time()
    internal_timing = subprocess.check_output(['java', '-jar', jarfile,
                                               'file={}'.format(filename),
                                               'minPts={}'.format(min_points),
                                               'minClSize={}'.format(min_cluster_size),
                                               'compact=true'])
    time_taken = time.time() - start_time
    # Parse internal timing info into a pandas series for later use
    # NOTE(review): under Python 3 check_output returns bytes, so
    # .split('\n') would fail; this notebook presumably ran under Python 2.
    result_dict = {}
    for line in internal_timing.split('\n'):
        if ':' in line:
            key, value = line.split(':')
            key = key.replace(' (ms)', '')
            key = key.replace('Time to ', '')
            key = key.replace('Overall ', '')
            value = int(value)
            result_dict[key] = value
    internal_timing = pd.Series(result_dict)
    return time_taken, internal_timing
"""
Explanation: Now we need a function to actually time the reference implementation. We can do external timing use the time module, and the Java program also returns internal timings, which we can parse out and save to a dataframe. In practice this is just a matter of using subprocess an the appropriate commandline parameters for the reference code.
End of explanation
"""
# Run the Java reference implementation over a grid of dimensions and sizes,
# collecting both external wall-clock time and the tool's internal timings.
internal_timing = {}
external_timing = {}
for dataset_dimension in (2,5,10,25,50):
    for dataset_size in np.arange(1,17) * 8000:
        data, _ = sklearn.datasets.make_blobs(dataset_size,
                                              n_features=dataset_dimension,
                                              centers=dataset_dimension)
        (external_timing[(dataset_dimension, dataset_size)],
         internal_timing[(dataset_dimension, dataset_size)]) = get_reference_timings(data)
internal_timing_df = pd.DataFrame(internal_timing).T
external_timing_series = pd.Series(external_timing)
"""
Explanation: With that in hand we can run the code over a range of dimensions and dataset sizes and aggregate the results together in indexed pandas series or dataframes.
End of explanation
"""
# Persist the timing results for later analysis.
internal_timing_df.to_csv('reference_impl_internal_timings.csv')
external_timing_series.to_csv('reference_impl_external_timings.csv')
"""
Explanation: Now it is just a matter of saving these off to disk for later use.
End of explanation
"""
import hdbscan01
import hdbscan02
import hdbscan03
import hdbscan04
import hdbscan05
import hdbscan
"""
Explanation: Now we need to build up hdbscan timings, preferably over a range of hdbscan versions to show how the performance of the code has evolved (and improved!). To do this I pulled down historical versions and fudged them so that they exist in different namespaces and can live side by side. We can import them all like so ...
End of explanation
"""
def _benchmark_hdbscan_version(hdbscan_module, size_step):
    """Time ``hdbscan_module.HDBSCAN().fit`` over a grid of synthetic datasets.

    Fits the clusterer on Gaussian blobs for dimensions (2, 5, 10, 25, 50)
    and dataset sizes ``size_step * (1..16)``.

    Returns a pandas Series of wall-clock fit times indexed by
    (dimension, size) tuples.
    """
    timings = {}
    for dataset_dimension in (2, 5, 10, 25, 50):
        for dataset_size in np.arange(1, 17) * size_step:
            data, _ = sklearn.datasets.make_blobs(dataset_size,
                                                  n_features=dataset_dimension,
                                                  centers=dataset_dimension)
            start_time = time.time()
            hdbscan_module.HDBSCAN().fit(data)
            timings[(dataset_dimension, dataset_size)] = time.time() - start_time
    return pd.Series(timings)

# Benchmark each historical release and save the results. Early versions
# were memory constrained, so they are run with smaller maximum dataset
# sizes (step 2000/4000 rather than 8000).
hdbscan01_timings_series = _benchmark_hdbscan_version(hdbscan01, 2000)
hdbscan01_timings_series.to_csv('hdbscan01_timings.csv')
hdbscan02_timings_series = _benchmark_hdbscan_version(hdbscan02, 2000)
hdbscan02_timings_series.to_csv('hdbscan02_timings.csv')
hdbscan03_timings_series = _benchmark_hdbscan_version(hdbscan03, 4000)
hdbscan03_timings_series.to_csv('hdbscan03_timings.csv')
hdbscan04_timings_series = _benchmark_hdbscan_version(hdbscan04, 8000)
hdbscan04_timings_series.to_csv('hdbscan04_timings.csv')
hdbscan05_timings_series = _benchmark_hdbscan_version(hdbscan05, 8000)
hdbscan05_timings_series.to_csv('hdbscan05_timings.csv')
"""
Explanation: Now we simply go through each version and run over a range of dimensions and dataset sizes (ranging up to smaller sizes in the case of early versions which were memory constrained).
End of explanation
"""
# Time the current development version (to become 0.6) on the same grid of
# dataset dimensions and sizes as the larger historical runs, then save the
# results for later plotting.
hdbscan06_timings = {}
for n_dims in (2, 5, 10, 25, 50):
    for n_points in np.arange(1, 17) * 8000:
        data, _ = sklearn.datasets.make_blobs(n_points,
                                              n_features=n_dims,
                                              centers=n_dims)
        t0 = time.time()
        hdbscan.HDBSCAN().fit(data)
        hdbscan06_timings[(n_dims, n_points)] = time.time() - t0
hdbscan06_timings_series = pd.Series(hdbscan06_timings)
hdbscan06_timings_series.to_csv('hdbscan06_timings.csv')
"""
Explanation: Finally we can run the current code (soon to be version 0.6).
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_tf_dics.ipynb | bsd-3-clause | # Author: Roman Goj <roman.goj@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.event import make_fixed_length_events
from mne.datasets import sample
from mne.time_frequency import csd_epochs
from mne.beamformer import tf_dics
from mne.viz import plot_source_spectrogram
print(__doc__)
# File locations within the MNE sample dataset (downloaded on first use).
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
noise_fname = data_path + '/MEG/sample/ernoise_raw.fif'  # empty-room recording
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
# Left-hemisphere auditory label, used below to restrict the source-space
# computation for speed.
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
"""
Explanation: Time-frequency beamforming using DICS
Compute DICS source power [1]_ in a grid of time-frequency windows and
display results.
References
.. [1] Dalal et al. Five-dimensional neuroimaging: Localization of the
time-frequency dynamics of cortical activity.
NeuroImage (2008) vol. 40 (4) pp. 1686-1700
End of explanation
"""
# Load the recording, mark the known bad magnetometer, and restrict to a
# subset of left-temporal magnetometers to keep the example fast. For a
# solution based on all MEG channels use meg=True, selection=None and add
# mag=4e-12 to the reject dictionary.
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443']  # 1 bad MEG channel
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
                       stim=False, exclude='bads',
                       selection=left_temporal_channels)
raw.pick_channels([raw.ch_names[idx] for idx in picks])
# Re-normalize the empty-room projectors after channel subselection.
raw.info.normalize_proj()
reject = dict(mag=4e-12)

# Time windows: beamform over a wider range (tmin..tmax) than is later
# displayed (tmin_plot..tmax_plot) so that every displayed time bin is an
# average over the same number of time windows.
tmin, tmax, tstep = -0.55, 0.75, 0.05  # s
tmin_plot, tmax_plot = -0.3, 0.5  # s

# Epoch the data around the events of interest.
event_id = 1
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    baseline=None, preload=True, proj=True, reject=reject)

# Build matching noise epochs from the empty-room recording, processed the
# same way as the data (bad channel, channel subset, projector norm).
raw_noise = mne.io.read_raw_fif(noise_fname, preload=True)
raw_noise.info['bads'] = ['MEG 2443']  # 1 bad MEG channel
raw_noise.pick_channels([raw_noise.ch_names[idx] for idx in picks])
raw_noise.info.normalize_proj()
events_noise = make_fixed_length_events(raw_noise, event_id)
epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin_plot,
                          tmax_plot, baseline=None, preload=True, proj=True,
                          reject=reject)
epochs_noise.info.normalize_proj()
epochs_noise.apply_proj()
# Keep only as many noise epochs as there are data epochs.
epochs_noise = epochs_noise[:len(epochs.events)]

# Forward operator and the cortical label restricting the computation.
forward = mne.read_forward_solution(fname_fwd)
label = mne.read_label(fname_label)
"""
Explanation: Read raw data
End of explanation
"""
# Frequency bins and per-bin window lengths as in Dalal et al. 2008.
freq_bins = [(4, 12), (12, 30), (30, 55), (65, 300)]  # Hz
win_lengths = [0.3, 0.2, 0.15, 0.1]  # s
# FFT length for each frequency range; powers of 2 are fastest.
n_ffts = [256, 128, 128, 128]
# Set True to subtract the evoked response prior to computation.
subtract_evoked = False

# Noise cross-spectral density for each frequency bin, computed from the
# empty-room epochs over a window matching that bin's length. To calculate
# noise from the baseline period in the data, use `epochs` in place of
# `epochs_noise`.
noise_csds = [
    csd_epochs(epochs_noise, mode='fourier', fmin=fmin, fmax=fmax,
               fsum=True, tmin=-win_length, tmax=0, n_fft=n_fft)
    for (fmin, fmax), win_length, n_fft in zip(freq_bins, win_lengths, n_ffts)
]

# DICS solutions for each time-frequency window, restricted to the label in
# source space for faster computation (use label=None for the full solution).
stcs = tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
               freq_bins=freq_bins, subtract_evoked=subtract_evoked,
               n_ffts=n_ffts, reg=0.001, label=label)

# Source spectrogram for the source with maximum activity. The displayed
# time range is smaller than the beamformed one, so all time bins shown are
# smoothed across an identical number of time windows.
plot_source_spectrogram(stcs, freq_bins, tmin=tmin_plot, tmax=tmax_plot,
                        source_index=None, colorbar=True)
"""
Explanation: Time-frequency beamforming based on DICS
End of explanation
"""
|
phockett/ePSproc | notebooks/LF_AF_verification_tests_060720_tidy.ipynb | gpl-3.0 | # Imports
import numpy as np
import pandas as pd
import xarray as xr
# Special functions
# from scipy.special import sph_harm
import spherical_functions as sf
import quaternion
# Performance & benchmarking libraries
# from joblib import Memory
# import xyzpy as xyz
import numba as nb
# Timings with ttictoc or time
# https://github.com/hector-sab/ttictoc
# from ttictoc import TicToc
import time
# Package fns.
# For module testing, include path to module here
import sys
import os
# Path to the local ePSproc checkout on each test machine; append it to
# sys.path so the development module is importable.
win_path = r'D:\code\github\ePSproc'            # Windows test machine
nix_path = r'/home/femtolab/github/ePSproc/'    # Linux test machine
modPath = win_path if sys.platform == "win32" else nix_path
sys.path.append(modPath)
import epsproc as ep
# TODO: tidy this up!
from epsproc.util import matEleSelector
from epsproc.geomFunc import geomCalc, geomUtils
from epsproc.geomFunc.lfblmGeom import lfblmXprod
# Plotters
from epsproc.plot import hvPlotters
hvPlotters.setPlotters()
"""
Explanation: ePSproc LF/AF function verification & tests
06/07/20 v3 Updated plotting codes & added AF tests.
26/06/20 v2
19/06/20 v1
For LF and AF calculations, trying to get to the bottom of issues with magnitudes and/or phases and/or formalism differences with raw ePS matrix elements.
Formalism
Test cases:
ePS matrix elements with formalism from [1], for LF cross-sections and $\beta_{2}$
ePSproc AF calculations, for LF cross-sections and $\beta_{2}$.
The AF calculations should reduce to the LF case for an isotropic ensemble, and both cases should match the "direct" ePS GetCro outputs (LF). Hopefully this should clear up any outstanding issues with normalisation, units, scale-factors, phase conventions etc. For details of the AF code, see the method dev notes.
(For MF verification, see the MFPADs and associated $\beta_{LM}$ notebooks, where the numerics are verified for the NO2 test case, although the total cross-sections may still have issues (for more discussion, see the Matlab code release software paper). The geometric tensor version of the MF calculations is also verified against the same test case.)
[1] Cross section and asymmetry parameter calculation for sulfur 1s photoionization of SF6, A. P. P. Natalense and R. R. Lucchese, J. Chem. Phys. 111, 5344 (1999), http://dx.doi.org/10.1063/1.479794
[2] Reid, Katharine L., and Jonathan G. Underwood. “Extracting Molecular Axis Alignment from Photoelectron Angular Distributions.” The Journal of Chemical Physics 112, no. 8 (2000): 3643. https://doi.org/10.1063/1.480517.
[3] Underwood, Jonathan G., and Katharine L. Reid. “Time-Resolved Photoelectron Angular Distributions as a Probe of Intramolecular Dynamics: Connecting the Molecular Frame and the Laboratory Frame.” The Journal of Chemical Physics 113, no. 3 (2000): 1067. https://doi.org/10.1063/1.481918.
[4] Stolow, Albert, and Jonathan G. Underwood. “Time-Resolved Photoelectron Spectroscopy of Non-Adiabatic Dynamics in Polyatomic Molecules.” In Advances in Chemical Physics, edited by Stuart A. Rice, 139:497–584. Advances in Chemical Physics. Hoboken, NJ, USA: John Wiley & Sons, Inc., 2008. https://doi.org/10.1002/9780470259498.ch6.
Formalism: LF case with CG terms
As given in ref. [1]. This is now implemented in implemented in ePSproc.lfblmGeom. NOTE - that the $M$ term here is an MF projection term, and should be summed over for the final LF result.
The matrix elements $I_{\mathbf{k},\hat{n}}^{(L,V)}$ of Eqs. (8)
and (9) can be expanded in terms of the $X_{lh}^{p\mu}$ functions
of Eq. (7) as$^{14}$
\begin{equation}
I_{\mathbf{k},\hat{n}}^{(L,V)}=\left[\frac{4\pi}{3}\right]^{1/2}\sum_{p\mu lhv}I_{lhv}^{p\mu(L,V)}X_{lh}^{p\mu}(\hat{k})X_{1v}^{p_{v}\mu_{v}}(\hat{n}).
\end{equation}
{[}Note here the final term gives polarization (dipole) terms, with
$l=1$, $h=v$, corresponding to a photon with one unit of angular
momentum and projections $v=-1,0,1$, correlated with irreducible
representations $p_{v}\mu_{v}$.{]}
The differential cross section is given by
\begin{equation}
\frac{d\sigma^{L,V}}{d\Omega_{\mathbf{k}}}=\frac{\sigma^{L,V}}{4\pi}[1+\beta_{\mathbf{k}}^{L,V}P_{2}(\cos\theta)],
\end{equation}
where the asymmetry parameter can be written as$^{14}$
\begin{eqnarray}
\beta_{\mathbf{k}}^{L,V} & = & \frac{3}{5}\frac{1}{\sum_{p\mu lhv}|I_{\mathbf{k},\hat{n}}^{(L,V)}|^{2}}\sum_{\stackrel{p\mu lhvmm_{v}}{p'\mu'l'h'v'm'm'{v}}}(-1)^{m'-m{v}}I_{\mathbf{k},\hat{n}}^{(L,V)}\nonumber \
& \times & \left(I_{\mathbf{k},\hat{n}}^{(L,V)}\right)^{}b_{lhm}^{p\mu}b_{l'h'm'}^{p'\mu'}b_{1vm_{v}}^{p_{v}\mu_{v}}b_{1v'm'{v}}^{p'{v}\mu'{v}*}\nonumber \
& \times & [(2l+1)(2l'+1)]^{1/2}(1100|20)(l'l00|20)\nonumber \
& \times & (11-m'{v}m_{v}|2M')(l'l-m'm|2-M'),
\end{eqnarray}
and the $(l'lm'm|L'M')$ are the usual Clebsch--Gordan coefficients.
The total cross section is
\begin{equation}
\sigma^{L,V}=\frac{4\pi^{2}}{3c}E\sum_{p\mu lhv}|I_{\mathbf{k},\hat{n}}^{(L,V)}|^{2},
\end{equation}
where c is the speed of light.
AF formalism
The original (full) form for the AF equations, as implemented in ePSproc.afblm (NOTE - there are some corrections to be made here, which are not yet implemented in the base code, but are now in the geometric version):
\begin{eqnarray}
\beta_{L,-M}^{\mu_{i},\mu_{f}} & = & \sum_{l,m,\mu}\sum_{l',m',\mu'}(-1)^{M}(-1)^{m}(-1)^{(\mu'-\mu_{0})}\left(\frac{(2l+1)(2l'+1)(2L+1)}{4\pi}\right)^{1/2}\left(\begin{array}{ccc}
l & l' & L\
0 & 0 & 0
\end{array}\right)\left(\begin{array}{ccc}
l & l' & L\
-m & m' & -M
\end{array}\right)\nonumber \
& \times & I_{l,m,\mu}^{p_{i}\mu_{i},p_{f}\mu_{f}}(E)I_{l',m',\mu'}^{p_{i}\mu_{i},p_{f}\mu_{f}*}(E)\
& \times & \sum_{P,R,R'}(2P+1)(-1)^{(R'-R)}\left(\begin{array}{ccc}
1 & 1 & P\
\mu_{0} & -\mu_{0} & R
\end{array}\right)\left(\begin{array}{ccc}
1 & 1 & P\
\mu & -\mu' & R'
\end{array}\right)\
& \times & \sum_{K,Q,S}(2K+1)^{1/2}(-1)^{K+Q}\left(\begin{array}{ccc}
P & K & L\
R & -Q & -M
\end{array}\right)\left(\begin{array}{ccc}
P & K & L\
R' & -S & S-R'
\end{array}\right)A_{Q,S}^{K}(t)
\end{eqnarray}
Where $I_{l,m,\mu}^{p_{i}\mu_{i},p_{f}\mu_{f}}(E)$ are the energy-dependent dipole matrix elements, and $A_{Q,S}^{K}(t)$ define the alignment parameters.
In terms of the geometric parameters, this can be rewritten as:
\begin{eqnarray}
\beta_{L,-M}^{\mu_{i},\mu_{f}} & =(-1)^{M} & \sum_{P,R',R}{[P]^{\frac{1}{2}}}{E_{P-R}(\hat{e};\mu_{0})}\sum_{l,m,\mu}\sum_{l',m',\mu'}(-1)^{(\mu'-\mu_{0})}{\Lambda_{R'}(\mu,P,R')B_{L,-M}(l,l',m,m')}I_{l,m,\mu}^{p_{i}\mu_{i},p_{f}\mu_{f}}(E)I_{l',m',\mu'}^{p_{i}\mu_{i},p_{f}\mu_{f}*}(E)\sum_{K,Q,S}\Delta_{L,M}(K,Q,S)A_{Q,S}^{K}(t)\label{eq:BLM-tidy-prod-2}
\end{eqnarray}
See the method dev notebook for more details. Both methods gave the same results for N2 test cases, so are at least consistent, but do not currently match ePS GetCro outputs for the LF case.
Numerics
In both LF and AF cases, the numerics tested herein are based on the geometric tensor expansion code, which has been verified for the MF case as noted above (for PADs at a single energy).
A few additional notes on the implementations...
The matrix elements used are taken from the DumpIdy output segments of the ePS output file, which provide "phase corrected and properly normalized dynamical coefs".
The matrix elements output by ePS are assumed to correspond to $I_{lhv}^{p\mu(L,V)}$ as defined above.
The Scale Factor (SF) "to sqrt Mbarn" output with the matrix elements is assumed to correspond to the $\frac{4\pi^{2}}{3c}E$ term defined above, plus any other required numerical factors ($4\pi$ terms and similar).
The SF is energy dependent, but not continuum (or partial wave) dependent.
If correct, then using matrix elements * scale factor, should give correct results (as a function of $E$), while omitting the scale factor should still give correct PADs at any given $E$, but incorrect total cross-section and energy scaling.
This may be incorrect, and some other assumptions are tested herein.
The AF and LF case should match for an isotropic distribution, defined as $A^{0}_{0,0}=1$. Additional normalisation required here...?
A factor of $\sqrt{(2K+1)}/8\pi^2$ might be required for correct normalisation, although shouldn't matter in this case. (See eqn. 47 in [4].)
For the LF case, as defined above, conversion from Legendre-normalised $\beta$ to spherical harmonic normalised $\beta$ is required for comparison with the AF formalism, where $\beta^{Sph}_{L,0} = \sqrt{(2L+1)/4\pi}\beta^{Lg}$
Set up
End of explanation
"""
# Load sample N2 photoionization data shipped with ePSproc (modPath/data).
dataPath = os.path.join(modPath, 'data', 'photoionization')
dataFile = os.path.join(dataPath, 'n2_3sg_0.1-50.1eV_A2.inp.out') # Set for sample N2 data for testing
# Scan data file for matrix elements and GetCro cross-section records.
dataSet = ep.readMatEle(fileIn = dataFile)
dataXS = ep.readMatEle(fileIn = dataFile, recordType = 'CrossSection') # XS info currently not set in NO2 sample file.
"""
Explanation: Test N2 case
Load data
End of explanation
"""
# Reference LF cross-sections (SIGMA) from ePS GetCro, plotted vs. Eke
# using Xarray functionality.
dataXS[0].sel({'Type':'L', 'XC':'SIGMA'}).plot.line(x='Eke');
# Reference beta_2 anisotropy parameter (BETA) vs. Eke.
dataXS[0].sel({'Type':'L', 'XC':'BETA'}).plot.line(x='Eke');
"""
Explanation: Reference results from GetCro
End of explanation
"""
# Inspect the matrix-element coordinates.
dataSet[0].coords

# Calculation parameters: no SF scaling, symmetries kept separate.
SFflag = False   # Multiply matrix elements by SF?
symSum = False   # Sum over symmetries?
phaseConvention = 'S'
thres = 1e-2
selDims = {'it':1, 'Type':'L'}
thresDims = 'Eke'

# QN orderings for testing - NOTE ORDERING HERE may affect CG term!!!
dlistMatE = ['lp', 'l', 'L', 'mp', 'm', 'M'] # Match published terms
dlistP = ['p1', 'p2', 'L', 'mup', 'mu', 'M']
# Alternative "standard" orderings:
# dlistMatE = ['l', 'lp', 'L', 'm', 'mp', 'M'] # Standard terms
# dlistP = ['p1', 'p2', 'L', 'mu', 'mup', 'M']

# Compute LF betas from a copy of the matrix elements.
matE = dataSet[0].copy()
BetaNormXS, BetaNorm, BetaRaw, XSmatE = lfblmXprod(
    matE, symSum=symSum, SFflag=SFflag,
    thres=thres, thresDims=thresDims, selDims=selDims,
    phaseConvention=phaseConvention,
    dlistMatE=dlistMatE, dlistP=dlistP)

# BetaNormXS includes the normalisation term from the original formalism;
# XSmatE is the sum of the squared matrix elements used for that
# normalisation (== cross section without correct scaling).
plotThres = None
ep.util.matEleSelector(XSmatE, thres=plotThres, dims='Eke', sq=True,
                       drop=True).real.plot.line(x='Eke', col='Sym');
ep.util.matEleSelector(BetaNormXS, thres=plotThres, dims='Eke', sq=True,
                       drop=True).real.plot.line(x='Eke', col='Sym');
# Summing over M gives the final LF terms as defined above. The B0 term
# (== cross section) is not correctly scaled here; the B2 term matches the
# GetCro reference results.
ep.util.matEleSelector(BetaNormXS.unstack('LM').sum('M'), thres=plotThres,
                       dims='Eke', sq=True, drop=True).real.plot.line(x='Eke', col='Sym');
"""
Explanation: Test LF calculations (CG version)
Without sym summation
End of explanation
"""
# Repeat the LF (CG) calculation, this time summing over symmetries.
SFflag = False   # Multiply matrix elements by SF?
symSum = True    # Sum over symmetries?
phaseConvention = 'S'
thres = 1e-2
selDims = {'it':1, 'Type':'L'}
thresDims = 'Eke'

# QN orderings for testing - NOTE ORDERING HERE may affect CG term!!!
dlistMatE = ['lp', 'l', 'L', 'mp', 'm', 'M'] # Match published terms
dlistP = ['p1', 'p2', 'L', 'mup', 'mu', 'M']
# Alternative "standard" orderings:
# dlistMatE = ['l', 'lp', 'L', 'm', 'mp', 'M'] # Standard terms
# dlistP = ['p1', 'p2', 'L', 'mu', 'mup', 'M']

# Compute LF betas from a copy of the matrix elements.
matE = dataSet[0].copy()
BetaNormXS, BetaNorm, BetaRaw, XSmatE = lfblmXprod(
    matE, symSum=symSum, SFflag=SFflag,
    thres=thres, thresDims=thresDims, selDims=selDims,
    phaseConvention=phaseConvention,
    dlistMatE=dlistMatE, dlistP=dlistP)

# BetaNormXS includes the normalisation term from the original formalism;
# XSmatE is the sum of squared matrix elements (== unscaled cross section).
plotThres = None
ep.util.matEleSelector(XSmatE, thres=plotThres, dims='Eke', sq=True,
                       drop=True).real.plot.line(x='Eke');
ep.util.matEleSelector(BetaNormXS, thres=plotThres, dims='Eke', sq=True,
                       drop=True).real.plot.line(x='Eke');
# Summing over M gives the final LF terms. B0 is not correctly scaled here,
# but B2 matches the GetCro reference (overlaid dashed below).
ep.util.matEleSelector(BetaNormXS.unstack('LM').sum('M'), thres=plotThres,
                       dims='Eke', sq=True, drop=True).real.plot.line(x='Eke');
dataXS[0].sel({'Type':'L', 'XC':'BETA', 'Total':'All'}).plot.line(x='Eke', linestyle='dashed'); # Reference results
"""
Explanation: With sym summation
End of explanation
"""
# Check the total cross-section value: dividing the summed squared matrix
# elements by 3 (the degeneracy factor in the summation) reproduces the
# reference SIGMA curve. Role of the SF still to be confirmed.
# BetaNormXS.unstack('LM').sum('M').sel({'L':0})
((XSmatE)*1/3).real.plot.line(x='Eke')
# Overlay the GetCro reference cross-section (dashed) for comparison.
(dataXS[0].sel({'Type':'L', 'XC':'SIGMA', 'Sym':'All'})).plot.line(x='Eke', linestyle='dashed');
# OK
# Convert Legendre-normalised betas to spherical-harmonic normalisation,
# for comparison with the AF formalism.
BetaNormXS_sph = ep.conversion.conv_BL_BLM(BetaNormXS.unstack('LM').sum('M'), to='sph')
# Plot to check the converted (sph-normalised) betas.
ep.util.matEleSelector(BetaNormXS_sph, thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke');
"""
Explanation: *** 26/06/20 LF CG calcs OK for $\beta_2$, still need to fix cross-section scaling.
Conversion & normalisation
This result is $\beta_2$ for a Legendre Polynomial expansion, as shown above.
Test:
Conversion to $Y_{L,M}$ form.
LF-PADs to check numerics.
Confirm total cross-section value & normalisation.
End of explanation
"""
# AF calculation parameters (geometric tensor code); should reduce to the
# LF case above for an isotropic ensemble.
phaseConvention = 'E' # Set phase conventions used in the numerics - for ePolyScat matrix elements, set to 'E', to match defns. above.
symSum = True # Sum over symmetry groups, or keep separate?
SFflag = True # Include scaling factor to Mb in calculation?
SFflagRenorm = False # Renorm terms
BLMRenorm = False
thres = 1e-6
RX = ep.setPolGeoms() # Set default pol geoms (z,x,y), or will be set by mfblmXprod() defaults - FOR AF case this is only used to set 'z' geom for unity wigner D's - should rationalise this!
# Run the AF computation and report timing.
start = time.time()
mTermST, mTermS, mTermTest = ep.geomFunc.afblmXprod(dataSet[0], QNs = None, RX=RX, thres = thres, selDims = {'it':1, 'Type':'L'}, thresDims='Eke',
                                        symSum=symSum, SFflag=SFflag, phaseConvention=phaseConvention, BLMRenorm=BLMRenorm)
end = time.time()
print('Elapsed time = {0} seconds, for {1} energy points, {2} polarizations, threshold={3}.'.format((end-start), mTermST.Eke.size, RX.size, thres))
# Previously observed timings, for reference:
# Elapsed time = 3.3885273933410645 seconds, for 51 energy points, 3 polarizations, threshold=0.01.
# Elapsed time = 5.059587478637695 seconds, for 51 energy points, 3 polarizations, threshold=0.0001.
# Plot the AF cross-section and betas vs. Eke.
mTermST.XS.real.squeeze().plot.line(x='Eke');
ep.util.matEleSelector(mTermST, thres = 0.1, dims='Eke').real.squeeze().plot.line(x='Eke');
# Convert AF betas to Legendre normalisation and compare (dashed overlay)
# with the LF (CG) sph-normalised results from above.
mTermST_lg = ep.conversion.conv_BL_BLM(mTermST, to='lg')
ep.util.matEleSelector(mTermST_lg, thres = 0.1, dims='Eke').real.squeeze().plot.line(x='Eke');
ep.util.matEleSelector(mTermST_lg/mTermST_lg.sel({'L':0,'M':0}), thres = 0.1, dims='Eke').real.squeeze().plot.line(x='Eke');
ep.util.matEleSelector(BetaNormXS_sph, thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke', linestyle='dashed');
# Check B2 differences: ratio of the AF-derived betas (normalised to B0)
# to the LF (CG) sph-normalised reference betas.
diffVals = ((mTermST_lg/mTermST_lg.sel({'L':0,'M':0})).unstack('LM')/BetaNormXS_sph)
ep.util.matEleSelector(diffVals.sel({'L':2}), thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke');
# May just be pi factors issue?
# Close to pi/3, possible missing renorm + degen factor?
# Hmmm - have energy dependent norm factor. Weird - this is present both with and without SF included, although abs values change.
((mTermST.XS.real)/dataXS[0].sel({'Type':'L', 'XC':'SIGMA', 'Sym':'All'})) #.real.plot.line(x='Eke')
# Display the AF results object for inspection. (Was a bare `mTermST_sph`,
# which is never defined anywhere in this notebook and raised a NameError.)
mTermST
"""
Explanation: Test vs. AF code
Should be identical for unaligned case.
06/07/20 - quick testing, things are CLOSE to reference results above, but there still seems to be inconsistencies somewhere, possibly in renorm only?
End of explanation
"""
|
rbiswas4/simlib | example/ExploringOpSimOutputs.ipynb | mit | import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Required packages sqlachemy, pandas (both are part of anaconda distribution, or can be installed with a python installer)
# One step requires the LSST stack, can be skipped for a particular OPSIM database in question
import opsimsummary as oss
import opsimsummary.summarize_opsim as so
from sqlalchemy import create_engine
import pandas as pd
print so.__file__
# This step requires LSST SIMS package MAF. The main goal of this step is to set DD and WFD to integer keys that
# label an observation as Deep Drilling or for Wide Fast Deep.
# If you want to skip this step, you can use the next cell by uncommenting it, and commenting out this cell, if all you
# care about is the database used in this example. But there is no guarantee that the numbers in the cell below will work
# on other versions of opsim database outputs
from lsst.sims.maf import db
from lsst.sims.maf.utils import opsimUtils
# DD = 56
# WFD = 54
"""
Explanation: This is a notebook to explore opSim outputs in different ways, mostly useful to supernova analysis. We will look at the opsim output called (minion_1016)[https://confluence.lsstcorp.org/display/SIM/Summary+Table+Column+Descriptions].
Other relevant OpSim outputs are listed on a confluence page. All of the newer outputs are in the format of sqlite databases (zipped up to save space), while older OpSim versions are in ASCII files. The database table which I think has all of the information we will need is the Summary Table. The quantities in the columns of this table are described (here)[https://confluence.lsstcorp.org/display/SIM/Summary+Table+Column+Descriptions]. The OpSim outputs are official products that the LSST project provides. This notebook will demonstrate a way of exploring these outputs (which is by no means official or project supplied).
Note: The Summary Table is only about a GB in size, and has ~ million rows. For newish laptops, this is a small table and can be easily read into memory. For more constrained memory systems or computations that require a lot of memory, this may be a bad thing to do.
Gotcha: The column obsHistID is unique identifier of OpSim observations. Very often, you might end up with a table with multiple rows with the same obsHistID, but with other columns (like propID) having different values. These are not different observations, and a SN light curve corresponding to these observations should include only one of these.
A bit more information about the columns from a SN perspective
The LSST observations are over roughly half of the sky (~20000 sq degrees, or ~2\pi). This is covered by
overlapping pointings of the telescope each covering (~10 sq degrees). These pointings are currently thought of as being located on a grid along with dithers. The grid locations of the pointings are assigned a unique integer called fieldID which is associated with its location :(fieldRa, fieldDec ). The actual location of the pointings (including dithers) are in (ditheredRA, ditheredDec). There are a number of columns that hold quantities of similar information, and the exact definitions of each quantity is provided in the description. For most purposes filtSkyBrightness and FWHMeff rather than the other similar looking quantities are probably recommended. PropID (also described there) refers to proposals under which the observation was taken. For the basic SN purposes, we will want the proposals (Wide Fast Deep (WFD)) and (Deep Drilling Fields (DDF)). The propID for these quantities differs from one opsim output to another. To find out what these are, you can use OpSim utils as in this notebook or look at the PROPOSAL table (5 lines) like this : and pick the integers corresponding to DDcosmology1.conf and Universal ...
SELECT * FROM PROPOSAL;
362|../conf/survey/GalacticPlaneProp.conf|WL|27692624|enigma|1189
363|../conf/survey/SouthCelestialPole-18.conf|WL|27692368|enigma|1189
364|../conf/survey/Universal-18-0824B.conf|WLTSS|27692112|enigma|1189
365|../conf/survey/NorthEclipticSpur-18c.conf|WLTSS|27692240|enigma|1189
366|../conf/survey/DDcosmology1.conf|WLTSS|29065424|enigma|1189
End of explanation
"""
# Change dbname to point at your own location of the opsim output
dbname = '/Users/rbiswas/data/LSST/OpSimData/minion_1016_sqlite.db'
opsdb = db.OpsimDatabase(dbname)
propID, propTags = opsdb.fetchPropInfo()
DD = propTags['DD'][0]
WFD = propTags['WFD'][0]
print("The propID for the Deep Drilling Field {0:2d}".format(DD))
print("The propID for the Wide Fast Deep Field {0:2d}".format(WFD))
"""
Explanation: Read in OpSim output for modern versions: (sqlite formats)
Here we will use the opsim output minion_1016
I have downloaded this database, unzipped and use the variable dbname to point to its location
End of explanation
"""
engine = create_engine('sqlite:///' + dbname)
"""
Explanation: Read in the OpSim DataBase into a pandas dataFrame
Here we will read the opsim database into a pandas.DataFrame
End of explanation
"""
# Load to a dataframe
# Summary = pd.read_hdf('storage.h5', 'table') # This loads the table from a hdf file which I store as intermediate result, this is extremely quick
# Summary = pd.read_sql_table('Summary', engine, index_col='obsHistID'), #'loads all of the summary table'
# EnigmaDeep = pd.read_sql_query('SELECT * FROM SUMMARY WHERE PROPID is 366', engine) # loads only the DDF
"""
Explanation: The opsim database is a large file (approx 4.0 GB), but still possible to read into memory on new computers. You usually only need the Summary Table, which is about 900 MB. If you are only interested in the Deep Drilling Fields, you can use the read_sql_query to only select information pertaining to Deep Drilling Observations. This has a memory footprint of about 40 MB.
Obviously, you can reduce this further by narrowing down the columns to those of interest only. For the entire Summary Table, this step takes a few minutes on my computer.
If you are going to do the read from disk step very often, you can further reduce the time used by storing the output on disk as a hdf5 file and reading that into memory
We will look at three different Summaries of OpSim Runs. A summary of the
1. Deep Drilling fields: These are the observations corresponding to propID of the variable DD above, and are restricted to a handful of fields
2. WFD (Main) Survey: These are the observations corresponding to the propID of the variables WFD
3. Combined Survey: These are observations combining DEEP and WFD in the DDF. Note that this leads to duplicate observations which must be subsequently dropped.
End of explanation
"""
OpSim_combined = pd.read_sql_query('SELECT * FROM SUMMARY WHERE PROPID in ({0}, {1})'.format(DD, WFD), engine, index_col='obsHistID')
OpSim_combined.head()
"""
Explanation: If we knew ahead of time the proposal ID, then we could have done this quicker using
End of explanation
"""
len(OpSim_combined) == len(OpSim_combined.index.unique())
"""
Explanation: This could have duplicates unlike in the case of the OpSim Deep. This is because there are now two proposal IDs both of which may correspond to the same observation. We can check that this is indeed the case by:
End of explanation
"""
# BUG FIX: the original chained call
#   OpSim_combined.reset_index().drop_duplicates(subset='obsHistID', inplace=True)
# applied drop_duplicates to a *temporary* frame (reset_index() returns a new
# DataFrame), so its in-place result was discarded and OpSim_combined was never
# actually de-duplicated. Reassign the de-duplicated frame and restore
# 'obsHistID' as the index so the uniqueness check below can pass.
OpSim_combined = OpSim_combined.reset_index().drop_duplicates(subset='obsHistID').set_index('obsHistID')
OpSim_combined.head()
len(OpSim_combined) == len(OpSim_combined.index.unique())
OpSim_Deep = pd.read_sql_query('SELECT * FROM SUMMARY WHERE PROPID is ' + str(DD), engine, index_col='obsHistID')
"""
Explanation: Dropping the duplicate pointings can be done in the following way. The reset_index() call makes 'obsHistID' an ordinary column rather than an index, and drop_duplicates then removes the rows that share the same 'obsHistID' value.
End of explanation
"""
OpSim_combined.propID.unique()
OpSim_Deep.propID.unique()
# Get fieldID closest to ra, dec
fieldIDFromRADec = oss.fieldID(OpSim_combined, np.radians(190.), np.radians(-83.0))
print fieldIDFromRADec
"""
Explanation: We can also sub-select this from the all-encompassing Summay Table. This can be done in two way:
End of explanation
"""
OpSimDeepSummary = so.SummaryOpsim(OpSim_Deep)
OpSimCombinedSummary = so.SummaryOpsim(OpSim_combined)
"""
Explanation: Some properties of the OpSim Outputs
Construct our Summary
End of explanation
"""
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='mollweide');
fig = OpSimDeepSummary.showFields(ax=fig.axes[0], marker='o', s=40)
"""
Explanation: Plot the location of deep fields
End of explanation
"""
DDF_fieldIDs = OpSim_Deep.fieldID.unique()
DDF_fieldIDs
grouped = OpSim_Deep[['night', 'fieldID']].groupby(['night'])
fig, ax = plt.subplots()
grouped.agg({'fieldID': lambda x: x.unique().size}).hist(ax=ax)
fig.savefig('DeepOnly_uniqueDDFFields.png')
"""
Explanation: How often do multiple DDF fields get observed ?
End of explanation
"""
OpSimCombined_DDF_Fields = OpSim_combined.query('fieldID in @DDF_fieldIDs')
len(OpSimCombined_DDF_Fields)
OpSimCombined_DDF_Fields.fieldID.unique()
combinedGrouped = OpSimCombined_DDF_Fields.groupby('night')
fig, ax = plt.subplots()
combinedGrouped.agg({'fieldID': lambda x: x.unique().size}).hist(ax=ax)
fig.savefig('Combined_DDFvisits.png')
"""
Explanation: What about if we count WFD visits to these fields ?
End of explanation
"""
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='mollweide');
opsimFieldSummary = so.SummaryOpsim(OpSim_combined.query('fieldID==347'))
fig = opsimFieldSummary.showFields(ax=fig.axes[0], marker='o', s=40)
"""
Explanation: Example plotting of a single field
End of explanation
"""
OpSimCombinedSummary.showFields(ax=ax, marker='o', color='r', s=8)
fieldIDFromRADec = oss.fieldID(OpSim_Deep, np.radians(53.), np.radians(-28.))
print fieldIDFromRADec
fieldIDFromRADec = oss.fieldID(OpSim_Deep, np.radians(0.), np.radians(-45.))
print fieldIDFromRADec
fieldIDFromRADec = oss.fieldID(OpSim_combined, np.radians(53.), np.radians(-28.))
print fieldIDFromRADec
fieldIDFromRADec = oss.fieldID(OpSim_combined, np.radians(85.7), np.radians(-14.4))
print fieldIDFromRADec
fieldList = OpSimCombinedSummary.fieldIds
"""
Explanation: Plot all fields
End of explanation
"""
OpSim_combined.columns
DDF_fieldIDs
selectedFields = OpSim_combined.query('fieldID in @DDF_fieldIDs')[['expMJD', 'propID', 'filter', 'fieldRA', 'fieldDec', 'fieldID', 'fiveSigmaDepth']]#.head()
selectedFields.head()
selectedFields.to_hdf('DDF_Fields_Info.hdf', 'summaryInfo')
# CHECK
read_table = pd.read_hdf('DDF_Fields_Info.hdf', 'summaryInfo')
read_table.head()
from pandas.util.testing import assert_frame_equal
assert_frame_equal(read_table, selectedFields)
"""
Explanation: Obtain a list of filters, mjds, depths, and field ra, dec for deep fields
End of explanation
"""
# Cadence plots: visualize how many visits (per filter, per night) a given
# field received during the first season of the survey.
firstSeasonDeep = OpSimDeepSummary.cadence_plot(fieldID=1427, observedOnly=False, sql_query='night < 366')
firstSeasonCombined = OpSimCombinedSummary.cadence_plot(fieldID=1427, observedOnly=False, sql_query='night < 366')
firstSeasonCombined[0].savefig('minion_1427.pdf')
# NOTE(review): `firstSeason_main` is never defined anywhere in this notebook,
# so the next line raises a NameError — presumably it should reference one of
# the cadence plots created above (e.g. firstSeasonCombined); confirm and fix.
firstSeason_main[0].savefig('minion_1430.pdf')
firstSeason = OpSimDeepSummary.cadence_plot(fieldID=744, observedOnly=False, sql_query='night < 732',
                                           nightMin=0, nightMax=732)
tenCadence = OpSimCombinedSummary.cadence_plot(fieldID=fieldList[2000], observedOnly=False, sql_query='night < 3500', nightMax=3500)
"""
Explanation: First Season
We can visualize the cadence during the first season using the cadence plot for a particular field: The following plot shows how many visits we have in different filters on a particular night:
End of explanation
"""
SN = OpSimDeepSummary.cadence_plot(summarydf=OpSimDeepSummary.df, fieldID=1427, #racol='fieldRA', deccol='fieldDec',
observedOnly=False, mjd_center=59640., mjd_range=[-30., 50.])
# ax = plt.gca()
# ax.axvline(49540, color='r', lw=2.)
# ax.xaxis.get_major_formatter().set_useOffset(False)
SN[0].savefig('SN_observaton.pdf')
"""
Explanation: Suppose we have a supernova with a peak around a particular MJD of 49540, and we want to see what the observations happened around it:
End of explanation
"""
SN_matrix.sum(axis=1).sum()
EnigmaDeep.query('fieldID == 744 and expMJD < 49590 and expMJD > 49510').expMJD.size
SN_matrix[SN_matrix > 0.5] = 1
SN_matrix.sum().sum()
len(SN_matrix.sum(axis=1).dropna())
nightlySN_matrix = SN_matrix.copy(deep=True)
nightlySN_matrix[SN_matrix > 0.5] =1
nightlySN_matrix.sum(axis=1).dropna().sum()
nightlySN_matrix.sum(axis=1).dropna().size
nightlySN_matrix.sum(ax)
"""
Explanation: Scratch
End of explanation
"""
|
tensorflow/workshops | extras/tfhub-text/movie-classification.ipynb | apache-2.0 | import os
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import json
import pickle
import urllib
from sklearn.preprocessing import MultiLabelBinarizer
print(tf.__version__)
"""
Explanation: Building a text classification model with TF Hub
In this notebook, we'll walk you through building a model to predict the genres of a movie given its description. The emphasis here is not on accuracy, but instead how to use TF Hub layers in a text classification model.
To start, import the necessary dependencies for this project.
End of explanation
"""
# Download the data from GCS
!wget 'https://storage.googleapis.com/movies_data/movies_metadata.csv'
"""
Explanation: The dataset
We need a lot of text inputs to train our model. For this model we'll use this awesome movies dataset from Kaggle. To simplify things I've made the movies_metadata.csv file available in a public Cloud Storage bucket so we can download it with wget. I've preprocessed the dataset already to limit the number of genres we'll use for our model, but first let's take a look at the original data so we can see what we're working with.
End of explanation
"""
data = pd.read_csv('movies_metadata.csv')
data.head()
"""
Explanation: Next we'll convert the dataset to a Pandas dataframe and print the first 5 rows. For this model we're only using 2 of these columns: genres and overview.
End of explanation
"""
# Download the preprocessed description/genre Series from a public GCS bucket.
urllib.request.urlretrieve('https://storage.googleapis.com/bq-imports/descriptions.p', 'descriptions.p')
urllib.request.urlretrieve('https://storage.googleapis.com/bq-imports/genres.p', 'genres.p')
# SECURITY NOTE: unpickling data fetched over the network executes arbitrary
# code if the file is ever tampered with — only load pickles from trusted sources.
# Use `with` so the file handles are closed deterministically (the original
# left both files open).
with open('descriptions.p', 'rb') as f:
    descriptions = pickle.load(f)
with open('genres.p', 'rb') as f:
    genres = pickle.load(f)
"""
Explanation: Preparing the data for our model
I've done some preprocessing to limit the dataset to the top 9 genres, and I've saved the Pandas dataframes as public Pickle files in GCS. Here we download those files. The resulting descriptions and genres variables are Pandas Series containing all descriptions and genres from our dataset respectively.
End of explanation
"""
train_size = int(len(descriptions) * .8)
train_descriptions = descriptions[:train_size].astype('str')
train_genres = genres[:train_size]
test_descriptions = descriptions[train_size:].astype('str')
test_genres = genres[train_size:]
"""
Explanation: Splitting our data
When we train our model, we'll use 80% of the data for training and set aside 20% of the data to evaluate how our model performed.
End of explanation
"""
encoder = MultiLabelBinarizer()
# fit() is sufficient here: the original called fit_transform() and discarded
# its return value, which fits the encoder but also transforms train_genres a
# first, wasted time. train_encoded below produces the same multi-hot matrix.
encoder.fit(train_genres)
train_encoded = encoder.transform(train_genres)
test_encoded = encoder.transform(test_genres)
num_classes = len(encoder.classes_)

# Print all possible genres and the labels for the first movie in our training dataset
print(encoder.classes_)
print(train_encoded[0])
"""
Explanation: Formatting our labels
When we train our model we'll provide the labels (in this case genres) associated with each movie. We can't pass the genres in as strings directly, we'll transform them into multi-hot vectors. Since we have 9 genres, we'll have a 9 element vector for each movie with 0s and 1s indicating which genres are present in each description.
End of explanation
"""
description_embeddings = hub.text_embedding_column("descriptions", module_spec="https://tfhub.dev/google/universal-sentence-encoder/2", trainable=False)
"""
Explanation: Create our TF Hub embedding layer
TF Hub provides a library of existing pre-trained model checkpoints for various kinds of models (images, text, and more) In this model we'll use the TF Hub universal-sentence-encoder module for our pre-trained word embeddings. We only need one line of code to instantiate module. When we train our model, it'll convert our array of movie description strings to embeddings. When we train our model, we'll use this as a feature column.
End of explanation
"""
multi_label_head = tf.contrib.estimator.multi_label_head(
num_classes,
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE
)
features = {
"descriptions": np.array(train_descriptions).astype(np.str)
}
labels = np.array(train_encoded).astype(np.int32)
train_input_fn = tf.estimator.inputs.numpy_input_fn(features, labels, shuffle=True, batch_size=32, num_epochs=25)
estimator = tf.contrib.estimator.DNNEstimator(
head=multi_label_head,
hidden_units=[64,10],
feature_columns=[description_embeddings])
"""
Explanation: Instantiating our DNNEstimator Model
The first parameter we pass to our DNNEstimator is called a head, and defines the type of labels our model should expect. Since we want our model to output multiple labels, we’ll use multi_label_head here. Then we'll convert our features and labels to numpy arrays and instantiate our Estimator. batch_size and num_epochs are hyperparameters - you should experiment with different values to see what works best on your dataset.
End of explanation
"""
estimator.train(input_fn=train_input_fn)
# Define our eval input_fn and run eval
eval_input_fn = tf.estimator.inputs.numpy_input_fn({"descriptions": np.array(test_descriptions).astype(np.str)}, test_encoded.astype(np.int32), shuffle=False)
estimator.evaluate(input_fn=eval_input_fn)
"""
Explanation: Training and serving our model
To train our model, we simply call train() passing it the input function we defined above. Once our model is trained, we'll define an evaluation input function similar to the one above and call evaluate(). When this completes we'll get a few metrics we can use to evaluate our model's accuracy.
End of explanation
"""
# Test our model on some raw description data
raw_test = [
"An examination of our dietary choices and the food we put in our bodies. Based on Jonathan Safran Foer's memoir.", # Documentary
"After escaping an attack by what he claims was a 70-foot shark, Jonas Taylor must confront his fears to save those trapped in a sunken submersible.", # Action, Adventure
"A teenager tries to survive the last week of her disastrous eighth-grade year before leaving to start high school.", # Comedy
]
# Generate predictions
predict_input_fn = tf.estimator.inputs.numpy_input_fn({"descriptions": np.array(raw_test).astype(np.str)}, shuffle=False)
results = estimator.predict(predict_input_fn)
# Display predictions
for movie_genres in results:
top_2 = movie_genres['probabilities'].argsort()[-2:][::-1]
for genre in top_2:
text_genre = encoder.classes_[genre]
print(text_genre + ': ' + str(round(movie_genres['probabilities'][genre] * 100, 2)) + '%')
print('')
"""
Explanation: Generating predictions on new data
Now for the most fun part! Let's generate predictions on movie descriptions our model hasn't seen before. We'll define an array of 3 new description strings (the comments indicate the correct genres) and create a predict_input_fn. Then we'll display the top 2 genres along with their confidence percentages for each of the 3 movies.
End of explanation
"""
|
axbaretto/beam | examples/notebooks/documentation/transforms/python/element-wise/flatmap-py.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License")
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Explanation: <a href="https://colab.research.google.com/github/apache/beam/blob/master//Users/dcavazos/src/beam/examples/notebooks/documentation/transforms/python/element-wise/flatmap-py.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
<table align="left"><td><a target="_blank" href="https://beam.apache.org/documentation/transforms/python/elementwise/flatmap"><img src="https://beam.apache.org/images/logos/full-color/name-bottom/beam-logo-full-color-name-bottom-100.png" width="32" height="32" />View the docs</a></td></table>
End of explanation
"""
!pip install --quiet -U apache-beam
"""
Explanation: FlatMap
<script type="text/javascript">
localStorage.setItem('language', 'language-py')
</script>
<table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.FlatMap"><img src="https://beam.apache.org/images/logos/sdks/python.png" width="32px" height="32px" alt="Pydoc"/> Pydoc</a>
</td>
</table>
<br/><br/><br/>
Applies a simple 1-to-many mapping function over each element in the collection.
The many elements are flattened into the resulting collection.
Setup
To run a code cell, you can click the Run cell button at the top left of the cell,
or select it and press Shift+Enter.
Try modifying a code cell and re-running it to see what happens.
To learn more about Colab, see
Welcome to Colaboratory!.
First, let's install the apache-beam module.
End of explanation
"""
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry 🥕Carrot 🍆Eggplant',
'🍅Tomato 🥔Potato',
])
| 'Split words' >> beam.FlatMap(str.split)
| beam.Map(print)
)
"""
Explanation: Examples
In the following examples, we create a pipeline with a PCollection of produce with their icon, name, and duration.
Then, we apply FlatMap in multiple ways to yield zero or more elements per each input element into the resulting PCollection.
FlatMap accepts a function that returns an iterable,
where each of the output iterable's elements is an element of the resulting PCollection.
Example 1: FlatMap with a predefined function
We use the function str.split which takes a single str element and outputs a list of strs.
This pipeline splits the input element using whitespaces, creating a list of zero or more elements.
End of explanation
"""
import apache_beam as beam
def split_words(text, delimiter=','):
    """Split ``text`` into a list of substrings.

    Args:
        text: The string to split.
        delimiter: Separator to split on. Defaults to ',' so existing
            callers (e.g. ``beam.FlatMap(split_words)``) keep their
            original comma-splitting behavior, while the function now
            matches the more general variant used later in this file.

    Returns:
        A list of the pieces of ``text``.
    """
    return text.split(delimiter)
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry,🥕Carrot,🍆Eggplant',
'🍅Tomato,🥔Potato',
])
| 'Split words' >> beam.FlatMap(split_words)
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Example 2: FlatMap with a function
We define a function split_words which splits an input str element using the delimiter ',' and outputs a list of strs.
End of explanation
"""
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
['🍓Strawberry', '🥕Carrot', '🍆Eggplant'],
['🍅Tomato', '🥔Potato'],
])
| 'Flatten lists' >> beam.FlatMap(lambda elements: elements)
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Example 3: FlatMap with a lambda function
For this example, we want to flatten a PCollection of lists of strs into a PCollection of strs.
Each input element is already an iterable, where each element is what we want in the resulting PCollection.
We use a lambda function that returns the same input element it received.
End of explanation
"""
import apache_beam as beam
def generate_elements(elements):
    """Lazily re-emit every element of the input iterable, one at a time."""
    yield from elements
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
['🍓Strawberry', '🥕Carrot', '🍆Eggplant'],
['🍅Tomato', '🥔Potato'],
])
| 'Flatten lists' >> beam.FlatMap(generate_elements)
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Example 4: FlatMap with a generator
For this example, we want to flatten a PCollection of lists of strs into a PCollection of strs.
We use a generator to iterate over the input list and yield each of the elements.
Each yielded result in the generator is an element in the resulting PCollection.
End of explanation
"""
import apache_beam as beam
def format_plant(icon, plant):
    """Yield the icon glued to the plant name; drop entries without an icon.

    Entries whose ``icon`` is falsy (e.g. ``None``) produce no output, so
    they are filtered out of the resulting PCollection.
    """
    if not icon:
        return
    yield '{}{}'.format(icon, plant)
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
('🍓', 'Strawberry'),
('🥕', 'Carrot'),
('🍆', 'Eggplant'),
('🍅', 'Tomato'),
('🥔', 'Potato'),
(None, 'Invalid'),
])
| 'Format' >> beam.FlatMapTuple(format_plant)
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Example 5: FlatMapTuple for key-value pairs
If your PCollection consists of (key, value) pairs,
you can use FlatMapTuple to unpack them into different function arguments.
End of explanation
"""
import apache_beam as beam
def split_words(text, delimiter=None):
    """Split ``text`` on ``delimiter``.

    When ``delimiter`` is None the split falls back to str.split's default,
    i.e. runs of whitespace.
    """
    parts = text.split(delimiter)
    return parts
with beam.Pipeline() as pipeline:
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry,🥕Carrot,🍆Eggplant',
'🍅Tomato,🥔Potato',
])
| 'Split words' >> beam.FlatMap(split_words, delimiter=',')
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Example 6: FlatMap with multiple arguments
You can pass functions with multiple arguments to FlatMap.
They are passed as additional positional arguments or keyword arguments to the function.
In this example, split_words takes text and delimiter as arguments.
End of explanation
"""
import apache_beam as beam
with beam.Pipeline() as pipeline:
delimiter = pipeline | 'Create delimiter' >> beam.Create([','])
plants = (
pipeline
| 'Gardening plants' >> beam.Create([
'🍓Strawberry,🥕Carrot,🍆Eggplant',
'🍅Tomato,🥔Potato',
])
| 'Split words' >> beam.FlatMap(
lambda text, delimiter: text.split(delimiter),
delimiter=beam.pvalue.AsSingleton(delimiter),
)
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Example 7: FlatMap with side inputs as singletons
If the PCollection has a single value, such as the average from another computation,
passing the PCollection as a singleton accesses that value.
In this example, we pass a PCollection the value ',' as a singleton.
We then use that value as the delimiter for the str.split method.
End of explanation
"""
import apache_beam as beam
def normalize_and_validate_durations(plant, valid_durations):
    """Lower-case the plant's duration in place, then emit the plant only if
    the normalized duration appears in ``valid_durations``.

    Note: the mutation happens regardless of validity; invalid plants are
    simply not yielded.
    """
    normalized = plant['duration'].lower()
    plant['duration'] = normalized
    if normalized in valid_durations:
        yield plant
with beam.Pipeline() as pipeline:
valid_durations = pipeline | 'Valid durations' >> beam.Create([
'annual',
'biennial',
'perennial',
])
valid_plants = (
pipeline
| 'Gardening plants' >> beam.Create([
{'icon': '🍓', 'name': 'Strawberry', 'duration': 'Perennial'},
{'icon': '🥕', 'name': 'Carrot', 'duration': 'BIENNIAL'},
{'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'},
{'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'},
{'icon': '🥔', 'name': 'Potato', 'duration': 'unknown'},
])
| 'Normalize and validate durations' >> beam.FlatMap(
normalize_and_validate_durations,
valid_durations=beam.pvalue.AsIter(valid_durations),
)
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Example 8: FlatMap with side inputs as iterators
If the PCollection has multiple values, pass the PCollection as an iterator.
This accesses elements lazily as they are needed,
so it is possible to iterate over large PCollections that won't fit into memory.
End of explanation
"""
import apache_beam as beam
def replace_duration_if_valid(plant, durations):
    """Replace the plant's duration code with its label from ``durations``.

    Plants whose duration code is not a key of ``durations`` are silently
    dropped (nothing is yielded for them).
    """
    code = plant['duration']
    if code not in durations:
        return
    plant['duration'] = durations[code]
    yield plant
with beam.Pipeline() as pipeline:
durations = pipeline | 'Durations dict' >> beam.Create([
(0, 'annual'),
(1, 'biennial'),
(2, 'perennial'),
])
valid_plants = (
pipeline
| 'Gardening plants' >> beam.Create([
{'icon': '🍓', 'name': 'Strawberry', 'duration': 2},
{'icon': '🥕', 'name': 'Carrot', 'duration': 1},
{'icon': '🍆', 'name': 'Eggplant', 'duration': 2},
{'icon': '🍅', 'name': 'Tomato', 'duration': 0},
{'icon': '🥔', 'name': 'Potato', 'duration': -1},
])
| 'Replace duration if valid' >> beam.FlatMap(
replace_duration_if_valid,
durations=beam.pvalue.AsDict(durations),
)
| beam.Map(print)
)
"""
Explanation: <table align="left" style="margin-right:1em">
<td>
<a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/element_wise/flat_map.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
</td>
</table>
<br/><br/><br/>
Note: You can pass the PCollection as a list with beam.pvalue.AsList(pcollection),
but this requires that all the elements fit into memory.
Example 9: FlatMap with side inputs as dictionaries
If a PCollection is small enough to fit into memory, then that PCollection can be passed as a dictionary.
Each element must be a (key, value) pair.
Note that all the elements of the PCollection must fit into memory for this.
If the PCollection won't fit into memory, use beam.pvalue.AsIter(pcollection) instead.
End of explanation
"""
|
maartenbreddels/vaex | docs/source/example_ml_titanic.ipynb | mit | import vaex
import vaex.ml
import numpy as np
import pylab as plt
"""
Explanation: <style>
pre {
white-space: pre-wrap !important;
}
.table-striped > tbody > tr:nth-of-type(odd) {
background-color: #f9f9f9;
}
.table-striped > tbody > tr:nth-of-type(even) {
background-color: white;
}
.table-striped td, .table-striped th, .table-striped tr {
border: 1px solid black;
border-collapse: collapse;
margin: 1em 2em;
}
.rendered_html td, .rendered_html th {
text-align: left;
vertical-align: middle;
padding: 4px;
}
</style>
Machine Learning (advanced): the Titanic dataset
If you want to try out this notebook with a live Python kernel, use mybinder:
<a class="reference external image-reference" href="https://mybinder.org/v2/gh/vaexio/vaex/latest?filepath=docs%2Fsource%2Fexample_ml_titanic.ipynb"><img alt="https://mybinder.org/badge_logo.svg" src="https://mybinder.org/badge_logo.svg" width="150px"></a>
In the following is a more involved machine learning example, in which we will use a larger variety of method in veax to do data cleaning, feature engineering, pre-processing and finally to train a couple of models. To do this, we will use the well known Titanic dataset. Our task is to predict which passengers are more likely to have survived the disaster.
Before we begin, there are two important notes to consider:
- The following example is not meant to provide a competitive score for any competitions that might use the Titanic dataset. Its primary goal is to show how various methods provided by vaex and vaex.ml can be used to clean data, create new features, and do general data manipulations in a machine learning context.
- While the Titanic dataset is rather small in size, all the methods and operations presented in the solution below will work on a dataset of arbitrary size, as long as it fits on the hard-drive of your machine.
Now, with that out of the way, let's get started!
End of explanation
"""
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16

# Apply one font size per matplotlib target: (rc group, keyword, size).
for rc_group, rc_keyword, rc_size in [
        ('font', 'size', SMALL_SIZE),        # default text sizes
        ('axes', 'titlesize', SMALL_SIZE),   # axes title
        ('axes', 'labelsize', MEDIUM_SIZE),  # x and y labels
        ('xtick', 'labelsize', SMALL_SIZE),  # x tick labels
        ('ytick', 'labelsize', SMALL_SIZE),  # y tick labels
        ('legend', 'fontsize', SMALL_SIZE),  # legend
        ('figure', 'titlesize', BIGGER_SIZE),  # figure title
]:
    plt.rc(rc_group, **{rc_keyword: rc_size})
"""
Explanation: Adjusting matplotlib parmeters
Intermezzo: we modify some of the matplotlib default settings, just to make the plots a bit more legible.
End of explanation
"""
# Load the Titanic dataset that ships with vaex.ml
df = vaex.ml.datasets.load_titanic()

# See the description of the columns (names, types, missing values)
df.info()
"""
Explanation: Get the data
First of all we need to read in the data. Since the Titanic dataset is quite well known for trying out different classification algorithms, as well as commonly used as a teaching tool for aspiring data scientists, it ships (no pun intended) together with vaex.ml. So let's read it in, see the description of its contents, and get a preview of the data.
End of explanation
"""
# The dataset is ordered (alphabetically by name and by class), so
# shuffle it in memory; random_state fixes the permutation so the
# notebook is reproducible.
df = df.sample(frac=1, random_state=31)
"""
Explanation: Shuffling
From the preview of the DataFrame we notice that the data is sorted alphabetically by name and by passenger class.
Thus we need to shuffle it before we split it into train and test sets.
End of explanation
"""
# Hold out 20% of the rows for testing. No shuffling occurs here, which
# is why the DataFrame was shuffled in the previous step.
df_train, df_test = df.ml.train_test_split(test_size=0.2, verbose=False)
"""
Explanation: Shuffling for large datasets
As mentioned in The ML introduction tutorial, shuffling large datasets in-memory is not a good idea. In case you work with a large dataset, consider shuffling while exporting:
df.export("shuffled", shuffle=True)
df = vaex.open("shuffled.hdf5)
df_train, df_test = df.ml.train_test_split(test_size=0.2)
Split into train and test
Once the data is shuffled, let's split it into train and test sets. The test set will comprise 20% of the data. Note that we do not shuffle the data for you, since vaex cannot assume your data fits into memory, you are responsible for either writing it in shuffled order on disk, or shuffle it in memory (the previous step).
End of explanation
"""
# Inspect the target variable: is the survived/perished ratio similar
# between the train and test splits?
train_survived_value_counts = df_train.survived.value_counts()
test_survived_value_counts = df_test.survived.value_counts()

plt.figure(figsize=(12, 4))

plt.subplot(121)
train_survived_value_counts.plot.bar()
# Ratio of survivors to casualties in the training split.
# (Renamed from the misleading `train_sex_ratio`; also fixed the
# 'survivied'/'surived' typos in the titles.)
train_survived_ratio = train_survived_value_counts[True] / train_survived_value_counts[False]
plt.title(f'Train set: survived ratio: {train_survived_ratio:.2f}')
plt.ylabel('Number of passengers')

plt.subplot(122)
test_survived_value_counts.plot.bar()
test_survived_ratio = test_survived_value_counts[True] / test_survived_value_counts[False]
plt.title(f'Test set: survived ratio: {test_survived_ratio:.2f}')

plt.tight_layout()
plt.show()
"""
Explanation: Sanity checks
Before we move on to process the data, let's verify that our train and test sets are "similar" enough. We will not be very rigorous here, but just look at basic statistics of some of the key features.
For starters, let's check that the fraction of survivals is similar between the train and test sets.
End of explanation
"""
# Check the sex balance: compare the male-to-female ratio between the
# train and test splits.
train_sex_value_counts = df_train.sex.value_counts()
test_sex_value_counts = df_test.sex.value_counts()

plt.figure(figsize=(12, 4))

for subplot_index, (counts, set_name) in enumerate(
        [(train_sex_value_counts, 'Train'), (test_sex_value_counts, 'Test')]):
    plt.subplot(1, 2, subplot_index + 1)
    counts.plot.bar()
    male_female_ratio = counts['male'] / counts['female']
    plt.title(f'{set_name} set: male vs female ratio: {male_female_ratio:.2f}')
    if subplot_index == 0:
        plt.ylabel('Number of passengers')

plt.tight_layout()
plt.show()
"""
Explanation: Next up, let's check whether the ratio of male to female passengers is not too dissimilar between the two sets.
End of explanation
"""
# Check the class balance: the relative number of passengers per class
# should be similar between the train and test splits.
train_pclass_value_counts = df_train.pclass.value_counts()
test_pclass_value_counts = df_test.pclass.value_counts()

plt.figure(figsize=(12, 4))

plt.subplot(121)
plt.title('Train set: passenger class')
train_pclass_value_counts.plot.bar()

plt.subplot(122)
plt.title('Test set: passenger class')
test_pclass_value_counts.plot.bar()

plt.tight_layout()
plt.show()
"""
Explanation: Finally, let's check that the relative number of passengers per class is similar between the train and test sets.
End of explanation
"""
df_train.describe()
"""
Explanation: From the above diagnostics, we are satisfied that, at least in these few categories, the train and test are similar enough, and we can move forward.
Feature engineering
In this section we will use vaex to create meaningful features that will be used to train a classification model. To start with, let's get a high level overview of the training data.
End of explanation
"""
# Handle missing values in the three incomplete columns.

# Age: impute with the (approximate) median age of the training set;
# percentile_approx at 50% is the approximate median.
median_age = df_train.percentile_approx(expression='age', percentage=50.0)
df_train['age'] = df_train.age.fillna(value=median_age)

# Fare: impute with the mean of the 5 most common ticket prices.
fill_fares = df_train.fare.value_counts(dropna=True)
fill_fare = fill_fares.iloc[:5].index.values.mean()
df_train['fare'] = df_train.fare.fillna(value=fill_fare)

# Cabin: this is a string column, so mark missing values as "M" for "Missing".
df_train['cabin'] = df_train.cabin.fillna(value='M')

# Embarked: impute with the most common port of embarkation.
fill_embarked = df_train.embarked.value_counts(dropna=True).index[0]
df_train['embarked'] = df_train.embarked.fillna(value=fill_embarked)
"""
Explanation: Imputing
We notice that there are 3 columns that have missing data, so our first task will be to impute the missing values with suitable substitutes. This is our strategy:
age: impute with the median age value
fare: impute with the mean fare of the 5 most common values.
cabin: impute with "M" for "Missing"
Embarked: Impute with the most common value.
End of explanation
"""
# Engineer features from the passenger names.

# Titles: capture the last capitalised word followed by a period in the
# name (e.g. "Mr.", "Mrs.", "Countess.") and keep just the title itself.
df_train['name_title'] = df_train['name'].str.replace('.* ([A-Z][a-z]+)\..*', "\\1", regex=True)
display(df_train['name_title'])

# Number of words in the name: count runs of spaces and add one.
df_train['name_num_words'] = df_train['name'].str.count("[ ]+", regex=True) + 1
display(df_train['name_num_words'])
"""
Explanation: String processing
Next up, let's engineer some new, more meaningful features out of the "raw" data that is present in the dataset.
Starting with the name of the passengers, we are going to extract the titles, as well as we are going to count the number of words a name contains. These features can be a loose proxy to the age and status of the passengers.
End of explanation
"""
# Engineer cabin-based features.

# Deck: the first character of the cabin code.
df_train['deck'] = df_train.cabin.str.slice(start=0, stop=1)
display(df_train['deck'])

# Passengers whose ticket lists several cabins (more than one capital
# letter in the cabin string); deck "F" codes are excluded explicitly
# — presumably because their format contains a second letter that is
# not an extra cabin (TODO confirm against the raw data).
df_train['multi_cabin'] = ((df_train.cabin.str.count(pat='[A-Z]', regex=True) > 1) &
                           ~(df_train.deck == 'F')).astype('int')
display(df_train['multi_cabin'])

# Whether a passenger had an assigned cabin at all. Missing cabins were
# imputed with "M" in the earlier cell, so we test against that marker;
# the original `.notna()` check was always true after the imputation.
df_train['has_cabin'] = (df_train.cabin != 'M').astype('int')
display(df_train['has_cabin'])
"""
Explanation: From the cabin column, we will engineer 3 features:
- "deck": extracting the deck on which the cabin is located, which is encoded in each cabin value;
- "multi_cabin: a boolean feature indicating whether a passenger is allocated more than one cabin
- "has_cabin": since there were plenty of values in the original cabin column that had missing values, we are just going to build a feature which tells us whether a passenger had an assigned cabin or not.
End of explanation
"""
# Size of the family on board: the passenger themselves plus the number
# of siblings, spouses, parents and children travelling with them, so
# the minimum possible value is 1.
df_train['family_size'] = (df_train.sibsp + df_train.parch + 1)
display(df_train['family_size'])

# A passenger travels alone when the family size is exactly 1.
# (The original `== 0` test could never be true since family_size >= 1,
# making is_alone always 0.)
df_train['is_alone'] = (df_train.family_size == 1).astype('int')
display(df_train['is_alone'])
"""
Explanation: More features
There are two features that give an indication whether a passenger is travelling alone, or with a family.
These are the "sibsp" and "parch" columns that tell us the number of siblings or spouses and the number of parents or children each passenger has on-board respectively. We are going to use this information to build two columns:
- "family_size" the size of the family of each passenger;
- "is_alone" an additional boolean feature which indicates whether a passenger is traveling without their family.
End of explanation
"""
# Create new interaction features.
# Age weighted by passenger class.
df_train['age_times_class'] = df_train.age * df_train.pclass

# Fare per person in the family (family_size is at least 1, so the
# division is safe).
df_train['fare_per_family_member'] = df_train.fare / df_train.family_size
"""
Explanation: Finally, let's create two new features:
- age $\times$ class
- fare per family member, i.e. fare $/$ family_size
End of explanation
"""
# Label encode the low-cardinality categoricals; allow_unseen=True lets
# transform-time values that were not seen during fit (e.g. in the test
# set) be encoded instead of raising.
label_encoder = vaex.ml.LabelEncoder(features=['sex', 'embarked', 'deck'], allow_unseen=True)
df_train = label_encoder.fit_transform(df_train)

# Frequency-encode the higher-cardinality titles.
# While doing a transform, previously unseen values will be encoded as "zero".
frequency_encoder = vaex.ml.FrequencyEncoder(features=['name_title'], unseen='zero')
df_train = frequency_encoder.fit_transform(df_train)

df_train
"""
Explanation: Modeling (part 1): gradient boosted trees
Since this dataset contains a lot of categorical features, we will start with a tree based model. Thus we will gear the following feature pre-processing towards the use of tree-based models.
Feature pre-processing for boosted tree models
The features "sex", "embarked", and "deck" can be simply label encoded. The feature "name_title" has a rather large cardinality relative to the size of the training set, and in this case we will use the Frequency Encoder.
End of explanation
"""
# Features to use for training the boosting model: the encoded
# categoricals (column names starting with 'frequency' or 'label')
# plus the engineered and raw numerical features.
encoded_features = df_train.get_column_names(regex='^freque|^label')
features = encoded_features + ['multi_cabin', 'name_num_words',
                               'has_cabin', 'is_alone',
                               'family_size', 'age_times_class',
                               'fare_per_family_member',
                               'age', 'fare']

# Preview the feature matrix
df_train[features].head(5)
"""
Explanation: Once all the categorical data is encoded, we can select the features we are going to use for training the model.
End of explanation
"""
import xgboost
import vaex.ml.sklearn

# Instantiate the xgboost model normally, using the scikit-learn API.
xgb_model = xgboost.sklearn.XGBClassifier(max_depth=11,
                                          learning_rate=0.1,
                                          n_estimators=500,
                                          subsample=0.75,
                                          colsample_bylevel=1,
                                          colsample_bytree=1,
                                          scale_pos_weight=1.5,
                                          reg_lambda=1.5,
                                          reg_alpha=5,
                                          n_jobs=-1,
                                          random_state=42,
                                          verbosity=0)

# Wrap it for vaex: the Predictor records the feature and target columns
# so the fitted model becomes part of the DataFrame state, and the
# predictions are added lazily as the virtual column 'prediction_xgb'.
vaex_xgb_model = vaex.ml.sklearn.Predictor(features=features,
                                           target='survived',
                                           model=xgb_model,
                                           prediction_name='prediction_xgb')

# Train the model
vaex_xgb_model.fit(df_train)

# Get the prediction of the model on the training data
df_train = vaex_xgb_model.transform(df_train)

# Preview the resulting train dataframe that contains the predictions
df_train
"""
Explanation: Estimator: xgboost
Now let's feed this data into an a tree based estimator. In this example we will use xgboost. In principle, any algorithm that follows the scikit-learn API convention, i.e. it contains the .fit, .predict methods is compatable with vaex. However, the data will be materialized, i.e. will be read into memory before it is passed on to the estimators. We are hard at work trying to make at least some of the estimators from scikit-learn run out-of-core!
End of explanation
"""
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score


def binary_metrics(y_true, y_pred):
    """Print the accuracy, f1 score and ROC-AUC of a binary prediction.

    `y_true` holds the ground-truth labels, `y_pred` the predicted classes.
    """
    scores = {
        'Accuracy': accuracy_score(y_true=y_true, y_pred=y_pred),
        'f1 score': f1_score(y_true=y_true, y_pred=y_pred),
        'roc-auc': roc_auc_score(y_true=y_true, y_score=y_pred),
    }
    for metric_name, score in scores.items():
        print(f'{metric_name}: {score:.3f}')
"""
Explanation: Notice that in the above cell block, we call .transform on the vaex_xgb_model object. This adds the "prediction_xgb" column as virtual column in the output dataframe. This can be quite convenient when calculating various metrics and making diagnosic plots. Of course, one can call a .predict on the vaex_xgb_model object, which returns an in-memory numpy array object housing the predictions.
Performance on training set
Anyway, let's see what the performance is of the model on the training set. First let's create a convenience function that will help us get multiple metrics at once.
End of explanation
"""
# Evaluate the xgboost model on the (already seen) training data.
print('Metrics for the training set:')
binary_metrics(y_true=df_train.survived.values, y_pred=df_train.prediction_xgb.values)
"""
Explanation: Now let's check the performance of the model on the training set.
End of explanation
"""
# State transfer to the test set: apply every transformation recorded on
# the train DataFrame (imputations, engineered features, encoders, and
# the model prediction) to the test set in one step.
state = df_train.state_get()
df_test.state_set(state)

# Preview of the "transformed" test set
df_test.head(5)
"""
Explanation: Automatic pipelines
Now, let's inspect the performance of the model on the test set. You probably noticed that, unlike when using other libraries, we did not bother to create a pipeline while doing all the cleaning, inputing, feature engineering and categorial encoding. Well, we did not explicitly create a pipeline. In fact veax keeps track of all the changes one applies to a DataFrame in something called a state. A state is the place which contains all the informations regarding, for instance, the virtual columns we've created, which includes the newly engineered features, the categorically encoded columns, and even the model prediction! So all we need to do, is to extract the state from the training DataFrame, and apply it to the test DataFrame.
End of explanation
"""
# Evaluate the xgboost model on the held-out test data.
print('Metrics for the test set:')
binary_metrics(y_true=df_test.survived.values, y_pred=df_test.prediction_xgb.values)
"""
Explanation: Notice that once we apply the state from the train to the test set, the test DataFrame contains all the features we created or modified in the training data, and even the predictions of the xgboost model!
The state is a simple Python dictionary, which can be easily stored as JSON to disk, which makes it very easy to deploy.
Performance on test set
Now it is trivial to check the model performance on the test set:
End of explanation
"""
# Plot the xgboost feature importances, sorted from most to least important.
plt.figure(figsize=(6, 9))
ind = np.argsort(xgb_model.feature_importances_)[::-1]  # descending order
features_sorted = np.array(features)[ind]
importances_sorted = xgb_model.feature_importances_[ind]
plt.barh(y=range(len(features)), width=importances_sorted, height=0.2)
plt.title('Gain')
plt.yticks(ticks=range(len(features)), labels=features_sorted)
plt.gca().invert_yaxis()  # put the most important feature at the top
plt.show()
"""
Explanation: Feature importance
Let's now look at the feature importance of the xgboost model.
End of explanation
"""
# One-hot encode the low-cardinality categorical features.
one_hot = vaex.ml.OneHotEncoder(features=['deck', 'family_size', 'name_title'])
df_train = one_hot.fit_transform(df_train)

# Standard scale (zero mean, unit variance) the numerical features.
standard_scaler = vaex.ml.StandardScaler(features=['age', 'fare', 'fare_per_family_member'])
df_train = standard_scaler.fit_transform(df_train)

# Collect the columns produced by the encoders above.
# NOTE(review): the regex assumes the generated one-hot columns are
# prefixed 'deck_', 'family_size_' and 'frequency_encoded_name_title_' —
# verify these prefixes against the actual OneHotEncoder output names.
features_linear = df_train.get_column_names(regex='^deck_|^family_size_|^frequency_encoded_name_title_')
features_linear += df_train.get_column_names(regex='^standard_scaled_')
features_linear += ['label_encoded_sex']
features_linear
"""
Explanation: Modeling (part 2): Linear models & Ensembles
Given the randomness of the Titanic dataset, we can be satisfied with the performance of the xgboost model above. Still, it is always useful to try a variety of models and approaches, especially since vaex makes this process rather simple.
In the following part we will use a couple of linear models as our predictors, this time straight from scikit-learn. This requires us to pre-process the data in a slightly different way.
Feature pre-processing for linear models
When using linear models, the safest option is to encode categorical variables with the one-hot encoding scheme, especially if they have low cardinality. We will do this for the "family_size" and "deck" features. Note that the "sex" feature is already encoded since it has only two unique values.
The "name_title" feature is a bit more tricky. Since in its original form it has some values that only appear a couple of times, we will do a trick: we will one-hot encode the frequency encoded values. This will reduce cardinality of the feature, while also preserving the most important, i.e. most common values.
Regarding the "age" and "fare", to add some variance in the model, we will not convert them to categorical as before, but simply remove their mean and standard-deviations (standard-scaling). We will do the same to the "fare_per_family_member" feature.
Finally, we will drop out any other features.
End of explanation
"""
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression

# Wrap two linear classifiers so their predictions become virtual
# columns and part of the DataFrame state.

# The Support Vector Classifier
vaex_svc = vaex.ml.sklearn.Predictor(
    features=features_linear,
    target='survived',
    model=SVC(max_iter=1000, random_state=42),
    prediction_name='prediction_svc')

# Logistic Regression
vaex_logistic = vaex.ml.sklearn.Predictor(
    features=features_linear,
    target='survived',
    model=LogisticRegression(max_iter=1000, random_state=42),
    prediction_name='prediction_lr')

# Fit each model and attach its predictions to the train DataFrame.
for classifier in (vaex_svc, vaex_logistic):
    classifier.fit(df_train)
    df_train = classifier.transform(df_train)

# Preview of the train DataFrame
df_train.head(5)
"""
Explanation: Estimators: SVC and LogisticRegression
End of explanation
"""
# Ensemble: weighted mean of the three classifiers' predicted classes.
# NOTE: the original code used prediction_xgb twice and never used the
# LogisticRegression output — the 0.2 weight belongs to prediction_lr.
prediction_final = (df_train.prediction_xgb.astype('int') * 0.3 +
                    df_train.prediction_svc.astype('int') * 0.5 +
                    df_train.prediction_lr.astype('int') * 0.2)

# Threshold the weighted mean to get the final predicted class.
prediction_final = (prediction_final >= 0.5)

# Add the expression to the train DataFrame
df_train['prediction_final'] = prediction_final

# Preview all prediction columns side by side
df_train[df_train.get_column_names(regex='^predict')]
"""
Explanation: Ensemble
Just as before, the predictions from the SVC and the LogisticRegression classifiers are added as virtual columns in the training dataset. This is quite powerful, since now we can easily use them to create an ensemble! For example, let's do a weighted mean.
End of explanation
"""
# State transfer: propagate the new transformations (one-hot encoding,
# scaling, the linear models, and the ensemble) to the test set.
state_new = df_train.state_get()
df_test.state_set(state_new)

# Preview
df_test.head(5)
"""
Explanation: Performance (part 2)
Applying the ensembler to the test set is just as easy as before. We just need to get the new state of the training DataFrame, and transfer it to the test DataFrame.
End of explanation
"""
# Print the evaluation metrics on the test set for every prediction
# column: the individual models as well as the final ensemble.
for prediction_column in df_train.get_column_names(regex='^prediction_'):
    print(prediction_column)
    binary_metrics(y_true=df_test.survived.values, y_pred=df_test[prediction_column].values)
    print(' ')
"""
Explanation: Finally, let's check the performance of all the individual models as well as on the ensembler, on the test set.
End of explanation
"""
|
kubeflow/kfp-tekton-backend | samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb | apache-2.0 | ! arena data list
"""
Explanation: Arena Kubeflow Pipeline Notebook demo
Prepare data volume
You should prepare data volume user-susan by following docs.
And run arena data list to check if it's created.
End of explanation
"""
# Configuration for the KubeFlow Pipelines / Arena demo.
KFP_SERVICE="ml-pipeline.kubeflow.svc.cluster.local:8888"  # in-cluster KFP API service address
KFP_PACKAGE = 'http://kubeflow.oss-cn-beijing.aliyuncs.com/kfp/0.1.14/kfp.tar.gz'  # KFP SDK release tarball
KFP_ARENA_PACKAGE = 'http://kubeflow.oss-cn-beijing.aliyuncs.com/kfp-arena/kfp-arena-0.3.tar.gz'  # Arena KFP package
KUBEFLOW_PIPELINE_LINK = ''  # fill in: the link to access the KubeFlow pipeline API
MOUNT="['user-susan:/training']"  # data volume mapping, format 'volume:/directory'
GPUs=1  # number of GPUs for the training step
"""
Explanation: Define the necessary environment variables and install the KubeFlow Pipeline SDK
We assume this notebook kernel has access to Python's site-packages and is in Python3.
Please fill in the below environment variables with you own settings.
KFP_PACKAGE: The latest release of kubeflow pipeline platform library.
KUBEFLOW_PIPELINE_LINK: The link to access the KubeFlow pipeline API.
MOUNT: The mount configuration to map data above into the training job. The format is 'data:/directory'
GPUs: The number of the GPUs for training.
End of explanation
"""
!pip3 install $KFP_PACKAGE --upgrade
"""
Explanation: Install the necessary python packages
Note: Please change pip3 to the package manager that's used for this Notebook Kernel.
End of explanation
"""
!pip3 install $KFP_ARENA_PACKAGE --upgrade
"""
Explanation: Note: Install arena's python package
End of explanation
"""
import arena
import kfp.dsl as dsl


@dsl.pipeline(
    name='pipeline to run jobs',
    description='shows how to run pipeline jobs.'
)
def sample_pipeline(learning_rate='0.01',
                    dropout='0.9',
                    model_version='1'):
    """A pipeline for an end to end machine learning workflow."""
    # 1. prepare data: download the MNIST dataset into the shared volume
    prepare_data = arena.StandaloneOp(
        name="prepare-data",
        image="byrnedo/alpine-curl",
        data=MOUNT,
        command="mkdir -p /training/dataset/mnist && \
            cd /training/dataset/mnist && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/t10k-images-idx3-ubyte.gz && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/t10k-labels-idx1-ubyte.gz && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/train-images-idx3-ubyte.gz && \
            curl -O https://code.aliyun.com/xiaozhou/tensorflow-sample-code/raw/master/data/train-labels-idx1-ubyte.gz")

    # 2. prepare source code: clone the sample repo if not already present.
    # (The original command was missing the `git clone` verb and tried to
    # execute the repository URL directly.)
    prepare_code = arena.StandaloneOp(
        name="source-code",
        image="alpine/git",
        data=MOUNT,
        command="mkdir -p /training/models/ && \
            cd /training/models/ && \
            if [ ! -d /training/models/tensorflow-sample-code ]; then git clone https://github.com/cheyang/tensorflow-sample-code.git; else echo no need download;fi")

    # 3. train the model; referencing the outputs of steps 1 and 2 makes
    # this step depend on them.
    train = arena.StandaloneOp(
        name="train",
        image="tensorflow/tensorflow:1.11.0-gpu-py3",
        gpus=GPUs,
        data=MOUNT,
        command="echo %s; \
            echo %s; \
            python /training/models/tensorflow-sample-code/tfjob/docker/mnist/main.py --max_steps 500 --data_dir /training/dataset/mnist --log_dir /training/output/mnist" % (prepare_data.output, prepare_code.output),
        metric_name="Train-accuracy",
        metric_unit="PERCENTAGE",
    )

    # 4. export the trained model for serving
    export_model = arena.StandaloneOp(
        name="export-model",
        image="tensorflow/tensorflow:1.11.0-py3",
        data=MOUNT,
        command="echo %s; \
            python /training/models/tensorflow-sample-code/tfjob/docker/mnist/export_model.py --model_version=%s --checkpoint_step=400 --checkpoint_path=/training/output/mnist /training/output/models" % (train.output, model_version))


# Pipeline run arguments. (The original assigned `model_verison` — a typo
# that left `model_version` undefined and made the dict below raise a
# NameError.)
learning_rate = "0.001"
dropout = "0.8"
model_version = "1"

arguments = {
    'learning_rate': learning_rate,
    'dropout': dropout,
    'model_version': model_version,
}

import kfp

# Submit the pipeline run against the configured KFP endpoint.
client = kfp.Client(host=KUBEFLOW_PIPELINE_LINK)
run = client.create_run_from_pipeline_func(sample_pipeline, arguments=arguments).run_info

print('The above run link is assuming you ran this cell on JupyterHub that is deployed on the same cluster. ' +
      'The actual run link is ' + KUBEFLOW_PIPELINE_LINK + '/#/runs/details/' + run.id)
"""
Explanation: 2. Define pipeline tasks using the kfp library.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_object_epochs.ipynb | bsd-3-clause | import mne
import os.path as op
import numpy as np
from matplotlib import pyplot as plt
"""
Explanation: The :class:Epochs <mne.Epochs> data structure: epoched data
:class:Epochs <mne.Epochs> objects are a way of representing continuous
data as a collection of time-locked trials, stored in an array of shape
(n_events, n_channels, n_times). They are useful for many statistical
methods in neuroscience, and make it easy to quickly overview what occurs
during a trial.
End of explanation
"""
# Path to the MNE sample dataset (downloaded on first use).
data_path = mne.datasets.sample.data_path()

# Load a dataset that contains events
raw = mne.io.read_raw_fif(
    op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif'))

# If your raw object has a stim channel, you can construct an event array
# easily; each row of `events` holds (sample, previous value, event code).
events = mne.find_events(raw, stim_channel='STI 014')

# Show the number of events (number of rows)
print('Number of events:', len(events))

# Show all unique event codes (3rd column)
print('Unique event codes:', np.unique(events[:, 2]))

# Specify event codes of interest with descriptive labels.
# This dataset also has visual left (3) and right (4) events, but
# to save time and memory we'll just look at the auditory conditions
# for now.
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
"""
Explanation: :class:Epochs <mne.Epochs> objects can be created in three ways:
1. From a :class:Raw <mne.io.Raw> object, along with event times
2. From an :class:Epochs <mne.Epochs> object that has been saved as a
.fif file
3. From scratch using :class:EpochsArray <mne.EpochsArray>. See
tut_creating_data_structures
End of explanation
"""
# Epoch the raw data from -0.1 s to 1.0 s relative to each event onset,
# baseline-corrected over the pre-stimulus interval and loaded into memory.
epochs = mne.Epochs(raw, events, event_id, tmin=-0.1, tmax=1,
                    baseline=(None, 0), preload=True)
print(epochs)
"""
Explanation: Now, we can create an :class:mne.Epochs object with the events we've
extracted. Note that epochs constructed in this manner will not have their
data available until explicitly read into memory, which you can do with
:func:get_data <mne.Epochs.get_data>. Alternatively, you can use
preload=True.
Expose the raw data as epochs, cut from -0.1 s to 1.0 s relative to the event
onsets
End of explanation
"""
# First 3 rows of the events array and the event_id mapping used above.
print(epochs.events[:3])
print(epochs.event_id)
"""
Explanation: Epochs behave similarly to :class:mne.io.Raw objects. They have an
:class:info <mne.Info> attribute that has all of the same
information, as well as a number of attributes unique to the events contained
within the object.
End of explanation
"""
# Subsets: integer slicing returns an Epochs object; string keys from
# event_id select the matching condition.
print(epochs[1:5])
print(epochs['Auditory/Right'])
"""
Explanation: You can select subsets of epochs by indexing the :class:Epochs <mne.Epochs>
object directly. Alternatively, if you have epoch names specified in
event_id then you may index with strings instead.
End of explanation
"""
# Indexing with an integer yields Epochs objects...
for epoch_index in range(3):
    print(epochs[epoch_index])

# ...whereas iterating over an Epochs instance yields the underlying
# data arrays, one per epoch.
for epoch_data in epochs[:2]:
    print(epoch_data)
"""
Explanation: It is also possible to iterate through :class:Epochs <mne.Epochs> objects
in this way. Note that behavior is different if you iterate on Epochs
directly rather than indexing:
End of explanation
"""
# Drop epoch 0 manually with a custom reason, then reject epochs whose
# peak-to-peak amplitude exceeds the per-channel-type thresholds below.
epochs.drop([0], reason='User reason')
epochs.drop_bad(reject=dict(grad=2500e-13, mag=4e-12, eog=200e-6), flat=None)
print(epochs.drop_log)
epochs.plot_drop_log()

# epochs.selection holds the surviving indices into the original events;
# the two derivations of the removed indices below should agree.
print('Selection from original events:\n%s' % epochs.selection)
print('Removed events (from numpy setdiff1d):\n%s'
      % (np.setdiff1d(np.arange(len(events)), epochs.selection).tolist(),))
print('Removed events (from list comprehension -- should match!):\n%s'
      % ([li for li, log in enumerate(epochs.drop_log) if len(log) > 0]))
"""
Explanation: You can manually remove epochs from the Epochs object by using
:func:epochs.drop(idx) <mne.Epochs.drop>, or by using rejection or flat
thresholds with :func:epochs.drop_bad(reject, flat) <mne.Epochs.drop_bad>.
You can also inspect the reason why epochs were dropped by looking at the
list stored in epochs.drop_log or plot them with
:func:epochs.plot_drop_log() <mne.Epochs.plot_drop_log>. The indices
from the original set of events are stored in epochs.selection.
End of explanation
"""
# Save the epochs to disk; per MNE convention the file name ends '-epo.fif'.
epochs_fname = op.join(data_path, 'MEG', 'sample', 'sample-epo.fif')
epochs.save(epochs_fname)
"""
Explanation: If you wish to save the epochs as a file, you can do it with
:func:mne.Epochs.save. To conform to MNE naming conventions, the
epochs file names should end with '-epo.fif'.
End of explanation
"""
epochs = mne.read_epochs(epochs_fname, preload=False)
"""
Explanation: Later on you can read the epochs with :func:mne.read_epochs. For reading
EEGLAB epochs files see :func:mne.read_epochs_eeglab. We can also use
preload=False to save memory, loading the epochs from disk on demand.
End of explanation
"""
# Average each auditory condition across trials, producing Evoked objects.
ev_left = epochs['Auditory/Left'].average()
ev_right = epochs['Auditory/Right'].average()

# Plot both conditions side by side (one column per condition).
f, axs = plt.subplots(3, 2, figsize=(10, 5))
_ = f.suptitle('Left / Right auditory', fontsize=20)
_ = ev_left.plot(axes=axs[:, 0], show=False, time_unit='s')
_ = ev_right.plot(axes=axs[:, 1], show=False, time_unit='s')
plt.tight_layout()
"""
Explanation: If you wish to look at the average across trial types, then you may do so,
creating an :class:Evoked <mne.Evoked> object in the process. Instances
of Evoked are usually created by calling :func:mne.Epochs.average. For
creating Evoked from other data structures see :class:mne.EvokedArray and
tut_creating_data_structures.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cmcc/cmip6/models/cmcc-cm2-vhr4/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# DOC is the ES-DOC output document for the
# (mip_era='cmip6', institute='cmcc', source_id='cmcc-cm2-vhr4', topic='atmos')
# tuple; all subsequent DOC.set_id()/DOC.set_value() calls record entries in it.
DOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-cm2-vhr4', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: CMCC
Source ID: CMCC-CM2-VHR4
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:50
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
# Left at 0 until the document's property values below have been filled in.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# Property 1.1 "Model Overview" (required STRING, cardinality 1.1); the
# explanation cell below gives its definition. Same pattern is repeated for
# every property in this notebook: set_id() selects the property, then
# set_value() records its value.
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection schemes name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Flourinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other flourinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
BinRoot/TensorFlow-Book | ch12_rank/Concept01_ranknet.ipynb | mit | import tensorflow as tf
import numpy as np
import random
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Ch 12: Concept 01
Ranking by neural network
Import the relevant libraries
End of explanation
"""
n_features = 2
def get_data():
    """Fabricate two random 2-D toy datasets and scatter-plot them.

    Returns a pair (data_a, data_b): ten points drawn uniformly from
    [1, 2)^2 (plotted as red crosses) and ten points drawn from
    [0, 1)^2 (plotted as green circles).
    """
    set_a = np.random.rand(10, n_features) + 1
    set_b = np.random.rand(10, n_features)
    for points, colour, mark in ((set_a, 'r', 'x'), (set_b, 'g', 'o')):
        plt.scatter(points[:, 0], points[:, 1], c=colour, marker=mark)
    plt.show()
    return set_a, set_b
def get_data2():
    """Return two small fixed 2-D datasets and scatter-plot them.

    data_a (red crosses) sits high on the y axis; data_b (green circles)
    sits low — a deterministic alternative to the random get_data().
    """
    data_a = np.asarray([[0.1, 0.9], [0.1, 0.8]])
    data_b = np.asarray([[0.4,0.05], [0.45, 0.1]])
    plt.scatter(data_a[:, 0], data_a[:, 1], c='r', marker='x')
    plt.scatter(data_b[:, 0], data_b[:, 1], c='g', marker='o')
    plt.xlim([0, 0.5])
    plt.ylim([0, 1])
    # Bug fix: plt.axes() creates a brand-new (empty) axes on top of the
    # current plot; plt.gca() fetches the axes the data was drawn on.
    plt.gca().set_aspect('equal')
    plt.show()
    return data_a, data_b
data_a, data_b = get_data()
"""
Explanation: Let's fabricate some data. We'll call get_data() to generate two datasets: data_a and data_b.
We'll use the convention that points in data_a are ranked lower than those in data_b. So we need to learn a ranking function (i.e. utility function) that scores points in data_a lower.
End of explanation
"""
n_hidden = 10
"""
Explanation: Now, let's define our ranking model. It'll take in two items (x1 and x2), and return a score (s1 and s2) for each item.
Our model introduces a hyper-parameter called n_hidden to tweak the number of neurons in the hidden layer of the network.
End of explanation
"""
# Siamese scoring graph: both inputs share one hidden layer (w1, b1) and one
# linear output layer (w2, b2), so s1 and s2 come from the same scoring function.
with tf.name_scope("input"):
    # Two item batches to compare, each of shape (batch, n_features).
    x1 = tf.placeholder(tf.float32, [None, n_features], name="x1")
    x2 = tf.placeholder(tf.float32, [None, n_features], name="x2")
    # Dropout keep probability: < 1 while training, 1 at evaluation time.
    dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
with tf.name_scope("hidden_layer"):
    with tf.name_scope("weights"):
        # Hidden-layer parameters, randomly initialized; histograms are
        # logged so TensorBoard can track their distributions.
        w1 = tf.Variable(tf.random_normal([n_features, n_hidden]), name="w1")
        tf.summary.histogram("w1", w1)
        b1 = tf.Variable(tf.random_normal([n_hidden]), name="b1")
        tf.summary.histogram("b1", b1)
    with tf.name_scope("output"):
        # ReLU hidden activations with dropout; note the SAME w1/b1 are
        # applied to both x1 and x2 (weight sharing).
        h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x1,w1) + b1), keep_prob=dropout_keep_prob)
        tf.summary.histogram("h1", h1)
        h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(x2, w1) + b1), keep_prob=dropout_keep_prob)
        tf.summary.histogram("h2", h2)
with tf.name_scope("output_layer"):
    with tf.name_scope("weights"):
        # Final linear layer mapping the hidden vector to a scalar score.
        w2 = tf.Variable(tf.random_normal([n_hidden, 1]), name="w2")
        tf.summary.histogram("w2", w2)
        b2 = tf.Variable(tf.random_normal([1]), name="b2")
        tf.summary.histogram("b2", b2)
    with tf.name_scope("output"):
        # Utility scores for each input, shape (batch, 1).
        s1 = tf.matmul(h1, w2) + b2
        s2 = tf.matmul(h2, w2) + b2
"""
Explanation: When defining the model, let's organize it into separate scopes. That way, the TensorBoard visualization will look very clean.
End of explanation
"""
with tf.name_scope("loss"):
    # Difference of the two scores; the training goal is s1 - s2 < 0
    # (items fed through x1 should score lower than those through x2).
    s12 = s1 - s2
    s12_flat = tf.reshape(s12, [-1])
    # NOTE(review): `pred` and `lable_p` (misspelling of "label_p") are
    # computed but never used anywhere downstream in this notebook.
    pred = tf.sigmoid(s12)
    lable_p = tf.sigmoid(-tf.ones_like(s12))
    # NOTE(review): softmax cross-entropy over the whole flattened batch with
    # an all-zeros label vector (plus a +1 logit shift) is an unusual pairwise
    # ranking loss; the standard RankNet formulation uses sigmoid
    # cross-entropy on s1 - s2. Confirm this is intentional.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf.zeros_like(s12_flat), logits=s12_flat + 1)
    loss = tf.reduce_mean(cross_entropy)
    tf.summary.scalar("loss", loss)
with tf.name_scope("train_op"):
    # Adam with a fixed learning rate of 0.001 minimizes the ranking loss.
    train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
"""
Explanation: The loss function will involve comparing s1 and s2.
Since we're trying to achieve the inequality Score(x1) < Score(x2), we need the loss function to encourage s1 < s2.
In other words, the loss function tries to guarantee that s1 - s2 < 0.
End of explanation
"""
# Interactive session so ops can be run without an explicit `with` block.
sess = tf.InteractiveSession()
# Merge every histogram/scalar summary defined above into a single op.
summary_op = tf.summary.merge_all()
# TensorBoard event files (and the graph definition) go to ./tb_files.
writer = tf.summary.FileWriter("tb_files", sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
"""
Explanation: Start the session and prepare peripheral ops.
End of explanation
"""
for epoch in range(0, 10000):
    # One optimization step with dropout enabled (keep probability 0.5);
    # data_a is fed as the lower-ranked item, data_b as the higher-ranked.
    loss_val, _ = sess.run([loss, train_op], feed_dict={x1:data_a, x2:data_b, dropout_keep_prob:0.5})
    if epoch % 100 == 0 :
        # Every 100 epochs, log summaries with dropout disabled (keep prob 1).
        summary_result = sess.run(summary_op, feed_dict={x1:data_a, x2:data_b, dropout_keep_prob:1})
        writer.add_summary(summary_result, epoch)
        # print("Epoch {}: Loss {}".format(epoch, loss_val))
"""
Explanation: Train the model with the training data.
End of explanation
"""
# Build a grid_size x grid_size lattice of [x, y] points covering the unit
# square, row by row (y outer, x inner), to probe the learned utility surface.
grid_size = 10
axis = np.linspace(0., 1., num=grid_size)
data_test = [[x, y] for y in axis for x in axis]
"""
Explanation: Visualize the results on a grid by accumulating a list of points to test.
End of explanation
"""
def visualize_results(data_test):
    """Score every test point with the trained net and show the utility surface.

    Runs the s1 branch of the graph with dropout off, reshapes the scores
    back onto the grid_size x grid_size grid, and renders them as a heat map
    with a colorbar.
    """
    plt.figure()
    feed = {x1: data_test, dropout_keep_prob: 1}
    grid_scores = sess.run(s1, feed_dict=feed)
    heat = np.reshape(grid_scores, [grid_size, grid_size])
    plt.imshow(heat, origin='lower')
    plt.colorbar()
visualize_results(data_test)
"""
Explanation: Run the model on all the test points and visualize the utility scores of each point by a color.
End of explanation
"""
|
rishuatgithub/MLPy | torch/PYTORCH_NOTEBOOKS/00-Crash-Course-Topics/00-Crash-Course-NumPy/01-NumPy-Indexing-and-Selection.ipynb | apache-2.0 | import numpy as np
#Creating sample array
arr = np.arange(0,11)
#Show
arr
"""
Explanation: <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a>
<center><em>Copyright Pierian Data</em></center>
<center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>
NumPy Indexing and Selection
In this lecture we will discuss how to select elements or groups of elements from an array.
End of explanation
"""
#Get a value at an index
arr[8]
#Get values in a range
arr[1:5]
#Get values in a range
arr[0:5]
"""
Explanation: Bracket Indexing and Selection
The simplest way to pick one or some elements of an array looks very similar to python lists:
End of explanation
"""
#Setting a value with index range (Broadcasting)
arr[0:5]=100
#Show
arr
# Reset array, we'll see why I had to reset in a moment
arr = np.arange(0,11)
#Show
arr
#Important notes on Slices
slice_of_arr = arr[0:6]
#Show slice
slice_of_arr
#Change Slice
slice_of_arr[:]=99
#Show Slice again
slice_of_arr
"""
Explanation: Broadcasting
NumPy arrays differ from normal Python lists because of their ability to broadcast. With lists, you can only reassign parts of a list with new parts of the same size and shape. That is, if you wanted to replace the first 5 elements in a list with a new value, you would have to pass in a new 5 element list. With NumPy arrays, you can broadcast a single value across a larger set of values:
End of explanation
"""
arr
"""
Explanation: Now note the changes also occur in our original array!
End of explanation
"""
#To get a copy, need to be explicit
arr_copy = arr.copy()
arr_copy
"""
Explanation: Data is not copied, it's a view of the original array! This avoids memory problems!
End of explanation
"""
arr_2d = np.array(([5,10,15],[20,25,30],[35,40,45]))
#Show
arr_2d
#Indexing row
arr_2d[1]
# Format is arr_2d[row][col] or arr_2d[row,col]
# Getting individual element value
arr_2d[1][0]
# Getting individual element value
arr_2d[1,0]
# 2D array slicing
#Shape (2,2) from top right corner
arr_2d[:2,1:]
#Shape bottom row
arr_2d[2]
#Shape bottom row
arr_2d[2,:]
"""
Explanation: Indexing a 2D array (matrices)
The general format is arr_2d[row][col] or arr_2d[row,col]. I recommend using the comma notation for clarity.
End of explanation
"""
arr = np.arange(1,11)
arr
arr > 4
bool_arr = arr>4
bool_arr
arr[bool_arr]
arr[arr>2]
x = 2
arr[arr>x]
"""
Explanation: More Indexing Help
Indexing a 2D matrix can be a bit confusing at first, especially when you start to add in step size. Try google image searching NumPy indexing to find useful images, like this one:
<img src= 'numpy_indexing.png' width=500/> Image source: http://www.scipy-lectures.org/intro/numpy/numpy.html
Conditional Selection
This is a very fundamental concept that will directly translate to pandas later on, make sure you understand this part!
Let's briefly go over how to use brackets for selection based off of comparison operators.
End of explanation
"""
|
bryantbiggs/luther-02 | Total_Analysis1_57%.ipynb | mit | import pandas as pd
import numpy as np
import string
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('ggplot')
"""
Explanation: Dates:
Older movies might not be torrented.
Month: blockbusters are released in May and December; few good movies are released January through April; kids are on summer vacation during the summer months.
Production Budget:
Might be outliers. Take log if outliers, Remove outliers, Create new feature that is binning production budget
Rating: Is there a relationship with Number of torrents, look at mean, median torrents per rating. Maybe group ratings together for example G, PG, PG-13 in one group R in another
Genre as a feature. Maybe keep a subset, or maybe create new genres by combining other genres.
End of explanation
"""
df = pd.read_csv('data/train_data2.csv', encoding='latin-1')
print(len(df))
df.head()
"""
Explanation: Read in TRAIN data set and select pertinent columns
End of explanation
"""
df['Released'] = pd.to_datetime(df['Released'])
df['Year'] = pd.DatetimeIndex(df['Released']).year
df['Month'] = pd.DatetimeIndex(df['Released']).month
df.head()
"""
Explanation: Convert dates to datetime objects
End of explanation
"""
df['Year'].describe().astype(int)
# dictionary - year counts
yr_dict = df['Year'].value_counts().to_dict()
import operator
yr_lst = sorted(yr_dict.items(), key=operator.itemgetter(0)) # sort by year
yr_lst = yr_lst[::-1]
#print(yr_lst)
plt.figure(figsize=(25,10))
ind = np.arange(len(yr_dict))
width = 0.35
bar_year = [year for year, count in yr_lst]
bar_count = [count for year, count in yr_lst]
plt.bar(ind, bar_count, width, color='r')
plt.ylabel('Count')
plt.xlabel('Year')
plt.title('Number of Torrents per Year')
plt.xticks(ind + width/2., (bar_year), rotation='vertical')
plt.yticks(np.arange(0, 91, 5))
plt.show()
"""
Explanation: Inspect years
End of explanation
"""
# Keep only films released in [yr_cut_bot, yr_cut_top).
before = len(df)
yr_cut_bot = 1998
yr_cut_top = 2015
mask = (df['Year'] >= yr_cut_bot) & (df['Year'] < yr_cut_top)
df_yr = df.loc[mask]
df_yr.sort_values('Year').head()
after = len(df_yr)
# Bug fix: the percentage of rows lost is (before - after) / before * 100;
# the original (before/after)/before formula printed a meaningless number.
print('{0} entries lost ({1}%) due to date cutoff between {2} and {3}'.format(
    before - after, round((before - after) / before * 100, 2), yr_cut_bot, yr_cut_top))
# look at current data set AFTER year cutoff
plt.rcParams['figure.figsize'] = (15, 15)
_ = pd.tools.plotting.scatter_matrix(df_yr)
# unique list of grouped genres as strings
unq_genres = df_yr['Genre'].unique()
unq_genres = unq_genres.tolist()
#print(len(unq_genres))
#print(unq_genres[:10])
# unique list of grouped genres as lists
lst_grp_genres = []
for lst in unq_genres:
temp = []
for genre in lst.split(','):
temp.append(genre)
lst_grp_genres.append(temp)
#print(len(lst_grp_genres))
#print(lst_grp_genres)
# unique list of individual genres
ind_genre = set()
for lst in unq_genres:
for genre in lst.split(','):
ind_genre.add(genre.strip())
ind_genre = sorted(ind_genre)
#print(len(ind_genre))
#print(ind_genre)
# dictionary - count of genre occurences
count = defaultdict(lambda:0)
for genre in ind_genre:
count[genre] = df_yr.Genre.str.contains(genre).sum()
import operator
srt = sorted(count.items(), key=operator.itemgetter(1))
srt = srt[::-1]
#print(srt)
def split_to_array(ser):
    """Break a comma-and-space separated genre string into a pandas Series of tokens."""
    cleaned = ser.strip().replace(',', '')
    return pd.Series(np.array(cleaned.split(' ')))
genres = df_yr.Genre.apply(split_to_array)
genres = pd.Series(genres.values.ravel()).dropna()
genres = genres.value_counts().sort_values(ascending=False)
def convert_frequency(ser, genres=genres):
    """Map a multi-genre string to its single most frequent genre.

    Splits *ser* the same way as split_to_array, looks each token up in the
    corpus-wide frequency table *genres* (a value_counts Series), and returns
    the label of the most common one.
    """
    split_array = np.array(ser.strip().replace(',','').split(' '))
    # Bug fix: in modern pandas Series.argmax() returns a positional integer;
    # idxmax() returns the index label (the genre name), which is what
    # df['Genre_Single'] and the later .isin(...) filtering expect.
    genre = genres.loc[split_array].idxmax()
    return genre
df_yr['Genre_Single'] = df_yr.Genre.apply(convert_frequency)
"""
Explanation: df => df_yr
End of explanation
"""
# select only genres of significance
genre = ['Action', 'Adventure', 'Comedy', 'Drama']
df_sub = df_yr.loc[df_yr['Genre_Single'].isin(genre)]
# select only genres of significance
ratings = ['PG-13', 'PG', 'G', 'R']
df_sub = df_sub.loc[df_sub['Rated'].isin(ratings)]
#df_sub['Runtime'].value_counts()
#df_sub['Genre_Single'].value_counts()
#df_sub['Rated'].value_counts()
df_sub.describe()
# entire dataframe
plt.rcParams['figure.figsize'] = (15, 15)
_ = pd.tools.plotting.scatter_matrix(df_sub)
from patsy import dmatrices
patsy_formula = 'Total_Torrents ~ Prod_Budget + Year + Genre_Single'
y, x = dmatrices(patsy_formula, data=df_sub, return_type='dataframe')
import statsmodels.api as sm
model = sm.OLS(y, x)
results = model.fit()
results.summary()
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
mod_lr_score = model.score(x, y)
mod_lr_coef = model.coef_
from sklearn import cross_validation as cv
from sklearn import metrics
x_train, x_test, y_train, y_test = cv.train_test_split(x,y,test_size=0.20,random_state=1234)
model = LinearRegression().fit(x_train, y_train)
# store results
mean_sq_err = metrics.mean_squared_error(y_train,model.predict(x_train))
cv_mod_score = model.score(x_train, y_train)
# reset x, y otherwise errors occur
y, x = dmatrices(patsy_formula, data=df_sub, return_type='dataframe')
from sklearn.cross_validation import KFold
kf = KFold(len(df_sub), n_folds=10, shuffle=True)
for train_index, test_index in kf:
x_train, x_test = x.iloc[train_index], x.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
clf2 = LinearRegression().fit(x.iloc[train_index], y.iloc[train_index])
# store results
mean_sq_errKf = metrics.mean_squared_error(y_train,model.predict(x_train))
cvKf_mod_score = clf2.score(x,y)
#NORMAL RESULTS
print('Model Linear Regression Score = {0}'.format(mod_lr_score))
print(' Mean Square Error = {0}'.format(mean_sq_err))
print(' Cross Validation Model Score = {0}'.format(cv_mod_score))
print(' Mean Squred Error K-Fold = {0}'.format(mean_sq_errKf))
print('Cross Val. K-Fold Model Score = {0}'.format(cvKf_mod_score))
_ = plt.plot(y, model.predict(x), 'ro')
# entire dataframe
plt.rcParams['figure.figsize'] = (15, 15)
_ = pd.tools.plotting.scatter_matrix(df_sub)
"""
Explanation: Select only significant values from dataframe
df_yr => df_sub
End of explanation
"""
df.columns
df_sub['log_budg']=np.log(df_sub.Prod_Budget)
#df_sub['log_year']=np.log(df_sub.Year)
#df_sub['log_run']=np.log(df_sub.Runtime)
df_sub['log_tor']=np.log(df_sub.Total_Torrents)
trans = df_sub[['log_budg', 'Year', 'log_tor']]
plt.rcParams['figure.figsize'] = (15, 15)
_ = pd.tools.plotting.scatter_matrix(trans)
log_patsy_formula = 'log_tor ~ log_budg + Year + Genre_Single'
y, x = dmatrices(log_patsy_formula, data=df_sub, return_type='dataframe')
import statsmodels.formula.api as smf
results = smf.ols(formula=log_patsy_formula, data=df_sub,).fit()
results.summary()
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x, y)
# store results
log_mod_lr_score = model.score(x,y)
from sklearn import cross_validation as cv
from sklearn import metrics
x_train, x_test, y_train, y_test = cv.train_test_split(x,y,test_size=0.20,random_state=1234)
model = LinearRegression().fit(x_train, y_train)
# store results
log_mean_sq_err = metrics.mean_squared_error(y_train,model.predict(x_train))
log_cv_mod_score = model.score(x_train, y_train)
# reset x, y otherwise errors occur
y, x = dmatrices(log_patsy_formula, data=df_sub, return_type='dataframe')
from sklearn.cross_validation import KFold
kf = KFold(len(df_sub), n_folds=10, shuffle=True)
for train_index, test_index in kf:
x_train, x_test = x.iloc[train_index], x.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
clf2 = LinearRegression().fit(x.iloc[train_index], y.iloc[train_index])
# store results
log_mean_sq_errKf = metrics.mean_squared_error(y_train,model.predict(x_train))
log_cvKf_mod_score = clf2.score(x,y)
#LOG RESULTS
print('Log Model Linear Regression Score = {0}'.format(log_mod_lr_score))
print(' Log Mean Square Error = {0}'.format(log_mean_sq_err))
print(' Log Cross Validation Model Score = {0}'.format(log_cv_mod_score))
print(' Log Mean Squred Error K-Fold = {0}'.format(log_mean_sq_errKf))
print('Log Cross Val. K-Fold Model Score = {0}'.format(log_cvKf_mod_score))
df_TEST = pd.read_csv('data/test_data2.csv', encoding='latin-1')
df_TEST['log_budg']=np.log(df_TEST.Prod_Budget)
df_TEST['log_run']=np.log(df_TEST.Runtime)
df_TEST['log_tor']=np.log(df_TEST.Total_Torrents)
def split_to_array(ser):
    """Break a comma-and-space separated genre string into a pandas Series of tokens."""
    cleaned = ser.strip().replace(',', '')
    return pd.Series(np.array(cleaned.split(' ')))
genres = df_yr.Genre.apply(split_to_array)
genres = pd.Series(genres.values.ravel()).dropna()
genres = genres.value_counts().sort_values(ascending=False)
def convert_frequency(ser, genres=genres):
    """Map a multi-genre string to its single most frequent genre.

    Splits *ser* the same way as split_to_array, looks each token up in the
    corpus-wide frequency table *genres* (a value_counts Series), and returns
    the label of the most common one.
    """
    split_array = np.array(ser.strip().replace(',','').split(' '))
    # Bug fix: in modern pandas Series.argmax() returns a positional integer;
    # idxmax() returns the index label (the genre name), which is what
    # df_TEST['Genre_Single'] expects.
    genre = genres.loc[split_array].idxmax()
    return genre
df_TEST['Genre_Single'] = df_TEST.Genre.apply(convert_frequency)
log_patsy_formula_test = 'log_tor ~ log_budg + Year + Month + Genre_Single'
y, x = dmatrices(log_patsy_formula_test, data=df_TEST, return_type='dataframe')
print(clf2.score(x_test, y_test))
print(metrics.mean_squared_error(y_test,model.predict(x_test)))
#_ = plt.plot(y, model.predict(x), 'ro')
"""
Explanation: Log Transform
End of explanation
"""
|
henchc/Rediscovering-Text-as-Data | 10-Metadata/02-HTRC-Classification-Example.ipynb | mit | poetry_output = !htid2rsync --f data/poetry.txt | rsync -azv --files-from=- data.sharc.hathitrust.org::features/ data/poetry/
scifi_output = !htid2rsync --f data/scifi.txt | rsync -azv --files-from=- data.sharc.hathitrust.org::features/ data/scifi/
outputs = list([poetry_output, scifi_output])
subjects = ['poetry', 'scifi']
paths = {}
suffix = '.json.bz2'
for subject, output in zip(subjects, outputs):
folder = subject
filePaths = [path for path in output if path.endswith(suffix)]
paths[subject] = [os.path.join(folder, path) for path in filePaths]
fn = 'data/' + subject + '_paths.txt'
with open(fn, 'w') as f:
for path in paths[subject]:
p = str(path) + '\n'
f.write(p)
"""
Explanation: Genre classification with HTRC data
In this example, we'll be classifying texts into 2 different genres: poetry and science-fiction. JSON files containing the metadata for 100 texts in each genre need to be downloaded:
End of explanation
"""
paths = {}
subjects = ['poetry', 'scifi']
for subject in subjects:
with open('data/' + subject + '_paths.txt', 'r') as f:
paths[subject] = ['data/' + line[:len(line)-1] for line in f.readlines()]
poetry = FeatureReader(paths['poetry'])
scifi = FeatureReader(paths['scifi'])
"""
Explanation: As in the previous notebooks, we'll construct FeatureReader objects for each corpus. The line below reads in path files we created to the downloaded data:
End of explanation
"""
def createWordDict(HTRC_FeatureReader_List):
    """Build a global token->column-index mapping across several corpora.

    :param HTRC_FeatureReader_List: list of HTRC FeatureReader objects, one
        per corpus; every volume in every reader is scanned.
    :return: (wordDict, volumes) where wordDict maps each distinct token to a
        unique integer column index in the bag-of-words matrix, and volumes is
        the flat list of all Volume objects in the order they were visited.
    """
    wordDict = {}
    volumes = []
    for reader in HTRC_FeatureReader_List:
        for vol in reader.volumes():
            volumes.append(vol)
            # Token counts aggregated over the whole volume (not per page).
            tok_list = vol.tokenlist(pages=False)
            for token in tok_list.index.get_level_values('token'):
                # Assign the next free column index to each unseen token;
                # len(wordDict) is exactly the next index, so no separate
                # counter is needed.
                if token not in wordDict:
                    wordDict[token] = len(wordDict)
    return wordDict, volumes
wordDict, volumes = createWordDict([scifi, poetry])
"""
Explanation: To create our bag of words matrix, we need to keep a global dictionary of all words seen in each of our texts. We initialize "wordDict", which tracks all the words seen and records its index in the bag of words matrix. We also keep a list of volumes so that we can parse them later.
End of explanation
"""
dtm = np.zeros((200, len(wordDict.keys())))
for i, vol in enumerate(volumes):
tok_list = vol.tokenlist(pages=False)
counts = list(tok_list['count'])
tokens = tok_list.index.get_level_values('token')
for token, count in zip(tokens, counts):
try:
index = wordDict[token]
dtm[i, index] = count
except:
pass
X = dtm
y = np.zeros((200))
y[100:200] = 1
"""
Explanation: Once we construct the global dictionary, we can fill the bag of words matrix with the word counts for each volume. Once we have this, we will use it to format the training data for our model.
End of explanation
"""
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.svm import LinearSVC
from sklearn import cross_validation
tfidf = TfidfTransformer()
out = tfidf.fit_transform(X, y)
model = LinearSVC()
score = cross_validation.cross_val_score(model, X, y, cv=10)
print(np.mean(score))
"""
Explanation: We can then use the TfidfTransformer to format the bag of words matrix, so that we can fit it to our LinearSVC model. Let's see how our model does.
End of explanation
"""
model.fit(X, y)
feats = np.argsort(model.coef_[0])[:50]
top_scifi = [(list(feats).index(wordDict[w]) + 1, w) for w in wordDict.keys() if wordDict[w] in feats]
sorted(top_scifi)
feats = np.argsort(model.coef_[0])[-50:]
top_poetry = [(list(feats).index(wordDict[w]) + 1, w) for w in wordDict.keys() if wordDict[w] in feats]
sorted(top_poetry, key=lambda tup: tup[0])
"""
Explanation: We can also get the most helpful features, or words, for each class. First we'll fit the model:
End of explanation
"""
|
ewulczyn/talk_page_abuse | src/modeling/Clean Annotations.ipynb | apache-2.0 | """
# v4_annotated
user_blocked = [
'annotated_onion_layer_5_rows_0_to_5000_raters_20',
'annotated_onion_layer_5_rows_0_to_10000',
'annotated_onion_layer_5_rows_0_to_10000_raters_3',
'annotated_onion_layer_5_rows_10000_to_50526_raters_10',
'annotated_onion_layer_10_rows_0_to_1000',
'annotated_onion_layer_20_rows_0_to_1000',
'annotated_onion_layer_30_rows_0_to_1000',
]
user_random = [
'annotated_random_data_rows_0_to_5000_raters_20',
'annotated_random_data_rows_5000_to_10000',
'annotated_random_data_rows_5000_to_10000_raters_3',
'annotated_random_data_rows_10000_to_20000_raters_10',
]
article_blocked = ['article_onion_layer_5_all_rows_raters_10',]
article_random = ['article_random_data_all_rows_raters_10',]
"""
user_blocked = [
'user_blocked',
'user_blocked_2',
'user_blocked_3',
'user_blocked_4',
'user_blocked_layer_10',
'user_blocked_layer_20',
'user_blocked_layer_30',
]
user_random = [
'user_random',
'user_random_2',
'user_random_3',
'user_random_4',
'user_random_extra_baselines',
]
article_blocked = [ 'article_blocked',
'article_blocked_layer_5_extra_baselines' ]
article_random = ['article_random',
'article_random_extra_baselines']
files = {
'user': {'blocked': user_blocked, 'random': user_random},
'article': {'blocked': article_blocked, 'random': article_random}
}
dfs = []
for ns, d in files.items():
for sample, files in d.items():
for f in files:
df = pd.read_csv('../../data/annotations/raw/%s/%s.csv' % (ns,f))
df['src'] = f
df['ns'] = ns
df['sample'] = sample
dfs.append(df)
df = pd.concat(dfs)
print('# annotations: ', df.shape[0])
"""
Explanation: Clean Raw Annotations
Load raw annotations
End of explanation
"""
df.drop_duplicates(subset=['rev_id', 'sample'])['rev_id'].value_counts().value_counts()
df.index = df.rev_id
df.sample_count = df.drop_duplicates(subset=['rev_id', 'sample'])['rev_id'].value_counts()
df.sample_count.value_counts()
# just set them all to random
df['sample'][df.sample_count == 2] = 'random'
df.drop_duplicates(subset=['rev_id', 'sample'])['rev_id'].value_counts().value_counts()
del df.sample_count
print('# annotations: ', df.shape[0])
"""
Explanation: Make random and blocked samples disjoint
End of explanation
"""
df = tidy_labels(df)
"""
Explanation: Tidy is_harassment_or_attack column
End of explanation
"""
df['aggression'] = df['aggression_score'].apply(map_aggression_score_to_2class)
"""
Explanation: Remap aggression score
End of explanation
"""
df = df.query('_golden == False')
print('# annotations: ', df.shape[0])
"""
Explanation: Remove answers to test questions
End of explanation
"""
# remove all annotations for a revisions where more than 50% of annotators for that revision could not read the comment
df = remove_na(df)
print('# annotations: ', df.shape[0])
# remove all annotations where the annotator could not read the comment
df = df.query('na==False')
print('# annotations: ', df.shape[0])
"""
Explanation: Remove annotations where revision could not be read
End of explanation
"""
df['aggression_score'].value_counts(dropna=False)
df['is_harassment_or_attack'].value_counts(dropna=False)
"""
Explanation: Examine aggression_score or is_harassment_or_attack input
End of explanation
"""
df = df.dropna(subset = ['aggression_score', 'is_harassment_or_attack'])
print('# annotations: ', df.shape[0])
"""
Explanation: Drop NAs in aggression_score or is_harassment_or_attack input
End of explanation
"""
# remove all annotations from users who are ambivalent in 10% or more of revisions
# we consider these users unreliable
def ambivalent(s):
    """Return True when a label string mixes 'not_attack' with other labels."""
    mentions_not_attack = 'not_attack' in s
    is_only_not_attack = (s == 'not_attack')
    return mentions_not_attack and not is_only_not_attack
df['ambivalent'] = df['is_harassment_or_attack'].apply(ambivalent)
non_ambivalent_workers = df.groupby('_worker_id', as_index = False)['ambivalent'].mean().query('ambivalent < 0.1')
df = df.merge(non_ambivalent_workers[['_worker_id']], how = 'inner', on = '_worker_id')
print('# annotations: ', df.shape[0])
# remove all other ambivalent annotations
df = df.query('ambivalent==False')
print('# annotations: ', df.shape[0])
"""
Explanation: Remove ambivalent is_harassment_or_attack annotations
An annotations is ambivalent if it was labeled as both an attack and not an attack
End of explanation
"""
df.groupby(['rev_id', '_worker_id']).size().value_counts()
df = df.drop_duplicates(subset = ['rev_id', '_worker_id'])
print('# annotations: ', df.shape[0])
"""
Explanation: Make sure that each rev was only annotated by the same worker once
End of explanation
"""
comments = df.drop_duplicates(subset = ['rev_id'])
print(comments.shape[0])
u_comments = comments.drop_duplicates(subset = ['clean_diff'])
print(u_comments.shape[0])
comments[comments.duplicated(subset = ['clean_diff'])].head(5)
df = df.merge(u_comments[['rev_id']], how = 'inner', on = 'rev_id')
print('# annotations: ', df.shape[0])
"""
Explanation: Filter out annotations for revisions with duplicated diff content
End of explanation
"""
df['recipient'].value_counts(dropna=False)
df['attack'].value_counts(dropna=False)
df['aggression'].value_counts(dropna=False)
"""
Explanation: Check that labels are not None
End of explanation
"""
counts = df['rev_id'].value_counts().to_frame()
counts.columns = ['n']
counts['rev_id'] = counts.index
counts.shape
counts['n'].value_counts().head()
counts_enough = counts.query("n>=8")
counts_enough.shape
df = df.merge(counts_enough[['rev_id']], how = 'inner', on = 'rev_id')
print('# annotations: ', df.shape[0])
"""
Explanation: Remove annotations from all revisions that were annotated fewer than 8 times
End of explanation
"""
df.columns
cols = ['rev_id', '_worker_id', 'ns', 'sample', 'src','clean_diff', 'diff', 'insert_only', 'page_id',
'page_title', 'rev_comment', 'rev_timestamp',
'user_id', 'user_text', 'not_attack', 'other', 'quoting', 'recipient',
'third_party', 'attack', 'aggression', 'aggression_score']
df = df[cols]
"""
Explanation: Discard nuisance columns
End of explanation
"""
df.groupby(['ns', 'sample']).size()
df.to_csv('../../data/annotations/clean/annotations.tsv', index=False, sep='\t')
pd.read_csv('../../data/annotations/clean/annotations.tsv', sep='\t').shape
"""
Explanation: Summary Stats
End of explanation
"""
|
YuriyGuts/kaggle-quora-question-pairs | notebooks/feature-jaccard-ngrams.ipynb | mit | from pygoose import *
"""
Explanation: Feature: Character N-Gram Jaccard Index
Calculate Jaccard similarities between sets of character $n$-grams for different values of $n$.
Imports
This utility package imports numpy, pandas, matplotlib and a helper kg module into the root namespace.
End of explanation
"""
project = kg.Project.discover()
"""
Explanation: Config
Automatically discover the paths to various data folders and compose the project structure.
End of explanation
"""
feature_list_id = 'jaccard_ngrams'
"""
Explanation: Identifier for storing these features on disk and referring to them later.
End of explanation
"""
NGRAM_RANGE = range(2, 6)
"""
Explanation: Range of $n$ to try for the $n$-grams.
End of explanation
"""
tokens_train = kg.io.load(project.preprocessed_data_dir + 'tokens_spellcheck_train.pickle')
tokens_test = kg.io.load(project.preprocessed_data_dir + 'tokens_spellcheck_test.pickle')
tokens = tokens_train + tokens_test
"""
Explanation: Read data
Preprocessed and tokenized questions.
End of explanation
"""
def get_char_ngrams(doc, n):
    """Return every overlapping character n-gram of `doc`, left to right."""
    ngrams = []
    for start in range(len(doc) - n + 1):
        ngrams.append(doc[start:start + n])
    return ngrams
def get_jaccard_set_similarities(a, b):
    """Return three overlap ratios for sets a and b.

    Yields (|a∩b| / |a∪b|, |a∩b| / |a|, |a∩b| / |b|): the classic Jaccard
    index plus versions normalized by each set's own size.
    """
    overlap = len(a & b)
    return (
        overlap / len(a | b),
        overlap / len(a),
        overlap / len(b),
    )
def get_jaccard_similarities(q1, q2, n):
    """Jaccard similarities between the character n-gram sets of two strings.

    Strings shorter than the largest n in NGRAM_RANGE cannot yield the full
    n-gram range: two short strings are treated as identical (1, 1, 1), one
    short string as completely dissimilar (0, 0, 0).
    """
    longest_n = max(NGRAM_RANGE)
    q1_too_short = len(q1) < longest_n
    q2_too_short = len(q2) < longest_n
    if q1_too_short and q2_too_short:
        return 1, 1, 1
    if q1_too_short or q2_too_short:
        return 0, 0, 0
    first_grams = set(get_char_ngrams(q1, n))
    second_grams = set(get_char_ngrams(q2, n))
    return get_jaccard_set_similarities(first_grams, second_grams)
def get_question_pair_features(pair):
    """Compute all n-gram Jaccard features for one (q1_tokens, q2_tokens) pair.

    Each question is a token list; tokens are re-joined with spaces so that
    character n-grams can span word boundaries.
    """
    first_question = ' '.join(pair[0])
    second_question = ' '.join(pair[1])
    features = []
    for n in NGRAM_RANGE:
        features.extend(get_jaccard_similarities(first_question, second_question, n))
    return features
features = kg.jobs.map_batch_parallel(
tokens,
item_mapper=get_question_pair_features,
batch_size=1000,
)
feature_names = []
for n in NGRAM_RANGE:
feature_names.append(f'jaccard_ix_{n}gram')
feature_names.append(f'jaccard_ix_norm_q1_{n}gram')
feature_names.append(f'jaccard_ix_norm_q2_{n}gram')
"""
Explanation: Build features
Character $n$-gram similarities
End of explanation
"""
df = pd.DataFrame(features, columns=feature_names)
for n in NGRAM_RANGE[:-1]:
m = n + 1
diff_feature_name = f'jaccard_ix_diff_{n}_{m}'
df[diff_feature_name]= np.abs(df[f'jaccard_ix_{n}gram'] - df[f'jaccard_ix_{m}gram'])
feature_names.append(diff_feature_name)
"""
Explanation: Pairwise similarity differences for $n$ and $n+1$
End of explanation
"""
X_train = np.array(df.values[:len(tokens_train)], dtype='float64')
X_test = np.array(df.values[len(tokens_train):], dtype='float64')
print('X_train:', X_train.shape)
print('X_test: ', X_test.shape)
"""
Explanation: Build final features
End of explanation
"""
project.save_features(X_train, X_test, feature_names, feature_list_id)
"""
Explanation: Save features
End of explanation
"""
|
elmaso/tno-ai | aind2-cnn/cifar10-classification/cifar10_cnn.ipynb | gpl-3.0 | import keras
from keras.datasets import cifar10
# load the pre-shuffled train and test data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
"""
Explanation: Artificial Intelligence Nanodegree
Convolutional Neural Networks
In this notebook, we train a CNN to classify images from the CIFAR-10 database.
1. Load CIFAR-10 Database
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(20,5))
for i in range(36):
ax = fig.add_subplot(3, 12, i + 1, xticks=[], yticks=[])
ax.imshow(np.squeeze(x_train[i]))
"""
Explanation: 2. Visualize the First 36 Training Images
End of explanation
"""
# rescale [0,255] --> [0,1]
x_train = x_train.astype('float32')/255
x_test = x_test.astype('float32')/255
"""
Explanation: 3. Rescale the Images by Dividing Every Pixel in Every Image by 255
End of explanation
"""
from keras.utils import np_utils
# one-hot encode the labels
num_classes = len(np.unique(y_train))
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# break training set into training and validation sets
(x_train, x_valid) = x_train[5000:], x_train[:5000]
(y_train, y_valid) = y_train[5000:], y_train[:5000]
# print shape of training set
print('x_train shape:', x_train.shape)
# print number of training, validation, and test images
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print(x_valid.shape[0], 'validation samples')
"""
Explanation: 4. Break Dataset into Training, Testing, and Validation Sets
End of explanation
"""
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu',
input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.summary()
"""
Explanation: 5. Define the Model Architecture
End of explanation
"""
# compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
metrics=['accuracy'])
"""
Explanation: 6. Compile the Model
End of explanation
"""
from keras.callbacks import ModelCheckpoint
# train the model
checkpointer = ModelCheckpoint(filepath='model.weights.best.hdf5', verbose=1,
save_best_only=True)
hist = model.fit(x_train, y_train, batch_size=32, epochs=100,
validation_data=(x_valid, y_valid), callbacks=[checkpointer],
verbose=2, shuffle=True)
"""
Explanation: 7. Train the Model
End of explanation
"""
# load the weights that yielded the best validation accuracy
model.load_weights('model.weights.best.hdf5')
"""
Explanation: 8. Load the Model with the Best Validation Accuracy
End of explanation
"""
# evaluate and print test accuracy
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
"""
Explanation: 9. Calculate Classification Accuracy on Test Set
End of explanation
"""
# get predictions on the test set
y_hat = model.predict(x_test)
# define text labels (source: https://www.cs.toronto.edu/~kriz/cifar.html)
cifar10_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# plot a random sample of test images, their predicted labels, and ground truth
fig = plt.figure(figsize=(20, 8))
for i, idx in enumerate(np.random.choice(x_test.shape[0], size=32, replace=False)):
ax = fig.add_subplot(4, 8, i + 1, xticks=[], yticks=[])
ax.imshow(np.squeeze(x_test[idx]))
pred_idx = np.argmax(y_hat[idx])
true_idx = np.argmax(y_test[idx])
ax.set_title("{} ({})".format(cifar10_labels[pred_idx], cifar10_labels[true_idx]),
color=("green" if pred_idx == true_idx else "red"))
"""
Explanation: 10. Visualize Some Predictions
This may give you some insight into why the network is misclassifying certain objects.
End of explanation
"""
|
apryor6/apryor6.github.io | visualizations/seaborn/notebooks/countplot.ipynb | mit | %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['figure.figsize'] = (20.0, 10.0)
plt.rcParams['font.family'] = "serif"
df = pd.read_csv('../../../datasets/movie_metadata.csv')
df.head()
"""
Explanation: seaborn.countplot
Bar graphs are useful for displaying relationships between categorical data and at least one numerical variable. seaborn.countplot is a barplot where the dependent variable is the number of instances of each instance of the independent variable.
dataset: IMDB 5000 Movie Dataset
End of explanation
"""
# split each movie's genre list, then form a set from the unwrapped list of all genres
categories = set([s for genre_list in df.genres.unique() for s in genre_list.split("|")])
# one-hot encode each movie's classification
for cat in categories:
df[cat] = df.genres.transform(lambda s: int(cat in s))
# drop other columns
df = df[['director_name','genres','duration'] + list(categories)]
df.head()
# convert from wide to long format and remove null classificaitons
df = pd.melt(df,
id_vars=['duration'],
value_vars = list(categories),
var_name = 'Category',
value_name = 'Count')
df = df.loc[df.Count>0]
top_categories = df.groupby('Category').aggregate(sum).sort_values('Count', ascending=False).index
howmany=10
# add an indicator whether a movie is short or long, split at 100 minutes runtime
df['islong'] = df.duration.transform(lambda x: int(x > 100))
df = df.loc[df.Category.isin(top_categories[:howmany])]
# sort in descending order
#df = df.loc[df.groupby('Category').transform(sum).sort_values('Count', ascending=False).index]
df.head()
"""
Explanation: For the bar plot, let's look at the number of movies in each category, allowing each movie to be counted more than once.
End of explanation
"""
p = sns.countplot(data=df, x = 'Category')
"""
Explanation: Basic plot
End of explanation
"""
p = sns.countplot(data=df,
x = 'Category',
hue = 'islong')
"""
Explanation: color by a category
End of explanation
"""
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong')
"""
Explanation: make plot horizontal
End of explanation
"""
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1)
"""
Explanation: Saturation
End of explanation
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2)
sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
ax=ax[1])
"""
Explanation: Targeting a non-default axes
End of explanation
"""
import numpy as np
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=7*np.arange(num_categories))
"""
Explanation: Add error bars
End of explanation
"""
import numpy as np
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=7*np.arange(num_categories),
edgecolor=(0,0,0),
linewidth=2)
"""
Explanation: add black bounding lines
End of explanation
"""
import numpy as np
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=7*np.arange(num_categories),
edgecolor=(0,0,0),
linewidth=2,
fill=False)
import numpy as np
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=7*np.arange(num_categories),
edgecolor=(0,0,0),
linewidth=2)
sns.set(font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=3*np.arange(num_categories),
edgecolor=(0,0,0),
linewidth=2)
help(sns.set)
plt.rcParams['font.family'] = "cursive"
#sns.set(style="white",font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=3*np.arange(num_categories),
edgecolor=(0,0,0),
linewidth=2)
plt.rcParams['font.family'] = 'Times New Roman'
#sns.set_style({'font.family': 'Helvetica'})
sns.set(style="white",font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=3*np.arange(num_categories),
edgecolor=(0,0,0),
linewidth=2)
bg_color = (0.25, 0.25, 0.25)
sns.set(rc={"font.style":"normal",
"axes.facecolor":bg_color,
"figure.facecolor":bg_color,
"text.color":"black",
"xtick.color":"black",
"ytick.color":"black",
"axes.labelcolor":"black",
"axes.grid":False,
'axes.labelsize':30,
'figure.figsize':(20.0, 10.0),
'xtick.labelsize':25,
'ytick.labelsize':20})
#sns.set_style({'font.family': 'Helvetica'})
#sns.set(style="white",font_scale=1.25)
num_categories = df.Category.unique().size
p = sns.countplot(data=df,
y = 'Category',
hue = 'islong',
saturation=1,
xerr=3*np.arange(num_categories),
edgecolor=(0,0,0),
linewidth=2,
palette="Dark2")
leg = p.get_legend()
leg.set_title("Duration")
labs = leg.texts
labs[0].set_text("Short")
labs[1].set_text("Long")
leg.get_title().set_color('white')
for lab in labs:
lab.set_color('white')
p.axes.xaxis.label.set_text("Counts")
plt.text(900,2.8, "Count Plot", fontsize = 95, color='white', fontstyle='italic')
p.get_figure().savefig('../../figures/countplot.png')
"""
Explanation: Remove color fill
End of explanation
"""
|
PDBeurope/PDBe_Programming | search_interface/notebooks/search_introduction.ipynb | apache-2.0 | PDBE_SOLR_URL = "http://www.ebi.ac.uk/pdbe/search/pdb"
# or https://www.ebi.ac.uk/pdbe/search/pdb/select?rows=0&q=status:REL&wt=json
from mysolr import Solr
solr = Solr(PDBE_SOLR_URL, version=4)
response = solr.search(q='status:REL', rows=0)
documents = response.documents
print("Number of results:", len(documents))
#fields = response.documents[0].keys()
#print("Number of fields in the documents:", [len(rd.keys()) for rd in documents])
response.raw_content
"""
Explanation: Introduction
The new search service under development at PDBe is powered by Apache Solr.
A pre-release version of user interface is available here: http://wwwdev.ebi.ac.uk/pdbe/entry/search/index
For programmatic usage, a Solr instance is available here: http://wwwdev.ebi.ac.uk/pdbe/search/pdb
Please note that the search service will be released in 2015 - at that point, it would be better to use URLs similar to those above, but hosted from www instead of wwwdev.
Getting started
To avoid writing long Solr URLs by hand and having to encode them etc., we will use a Solr client library called mysolr. It is pretty lightweight and easy to install, e.g. I installed it on my Redhat (Enterprise 6.6) machine as follows:
easy_install mysolr==0.7
There are many such client libraries available for python as well as other languages.
Let us now make a simple query - let us look for a PDB entry.
End of explanation
"""
def join_with_AND(query_params) :
    '''Build a Solr query string by AND-ing together field:value clauses.'''
    clauses = []
    for field, value in query_params.items():
        clauses.append("{0}:{1}".format(field, value))
    return " AND ".join(clauses)
def execute_solr_query(query, query_fields) :
    '''Run a Solr search and report how many entity documents matched.

    NOTE: mutates `query` in place by setting its "q" key from `query_fields`,
    and relies on the module-level `solr` client.

    :param query: dict of Solr request parameters (e.g. rows, fl); "q" is overwritten
    :param query_fields: dict of field -> value pairs AND-ed into the query string
    :return: list of matching Solr documents (one per polymeric entity)
    '''
    query["q"] = join_with_AND(query_fields) # add q built by AND-ing field:value pairs
    response = solr.search(**query)
    documents = response.documents
    # Each document is one polymeric entity; distinct pdb_id values count whole entries.
    print("Found %d matching entities in %d entries." % (len(documents), len({rd["pdb_id"] for rd in documents})))
    return documents
query_detail = {
"pfam_name" : "Lipocalin",
"resolution" : "[1 TO 2]",
}
query = {
"rows" : pow(10,8), # i.e. all matching documents are required in response
"fl" : "pdb_id, entity_id", # restrict the returned documents to these fields only
}
docs = execute_solr_query(query, query_detail)
"""
Explanation: There are 3 documents in Solr response for a single PDB id, and each has >75 fields. At this juncture, it is essential to understand what the document represents and contains before proceeding further.
Entity document
PDBe Solr instance serves documents based on polymeric entities in PDB entries, i.e. each document indexed by Solr represents polymeric molecules of type protein, sugar, DNA, RNA or DNA/RNA hybrid. This is why for entry 2qk9 we get 3 documents in the response, each representing the protein, RNA and DNA molecule in that entry.
Fields in PDBe's entity-based Solr document cover a wide range of properties, such as entry's experimental details, details of deposition and primary publication, entity's taxonomy, entry's quality, entity's cross references to UniProt and popular domain databases, biological assembly, etc. They are documented here: http://wwwdev.ebi.ac.uk/pdbe/api/doc/search.html
Solr features
It is also useful now to understand a little more about Solr querying. Solr has a rich and complex query syntax, described at http://wiki.apache.org/solr/CommonQueryParameters and elsewhere.
The fields of immediate relevance to us in this tutorial are:
* q - the query itself. There is a lot of flexibility in describing a query, e.g. fields, wildcards, case-insensitivity, logical operators, ranges, etc.
* rows - number of results returned by Solr. Needs to be explicitly set in mysolr because it defaults to 10. Useful if only part of results are desired.
* fl - fields returned in each document. This is useful to reduce the size of response.
Solr capabilities combined with the wide-ranging description in entity document can help us write really powerful Solr queries to find precisely the entries or polymers of interest.
Examples
Now let us write a query to find entities containing a Pfam domain called "Lipocalin" in X-ray entries of decent resolution (1Å - 2Å).
End of explanation
"""
query_detail = {
"pfam_name" : "Lipocalin",
"resolution" : "[1 TO 2]",
"tax_id" : "9606",
}
query = {
"rows" : pow(10,8), # i.e. all matching documents are required in response
"fl" : "pdb_id, entity_id", # restrict the returned documents to these fields only
}
docs = execute_solr_query(query, query_detail)
"""
Explanation: Let us narrow down to proteins of human origin.
End of explanation
"""
query_detail = {
"pfam_name" : "Lipocalin",
"resolution" : "[1 TO 2]",
"tax_id" : "9606",
"entry_authors" : "*Kleywegt*",
}
query = {
"rows" : pow(10,8), # i.e. all matching documents are required in response
"fl" : "pdb_id, entity_id", # restrict the returned documents to these fields only
}
docs = execute_solr_query(query, query_detail)
"""
Explanation: Let us look for entries deposited by Kleywegt.
End of explanation
"""
|
azhurb/deep-learning | intro-to-tensorflow/intro_to_tensorflow.ipynb | mit | import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
"""
Explanation: <h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from Introduction to TensorFlow to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of a letter from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "All modules imported".
End of explanation
"""
def download(url, file):
    """
    Fetch <url> into the local path <file>, skipping files that already exist.
    :param url: URL to file
    :param file: Local file path
    """
    # Guard clause: nothing to do when the file is already on disk.
    if os.path.isfile(file):
        return
    print('Downloading ' + file + '...')
    urlretrieve(url, file)
    print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Extract image features and letter labels from a zip archive.
    :param file: The zip file to extract the data from
    :return: (features, labels) as numpy arrays; one flattened float32 image
        vector and one single-letter label per image file in the archive
    """
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # Progress Bar
        filenames_pbar = tqdm(zipf.namelist(), unit='files')
        for filename in filenames_pbar:
            # Directory entries carry no image payload.
            if filename.endswith('/'):
                continue
            with zipf.open(filename) as image_file:
                image = Image.open(image_file)
                image.load()
                # Flatten the pixel grid to a 1-D float32 vector
                # (float32 to save on memory space).
                feature = np.array(image, dtype=np.float32).flatten()
            # The first character of the basename encodes the letter label.
            label = os.path.split(filename)[1][0]
            features.append(feature)
            labels.append(label)
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
"""
Explanation: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
End of explanation
"""
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
    # Min-Max scaling: X' = a + (X - Xmin) * (b - a) / (Xmax - Xmin)
    range_min = 0.1
    range_max = 0.9
    pixel_min = 0
    pixel_max = 255
    return range_min + ((image_data - pixel_min) * (range_max - range_min)) / (pixel_max - pixel_min)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
# The expected arrays verify that 0 -> 0.1, 255 -> 0.9, and that intermediate
# pixel values are scaled linearly between those bounds.
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
    [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
     0.125098039216, 0.128235294118, 0.13137254902, 0.9],
    decimal=3)
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
    [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
     0.896862745098, 0.9])
# Guard flags (set earlier in the notebook) make these cells safe to re-run:
# the features are only normalized once.
if not is_features_normal:
    train_features = normalize_grayscale(train_features)
    test_features = normalize_grayscale(test_features)
    is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
    # Turn labels into numbers and apply One-Hot Encoding
    encoder = LabelBinarizer()
    encoder.fit(train_labels)
    train_labels = encoder.transform(train_labels)
    test_labels = encoder.transform(test_labels)
    # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
    train_labels = train_labels.astype(np.float32)
    test_labels = test_labels.astype(np.float32)
    is_labels_encod = True
print('Labels One-Hot Encoded')
#print(test_labels)
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
# (5% of the training data is held out for validation; the fixed random_state
# keeps the split reproducible across runs)
train_features, valid_features, train_labels, valid_labels = train_test_split(
    train_features,
    train_labels,
    test_size=0.05,
    random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
    print('Saving data to pickle file...')
    try:
        with open('notMNIST.pickle', 'wb') as pfile:
            pickle.dump(
                {
                    'train_dataset': train_features,
                    'train_labels': train_labels,
                    'valid_dataset': valid_features,
                    'valid_labels': valid_labels,
                    'test_dataset': test_features,
                    'test_labels': test_labels,
                },
                pfile, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
    print('Data cached in pickle file.')
"""
Explanation: <img src="image/Mean_Variance_Image.png" style="height: 75%;width: 75%; position: relative; right: 5%">
Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the normalize_grayscale() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
If you're having trouble solving problem 1, you can view the solution here.
End of explanation
"""
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data cached by the earlier preprocessing cells, so the notebook
# can be resumed from this checkpoint without re-running the download steps.
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    pickle_data = pickle.load(f)
    train_features = pickle_data['train_dataset']
    train_labels = pickle_data['train_labels']
    valid_features = pickle_data['valid_dataset']
    valid_labels = pickle_data['valid_labels']
    test_features = pickle_data['test_dataset']
    test_labels = pickle_data['test_labels']
    del pickle_data  # Free up memory
print('Data and modules loaded.')
"""
Explanation: Checkpoint
All your progress is now saved to the pickle file. If you need to leave and come back to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
End of explanation
"""
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
# Shapes are left unspecified so the placeholders accept any batch size;
# values are supplied at session time via the feed dicts below.
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
# Weights start from a truncated normal distribution; biases start at zero.
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
# NOTE: these asserts poke at private TF1 attributes (_shape, _variable,
# _dtype) and are version-sensitive by design of the original lab.
assert features._shape == None or (\
    features._shape.dims[0].value is None and\
    features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
    labels._shape.dims[0].value is None and\
    labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy (summed over the 10 classes, one value per example)
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss (mean over the batch)
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases: run the loss once on each split to confirm the graph is wired up
with tf.Session() as session:
    session.run(init)
    session.run(loss, feed_dict=train_feed_dict)
    session.run(loss, feed_dict=valid_feed_dict)
    session.run(loss, feed_dict=test_feed_dict)
    biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
"""
Explanation: Problem 2
Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.
<img src="image/network_diagram.png" style="height: 40%;width: 40%; position: relative; right: 10%">
For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network.
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors:
- features
- Placeholder tensor for feature data (train_features/valid_features/test_features)
- labels
- Placeholder tensor for label data (train_labels/valid_labels/test_labels)
- weights
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">tf.truncated_normal() documentation</a> for help.
- biases
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> tf.zeros() documentation</a> for help.
If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available here.
End of explanation
"""
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 5
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
# Mini-batch SGD training loop: slice the (already shuffled) training data
# into consecutive batches and periodically record loss/accuracy for plotting.
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer and get loss
            _, l = session.run(
                [optimizer, loss],
                feed_dict={features: batch_features, labels: batch_labels})
            # Log every 50 batches
            if not batch_i % log_batch_step:
                # Calculate Training and Validation accuracy
                training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
                validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
                # Log batches (x-axis is the cumulative batch count)
                previous_batch = batches[-1] if batches else 0
                batches.append(log_batch_step + previous_batch)
                loss_batch.append(l)
                train_acc_batch.append(training_accuracy)
                valid_acc_batch.append(validation_accuracy)
    # Check accuracy against Validation data
    validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Plot the recorded loss (top) and train/validation accuracy (bottom).
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
"""
Explanation: <img src="image/Learn_Rate_Tune_Image.png" style="height: 70%;width: 70%">
Problem 3
Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.
Parameter configurations:
Configuration 1
* Epochs: 1
* Learning Rate:
* 0.8
* 0.5
* 0.1
* 0.05
* 0.01
* Epochs: 1
* Learning Rate: 0.1
Configuration 2
* Epochs:
* 1
* 2
* 3
* 4
* 5
* Learning Rate: 0.2
Epochs: 4
Learning Rate: 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
If you're having trouble solving problem 3, you can view the solution here.
End of explanation
"""
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
# Retrain from scratch with the chosen hyperparameters, then score once on
# the held-out test set.
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer
            _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
    # Check accuracy against Test data
    test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
"""
Explanation: Test
You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
End of explanation
"""
|
frictionlessdata/datapackage-pipelines | TUTORIAL.ipynb | mit | %%sh
python3 -m pip install -qU datapackage-pipelines[seedup]
"""
Explanation: Datapackage Pipelines Tutorial
This tutorial is built as a Jupyter notebook which allows you to run and modify the code inline and can be used as a starting point for new Datapackage Pipelines projects.
Installation
Follow the DataFlows Tutorial installation instructions.
Save this tutorial in curreny working directory (right-click and save on following link): https://raw.githubusercontent.com/frictionlessdata/datapackage-pipelines/master/TUTORIAL.ipynb
Start Jupyter Lab in the dataflows environment and open the datapackage pipelines tutorial notebook you downloaded.
Install datapackage-pipelines
End of explanation
"""
%%sh
# Print the installed datapackage-pipelines version to confirm the install.
dpp version
"""
Explanation: This installs datapackage-pipelines with speed optimizations, if you encounter problems installing it, remove the [speedup] suffix.
Verify you have the latest datapackage-pipelines version
End of explanation
"""
%%writefile countries_population_flow.py
# notice that we don't import any datapackage-pipelines modules
# all the flow code is written purely with the DataFlows library
from dataflows import Flow, dump_to_path, load, add_metadata, printer, update_resource
from lxml import etree
from urllib.request import urlopen
# Generator flow step, copied from the DataFlows tutorial
# it just spews rows of data - in this case, countries populations scraped from Wikipedia
def country_population():
    """Yield {'name': ..., 'population': ...} row dicts scraped from a pinned
    revision of the Wikipedia countries-by-population page."""
    # Read the Wikipedia page and parse it using etree
    # (recover=True lets lxml tolerate imperfect markup in the page)
    page = urlopen('https://en.wikipedia.org/w/index.php?title=List_of_countries_and_dependencies_by_population&oldid=987469839').read()
    parser = etree.XMLParser(recover=True)
    tree = etree.fromstring(page, parser)
    # Iterate on all tables, rows and cells
    for table in tree.findall('.//table'):
        if 'wikitable' in table.attrib.get('class', ''):
            for row in table.find('tbody').findall('tr'):
                cells = row.findall('td')
                if len(cells) > 3:
                    # If a matching row is found...
                    name = cells[0].find('.//a').attrib.get('title').replace("Demographics of","")
                    population = cells[1].text
                    # ... yield a row with the information
                    yield dict(
                        name=name,
                        population=population
                    )
# The main entrypoint for Datapackage Pipelines, each flow file should have a single flow function
def flow(*args):
    """Return the Flow object that Datapackage Pipelines will execute."""
    return Flow(
        country_population(),
        update_resource('res_1', **{
            # Set a proper name for the resource
            'name': 'countries_population',
            # Always set a path as well, even if you don't intend to save it to the filesystem
            'path': 'countries_population.csv',
            # dpp:streaming property is required to let Datapackage Pipelines know it should handle this resource
            'dpp:streaming': True,
        })
    )
# Entrypoint for running the flow directly, without Datapackage Pipelines
if __name__ == '__main__':
    # Add a printer step and run the flow
    Flow(flow(), printer(num_rows=1, tablefmt='html')).process()
"""
Explanation: Create a flow
Datapackage-pipelines uses the DataFlows library's Flow objects as the basic building blocks for larger pipeline systems.
It's recommended to follow the DataFlows Tutorial to get a better understanding of the DataFlows concepts which will be used here.
Run the following cell to create a file called countries_population_flow.py which scrapes a list of countries populations from wikipedia.
This flow is copied from the DataFlows tutorial, the processing function country_population is exactly the same, the flow and how we run it is changed to integrate with Datapackage Pipelines:
End of explanation
"""
# Execute the flow file directly (uses its __main__ entrypoint).
%run countries_population_flow.py
"""
Explanation: Run the flow:
End of explanation
"""
%%writefile pipeline-spec.yaml
# Each top-level key is a pipeline id; each step is either a DataFlows
# flow module ("flow") or a standard-library processor ("run").
countries-population:
  pipeline:
    - flow: countries_population_flow
    - run: dump.to_path
      parameters:
        out-path: data/countries_population
Explanation: This is standard DataFlows library usage, now let's see what datapackage-pipelines provides
Create a pipeline spec
Datapackage-pipelines uses yaml files to to define pipelines of flow steps.
Create a spec to run the countries population flow and save to a path:
End of explanation
"""
%%sh
# With no arguments, dpp scans for pipeline-spec.yaml files and lists the
# pipelines it finds, along with their status.
dpp
"""
Explanation: Using dpp
dpp is the CLI interface to the datapackage-pipelines library. It is used to list and run available pipelines.
Let's list the available pipelines to see if our countries-population pipeline is available:
End of explanation
"""
%%sh
# Execute a single pipeline by its id.
dpp run ./countries-population
"""
Explanation: Run the pipeline:
End of explanation
"""
%%writefile pipeline-spec.yaml
countries-population:
  pipeline:
    - flow: countries_population_flow
    - run: dump.to_path
      parameters:
        out-path: data/countries_population
sorted_countries_by_name:
  # Dependencies make dpp treat this pipeline as "dirty" (needing a re-run)
  # whenever the upstream pipeline or its output datapackage changes.
  dependencies:
    - pipeline: ./countries-population
    - datapackage: data/countries_population/datapackage.json
  pipeline:
    - run: load
      parameters:
        from: data/countries_population/datapackage.json
        resources: ['countries_population']
    - run: sort
      parameters:
        resources: ['countries_population']
        sort-by: '{name}'
    - run: dump.to_path
      parameters:
        out-path: data/sorted_countries_by_name
Explanation: Pipeline Dependencies
Let's add another pipeline which depends on the countries-population pipeline.
This time we will use just the pipeline spec yaml to write the pipeline, without any DataFlows code (although DataFlows library is used to implement the processors we are using here):
End of explanation
"""
%%sh
# Reset the pipelines state, then list the pipelines again (they will all
# show as needing execution).
dpp init
dpp
"""
Explanation: Clear the pipelines state using dpp init and list the available pipelines:
End of explanation
"""
%%sh
# Run every dirty pipeline, in dependency order.
dpp run --dirty all
"""
Explanation: You can see that the new pipeline can't run until it's dependency is executed.
Let's run all the "Dirty" dependencies:
End of explanation
"""
from dataflows import Flow, load, printer
# Load the datapackage produced by the pipeline and print one sample row
# to verify the sort worked.
Flow(
    load('data/sorted_countries_by_name/datapackage.json'),
    printer(num_rows=1, tablefmt='html')
).process()
"""
Explanation: Inspect the created datapackage
End of explanation
"""
%%writefile pipeline-spec.yaml
double-winners:
  pipeline:
    - run: load
      parameters:
        name: emmies
        from: https://raw.githubusercontent.com/datahq/dataflows/master/data/emmy.csv
    - run: load
      parameters:
        name: oscars
        from: https://raw.githubusercontent.com/datahq/dataflows/master/data/academy.csv
    # Keep only the Emmy-winning rows
    - run: filter
      parameters:
        resources: ['emmies']
        in:
          - winner: 1
    # Rename the emmies resource/field so it can act as the join source
    - run: concatenate
      parameters:
        target: {'name': 'emmies_filtered'}
        resources: ['emmies']
        fields:
          emmy_nominee: ['nominee']
    # Inner join (full: false): keep only Oscar rows whose Name also won an Emmy
    - run: join
      parameters:
        source:
          name: 'emmies_filtered'
          key: ['emmy_nominee']
          delete: true
        target:
          name: 'oscars'
          key: ['Name']
        fields: {}
        full: false
    # Of those, keep only the Oscar winners
    - run: filter
      parameters:
        in:
          - Winner: "1"
    - run: dump.to_path
      parameters:
        out-path: data/double_winners
Explanation: Pipeline processors
Datapackage Pipelines has a standard library of processors, like the sort processor used previously. These processors correspond to DataFlows standard library processors.
See the Datapackage Pipelines README for reference and usage examples.
An example showing usage of common processors:
End of explanation
"""
%%sh
# Execute the double-winners pipeline defined above.
dpp run ./double-winners
"""
Explanation: Run the pipeline:
End of explanation
"""
from dataflows import Flow, printer, load
# Print one sample row from the generated datapackage for inspection.
Flow(load('data/double_winners/datapackage.json'), printer(tablefmt='html', num_rows=1)).process()
"""
Explanation: Print the datapackage:
End of explanation
"""
%%sh
# Fetch the pipelines server image from Docker Hub.
docker pull frictionlessdata/datapackage-pipelines
"""
Explanation: Pipelines Server
Running pipelines on your laptop is fine for many use-cases but sometimes you want to run pipelines in a more reproducible, scalable and automatic fashion.
The Datapackage Pipelines Server is a Docker image which provides the core functionality to achieve this.
To start a local pipelines server for development, you will need to install Docker for Windows,
Mac or Linux
Pull the datapackage-pipelines image:
End of explanation
"""
%%sh
# -d: run detached; -v: mount the current directory (with our specs) into the
# container read-write; -p 5000:5000: expose the dashboard on localhost:5000.
docker run -d --name dpp -v `pwd`:/pipelines:rw -p 5000:5000 frictionlessdata/datapackage-pipelines server
"""
Explanation: Start a local pipelines server, mounting the current working directory into the container:
End of explanation
"""
%%writefile pipeline-spec.yaml
countries-population:
  schedule:
    # minute hour day_of_week day_of_month month_of_year
    # '* * * * *' == run every minute (handy for demoing the scheduler)
    crontab: '* * * * *'
  pipeline:
    - flow: countries_population_flow
    - run: dump.to_path
      parameters:
        out-path: data/countries_population
sorted_countries_by_name:
  # Re-executed automatically whenever the upstream pipeline or its output
  # datapackage changes.
  dependencies:
    - pipeline: ./countries-population
    - datapackage: data/countries_population/datapackage.json
  pipeline:
    - run: load
      parameters:
        from: data/countries_population/datapackage.json
        resources: ['countries_population']
    - run: sort
      parameters:
        resources: ['countries_population']
        sort-by: '{name}'
    - run: dump.to_path
      parameters:
        out-path: data/sorted_countries_by_name
Explanation: After a few seconds, the pipelines dashboard should be available at http://localhost:5000
New / modified pipelines and dirty dependencies are executed by the pipelines server automatically.
The server also supports scheduled pipelines for periodical execution.
Let's see this in action:
End of explanation
"""
%%sh
# Show the last few log lines from the pipelines server container.
docker logs dpp --tail 5
"""
Explanation: Inspect the Pipelines server logs and wait for Update Pipelines task to complete and pipelines to start running
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/4_keras_functional_api.ipynb | apache-2.0 | # Use the chown command to change the ownership of the repository.
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.3.0 || pip install tensorflow==2.3.0
"""
Explanation: Introducing the Keras Functional API on Vertex AI Platform
Learning objectives
1. Understand embeddings and how to create them with the feature column API.
1. Understand Deep and Wide models and when to use them.
1. Understand the Keras functional API and how to build a deep and wide model with it.
1. Learn how to deploy the Model to Vertex AI and make predictions with the Keras model.
Introduction
In the last notebook, you learned about the Keras Sequential API. The Keras Functional API provides an alternate way of building models which is more flexible. With the Functional API, we can build models with more complex topologies, multiple input or output layers, shared layers or non-sequential data flows (e.g. residual layers).
In this notebook you'll use what we learned about feature columns to build a Wide & Deep model. Recall, that the idea behind Wide & Deep models is to join the two methods of learning through memorization and generalization by making a wide linear model and a deep learning model to accommodate both. You can have a look at the original research paper here: Wide & Deep Learning for Recommender Systems.
<img src='assets/wide_deep.png' width='80%'>
<sup>(image: https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html)</sup>
The Wide part of the model is associated with the memory element. In this case, we train a linear model with a wide set of crossed features and learn the correlation of this related data with the assigned label. The Deep part of the model is associated with the generalization element where we use embedding vectors for features. The best embeddings are then learned through the training process. While both of these methods can work well alone, Wide & Deep models excel by combining these techniques together.
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
End of explanation
"""
# Install the required numpy version (pinned so the environment stays
# reproducible alongside the pinned TensorFlow above).
!pip install numpy==1.21.0
"""
Explanation: Kindly ignore the deprecation warnings and incompatibility errors.
End of explanation
"""
# Importing necessary modules/libraries such as numpy, pandas and datetime.
import datetime
import os
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import aiplatform
from matplotlib import pyplot as plt
from tensorflow import feature_column as fc
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Input, Dense, DenseFeatures, concatenate
from tensorflow.keras.models import Sequential
print(tf.__version__)
%matplotlib inline
# It sets the backend of matplotlib to the 'inline' backend. The output of plotting commands is displayed inline within frontends, directly
# below the code cell that produced it. The resulting plots will then also be stored in the notebook document.
%matplotlib inline
"""
Explanation: Kindly ignore the deprecation warnings and incompatibility errors.
Restart the kernel before proceeding further (On the Notebook menu, select Kernel > Restart Kernel > Restart).
Start by importing the necessary libraries for this lab.
End of explanation
"""
# List the taxifare CSV shards created in the first notebook of this sequence.
!ls -l ../data/*.csv
"""
Explanation: Load raw data
We will use the taxifare dataset, using the CSV files that we created in the first notebook of this sequence. Those files have been saved into ../data.
End of explanation
"""
# CSV schema for the taxifare files: column order, label column, per-column
# defaults (which also fix the parsed dtypes), and columns that are present
# in the CSVs but not used as model inputs.
CSV_COLUMNS = [
    'fare_amount',
    'pickup_datetime',
    'pickup_longitude',
    'pickup_latitude',
    'dropoff_longitude',
    'dropoff_latitude',
    'passenger_count',
    'key'
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
UNWANTED_COLS = ['pickup_datetime', 'key']


def features_and_labels(row_data):
    """Split one parsed CSV row (a dict of column -> value) into
    (features, label).

    The row dict is modified in place: the label column and the unwanted
    columns are popped out, and whatever remains is the feature dict.
    """
    target = row_data.pop(LABEL_COLUMN)
    for column in UNWANTED_COLS:
        row_data.pop(column)
    return row_data, target
# Read CSV shards matching `pattern` into a batched tf.data.Dataset.
def create_dataset(pattern, batch_size=1, mode='eval'):
    """Return a tf.data.Dataset of (features_dict, label) batches.

    In 'train' mode the data is shuffled and repeated indefinitely;
    in 'eval' mode it is read once, in file order.
    """
    ds = tf.data.experimental.make_csv_dataset(
        pattern, batch_size, CSV_COLUMNS, DEFAULTS)
    ds = ds.map(features_and_labels)
    if mode == 'train':
        ds = ds.shuffle(buffer_size=1000).repeat()
    # Overlap input preparation with model execution (1 here ~= AUTOTUNE).
    return ds.prefetch(1)
"""
Explanation: Use tf.data to read the CSV files
We wrote these functions for reading data from the csv files above in the previous notebook. For this lab we will also include some additional engineered features in our model. In particular, we will compute the difference in latitude and longitude, as well as the Euclidean distance between the pick-up and drop-off locations. We can accomplish this by adding these new features to the features dictionary with the function add_engineered_features below.
Note that we include a call to this function when collecting our features dict and labels in the features_and_labels function below as well.
End of explanation
"""
# TODO 1
# 1. Bucketize latitudes and longitudes into NBUCKETS intervals each,
#    covering the NYC area (lat ~38..42, lon ~-76..-72).
NBUCKETS = 16
latbuckets = np.linspace(start=38.0, stop=42.0, num=NBUCKETS).tolist()
lonbuckets = np.linspace(start=-76.0, stop=-72.0, num=NBUCKETS).tolist()
# NOTE(review): the original bound the *lat names to longitude columns and
# vice versa. The names below are corrected so each variable matches the
# column it wraps; the crossed columns keep the original (longitude,
# latitude) input order, so the resulting features are unchanged.
fc_bucketized_plon = fc.bucketized_column(
    source_column=fc.numeric_column("pickup_longitude"), boundaries=lonbuckets)
fc_bucketized_plat = fc.bucketized_column(
    source_column=fc.numeric_column("pickup_latitude"), boundaries=latbuckets)
fc_bucketized_dlon = fc.bucketized_column(
    source_column=fc.numeric_column("dropoff_longitude"), boundaries=lonbuckets)
fc_bucketized_dlat = fc.bucketized_column(
    source_column=fc.numeric_column("dropoff_latitude"), boundaries=latbuckets)
# 2. Cross features for locations: (lon-bucket x lat-bucket) grid cells for
#    the dropoff point, the pickup point, and the pickup/dropoff pair.
fc_crossed_dloc = fc.crossed_column(
    [fc_bucketized_dlon, fc_bucketized_dlat],
    hash_bucket_size=NBUCKETS * NBUCKETS)
fc_crossed_ploc = fc.crossed_column(
    [fc_bucketized_plon, fc_bucketized_plat],
    hash_bucket_size=NBUCKETS * NBUCKETS)
fc_crossed_pd_pair = fc.crossed_column(
    [fc_crossed_dloc, fc_crossed_ploc],
    hash_bucket_size=NBUCKETS**4)
# 3. Create embedding columns for the crossed columns (dense, learnable
#    low-dimensional representations of the sparse crosses)
fc_pd_pair = fc.embedding_column(categorical_column=fc_crossed_pd_pair, dimension=3)
fc_dloc = fc.embedding_column(categorical_column=fc_crossed_dloc, dimension=3)
fc_ploc = fc.embedding_column(categorical_column=fc_crossed_ploc, dimension=3)
"""
Explanation: Feature columns for Wide and Deep model
For the Wide columns, we will create feature columns of crossed features. To do this, we'll create a collection of Tensorflow feature columns to pass to the tf.feature_column.crossed_column constructor. The Deep columns will consist of numeric columns and the embedding columns we want to create.
End of explanation
"""
# TODO 2
# Wide columns: sparse one-hot crossed features (memorization branch).
wide_columns = [
    # One-hot encoded feature crosses
    fc.indicator_column(fc_crossed_dloc),
    fc.indicator_column(fc_crossed_ploc),
    fc.indicator_column(fc_crossed_pd_pair)
]
# Deep columns: dense embeddings and raw numerics (generalization branch).
deep_columns = [
    # Embedding_column to "group" together ...
    fc.embedding_column(fc_crossed_pd_pair, dimension=10),
    # Numeric columns
    fc.numeric_column("pickup_latitude"),
    fc.numeric_column("pickup_longitude"),
    fc.numeric_column("dropoff_longitude"),
    fc.numeric_column("dropoff_latitude")
]
"""
Explanation: Gather list of feature columns
Next we gather the list of wide and deep feature columns we'll pass to our Wide & Deep model in Tensorflow. Recall, wide columns are sparse, have linear relationship with the output while continuous columns are deep, have a complex relationship with the output. We will use our previously bucketized columns to collect crossed feature columns and sparse feature columns for our wide columns, and embedding feature columns and numeric features columns for the deep columns.
End of explanation
"""
# Names of the raw numeric features the Keras model receives.
INPUT_COLS = [
    'pickup_longitude',
    'pickup_latitude',
    'dropoff_longitude',
    'dropoff_latitude',
    'passenger_count'
]
# One Input layer per column; shape=() means a single scalar per example
# (the batch dimension is implicit).
inputs = {colname : Input(name=colname, shape=(), dtype='float32')
          for colname in INPUT_COLS
}
"""
Explanation: Build a Wide and Deep model in Keras
To build a wide-and-deep network, we connect the sparse (i.e. wide) features directly to the output node, but pass the dense (i.e. deep) features through a set of fully connected layers. Here’s that model architecture looks using the Functional API.
First, we'll create our input columns using tf.keras.layers.Input.
End of explanation
"""
# Root-mean-squared error, used as a Keras metric during training/evaluation.
def rmse(y_true, y_pred):
    """Compute sqrt(mean((y_pred - y_true)^2)) over the batch."""
    squared_error = tf.square(y_pred - y_true)
    return tf.sqrt(tf.reduce_mean(squared_error))
# TODO 3
def build_model(dnn_hidden_units):
    """Build and compile the wide-and-deep Keras model.

    Args:
        dnn_hidden_units: list of ints, one per hidden layer of the deep branch.

    Returns:
        A compiled tf.keras Model mapping the feature inputs to a single
        fare prediction.
    """
    # Deep branch: dense/embedding features pass through a stack of relu layers.
    deep_branch = DenseFeatures(deep_columns, name='deep_inputs')(inputs)
    for layer_size in dnn_hidden_units:
        deep_branch = Dense(layer_size, activation='relu')(deep_branch)
    # Wide branch: sparse crossed features connect directly toward the output.
    wide_branch = DenseFeatures(wide_columns, name='wide_inputs')(inputs)
    # Merge both branches and map them to one regression output.
    merged = concatenate(inputs=[deep_branch, wide_branch], name='combined')
    prediction = Dense(units=1, activation=None, name='prediction')(merged)
    model = Model(inputs=list(inputs.values()), outputs=prediction)
    model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"])
    return model
"""
Explanation: Then, we'll define our custom RMSE evaluation metric and build our wide and deep model.
End of explanation
"""
HIDDEN_UNITS = [10,10]
# Calling the build model
model = build_model(dnn_hidden_units=HIDDEN_UNITS)
# Converts a Keras plot_model to see a diagram of the model that we have created.
tf.keras.utils.plot_model(model, show_shapes=False, rankdir='LR')
"""
Explanation: Next, we can call the build_model to create the model. Here we'll have two hidden layers, each with 10 neurons, for the deep part of our model. We can also use plot_model to see a diagram of the model we've created.
End of explanation
"""
BATCH_SIZE = 1000
NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset will repeat, wrap around
NUM_EVALS = 50 # how many times to evaluate
NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample
trainds = create_dataset(
pattern='../data/taxi-train*',
batch_size=BATCH_SIZE,
mode='train')
evalds = create_dataset(
pattern='../data/taxi-valid*',
batch_size=BATCH_SIZE,
mode='eval').take(NUM_EVAL_EXAMPLES//1000)
%%time
# Here, %%time prints the wall time for the entire cell
steps_per_epoch = NUM_TRAIN_EXAMPLES // (BATCH_SIZE * NUM_EVALS)
OUTDIR = "./taxi_trained"
shutil.rmtree(path=OUTDIR, ignore_errors=True) # start fresh each time
history = model.fit(x=trainds,
steps_per_epoch=steps_per_epoch,
epochs=NUM_EVALS,
validation_data=evalds,
callbacks=[TensorBoard(OUTDIR)])
"""
Explanation: Next, we'll set up our training variables, create our datasets for training and validation, and train our model.
(We refer you to the blog post ML Design Pattern #3: Virtual Epochs for further details on why we express the training in terms of NUM_TRAIN_EXAMPLES and NUM_EVALS and why, in this training code, the number of epochs is really equal to the number of evaluations we perform.)
End of explanation
"""
RMSE_COLS = ['rmse', 'val_rmse']
# Pandas DataFrame is two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns).
pd.DataFrame(history.history)[RMSE_COLS].plot()
"""
Explanation: Just as before, we can examine the history to see how the RMSE changes through training on the train set and validation set.
End of explanation
"""
model.predict(
x={
"pickup_longitude": tf.convert_to_tensor([-73.982683]),
"pickup_latitude": tf.convert_to_tensor([40.742104]),
"dropoff_longitude": tf.convert_to_tensor([-73.983766]),
"dropoff_latitude": tf.convert_to_tensor([40.755174]),
"passenger_count": tf.convert_to_tensor([3.0]),
},
steps=1,
)
"""
Explanation: Making predictions with our model
To make predictions with our trained model, we can call the predict method, passing to it a dictionary of values. The steps parameter determines the total number of steps before declaring the prediction round finished. Here since we have just one example, we set steps=1 (setting steps=None would also work). Note, however, that if x is a tf.data dataset or a dataset iterator, and steps is set to None, predict will run until the input dataset is exhausted.
End of explanation
"""
OUTPUT_DIR = "./export/savedmodel"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
TIMESTAMP = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
EXPORT_PATH = os.path.join(OUTPUT_DIR, TIMESTAMP)
tf.saved_model.save(model, EXPORT_PATH) # with default serving function
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir {EXPORT_PATH}
!find {EXPORT_PATH}
os.environ['EXPORT_PATH'] = EXPORT_PATH
"""
Explanation: Export and deploy our model
Of course, making individual predictions is not realistic, because we can't expect client code to have a model object in memory. For others to use our trained model, we'll have to export our model to a file, and expect client code to instantiate the model from that exported file.
We'll export the model to a TensorFlow SavedModel format. Once we have a model in this format, we have lots of ways to "serve" the model, from a web application, from JavaScript, from mobile applications, etc.
End of explanation
"""
PROJECT = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT = PROJECT[0]
BUCKET = PROJECT
REGION = "us-central1"
MODEL_DISPLAYNAME = f"taxifare_keras_functional-{TIMESTAMP}"
print(f"MODEL_DISPLAYNAME: {MODEL_DISPLAYNAME}")
# from https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-3:latest"
)
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
%%bash
# Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${BUCKET}/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${BUCKET}
echo "\nHere are your current buckets:"
gsutil ls
fi
!gsutil cp -R $EXPORT_PATH gs://$BUCKET/$MODEL_DISPLAYNAME
uploaded_model = aiplatform.Model.upload(
display_name=MODEL_DISPLAYNAME,
artifact_uri=f"gs://{BUCKET}/{MODEL_DISPLAYNAME}",
serving_container_image_uri=SERVING_CONTAINER_IMAGE_URI,
)
MACHINE_TYPE = "n1-standard-2"
endpoint = uploaded_model.deploy(
machine_type=MACHINE_TYPE,
accelerator_type=None,
accelerator_count=None,
)
"""
Explanation: Deploy our model to Vertex AI
Finally, we will deploy our trained model to Vertex AI and see how we can make online predictions.
End of explanation
"""
instance = {
"pickup_longitude": -73.982683,
"pickup_latitude": 40.742104,
"dropoff_longitude": -73.983766,
"dropoff_latitude": 40.755174,
"passenger_count": 3.0,
}
endpoint.predict([instance])
"""
Explanation: <img src='assets/taxi_fare_keras_func_model.png' width='80%'>
<sup>(image:Your model in Vertex AI)</sup>
End of explanation
"""
endpoint.undeploy_all()
endpoint.delete()
"""
Explanation: Cleanup
When deploying a model to an endpoint for online prediction, the minimum min-replica-count is 1, and it is charged per node hour. So let's delete the endpoint to reduce unnecessary charges. Before we can delete the endpoint, we first undeploy all attached models...
End of explanation
"""
|
smorton2/think-stats | code/chap05exmine.ipynb | gpl-3.0 | from __future__ import print_function, division
%matplotlib inline
import numpy as np
import nsfg
import first
import analytic
import thinkstats2
import thinkplot
"""
Explanation: Examples and Exercises from Think Stats, 2nd Edition
http://thinkstats2.com
Copyright 2016 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
End of explanation
"""
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title='Exponential CDF', xlabel='x', ylabel='CDF',
loc='lower right')
"""
Explanation: Exponential distribution
Here's what the exponential CDF looks like with a range of parameters.
End of explanation
"""
df = analytic.ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='Time between births (minutes)', ylabel='CDF')
"""
Explanation: Here's the distribution of interarrival times from a dataset of birth times.
End of explanation
"""
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='Time between births (minutes)',
ylabel='CCDF', yscale='log', loc='upper right')
"""
Explanation: Here's what the CCDF looks like on a log-y scale. A straight line is consistent with an exponential distribution.
End of explanation
"""
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title='Normal CDF', xlabel='x', ylabel='CDF',
loc='upper left')
"""
Explanation: Normal distribution
Here's what the normal CDF looks like with a range of parameters.
End of explanation
"""
preg = nsfg.ReadFemPreg()
weights = preg.totalwgt_lb.dropna()
"""
Explanation: I'll use a normal model to fit the distribution of birth weights from the NSFG.
End of explanation
"""
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = np.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='0.6')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Config(title='Birth weights',
xlabel='Birth weight (pounds)',
ylabel='CDF')
"""
Explanation: Here's the observed CDF and the model. The model fits the data well except in the left tail.
End of explanation
"""
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
label = '$\mu=%d$, $\sigma=%d$' % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Config(title='Normal probability plot',
xlabel='standard normal sample',
ylabel='sample values')
"""
Explanation: A normal probability plot is a visual test for normality. The following example shows that if the data are actually from a normal distribution, the plot is approximately straight.
End of explanation
"""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = np.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
thinkplot.Config(title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
"""
Explanation: Here's the normal probability plot for birth weights, showing that the lightest babies are lighter than we expect from the normal model, and the heaviest babies are heavier.
End of explanation
"""
full_term = preg[preg.prglngth >= 37]
term_weights = full_term.totalwgt_lb.dropna()
"""
Explanation: If we suspect that the deviation in the left tail is due to preterm babies, we can check by selecting only full term births.
End of explanation
"""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = np.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Config(title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
"""
Explanation: Now the deviation in the left tail is almost gone, but the heaviest babies are still heavy.
End of explanation
"""
import brfss
df = brfss.ReadBrfss()
weights = df.wtkg2.dropna()
"""
Explanation: Lognormal model
As an example of a lognormal disrtribution, we'll look at adult weights from the BRFSS.
End of explanation
"""
def MakeNormalModel(weights):
    """Plot the empirical CDF of *weights* together with a fitted Normal model.

    weights: sequence of values
    """
    # Trimmed estimates are robust to outliers in the tails.
    mu, var = thinkstats2.TrimmedMeanVar(weights)
    sigma = np.sqrt(var)
    print('n, mean, std', len(weights), mu, sigma)
    # Render the analytic model over mean +/- 4 standard deviations.
    low = mu - 4 * sigma
    high = mu + 4 * sigma
    xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low, high)
    thinkplot.Plot(xs, ps, label='model', linewidth=4, color='0.8')
    # Overlay the empirical distribution.
    empirical_cdf = thinkstats2.Cdf(weights, label='weights')
    thinkplot.Cdf(empirical_cdf)
"""
Explanation: The following function estimates the parameters of a normal distribution and plots the data and a normal model.
End of explanation
"""
MakeNormalModel(weights)
thinkplot.Config(title='Adult weight, linear scale', xlabel='Weight (kg)',
ylabel='CDF', loc='upper right')
"""
Explanation: Here's the distribution of adult weights and a normal model, which is not a very good fit.
End of explanation
"""
log_weights = np.log10(weights)
MakeNormalModel(log_weights)
thinkplot.Config(title='Adult weight, log scale', xlabel='Weight (log10 kg)',
ylabel='CDF', loc='upper right')
"""
Explanation: Here's the distribution of adult weight and a lognormal model, plotted on a log-x scale. The model is a better fit for the data, although the heaviest people are heavier than the model expects.
End of explanation
"""
def MakeNormalPlot(weights):
    """Draw a normal probability plot of *weights* with a fitted straight line.

    weights: sequence of values
    """
    mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
    sigma = np.sqrt(var)
    # Reference line for a perfect Normal(mu, sigma) fit over +/- 5 sigmas.
    line_xs, line_ys = thinkstats2.FitLine([-5, 5], mu, sigma)
    thinkplot.Plot(line_xs, line_ys, color='0.8', label='model')
    # Sorted sample values plotted against standard normal variates.
    sample_xs, sample_ys = thinkstats2.NormalProbability(weights)
    thinkplot.Plot(sample_xs, sample_ys, label='weights')
"""
Explanation: The following function generates a normal probability plot.
End of explanation
"""
MakeNormalPlot(weights)
thinkplot.Config(title='Adult weight, normal plot', xlabel='Weight (kg)',
ylabel='CDF', loc='upper left')
"""
Explanation: When we generate a normal probability plot with adult weights, we can see clearly that the data deviate from the model systematically.
End of explanation
"""
MakeNormalPlot(log_weights)
thinkplot.Config(title='Adult weight, lognormal plot', xlabel='Weight (log10 kg)',
ylabel='CDF', loc='upper left')
"""
Explanation: If we make a normal probability plot with log weights, the model fit the data well except in the tails, where the heaviest people exceed expectations.
End of explanation
"""
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
thinkplot.Config(title='Pareto CDF', xlabel='x',
ylabel='CDF', loc='lower right')
"""
Explanation: Pareto distribution
Here's what the Pareto CDF looks like with a range of parameters.
End of explanation
"""
import populations
pops = populations.ReadData()
print('Number of cities/towns', len(pops))
"""
Explanation: The distribution of populations for cities and towns is sometimes said to be Pareto-like.
End of explanation
"""
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log', loc='lower left')
"""
Explanation: Here's the distribution of population for cities and towns in the U.S., along with a Pareto model. The model fits the data well in the tail.
End of explanation
"""
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF', loc='lower right')
"""
Explanation: The lognormal model might be a better fit for this data (as is often the case for things that are supposed to be Pareto).
End of explanation
"""
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='Random variate',
ylabel='log10 population',
xlim=[-5, 5])
"""
Explanation: Here's a normal probability plot for the log-populations. The model fits the data well except in the right tail, where the biggest cities are bigger than expected.
End of explanation
"""
import random
def expovariate(lam):
    """Draw a single Exponential(lam) variate via inverse-CDF sampling."""
    u = random.random()
    # Invert CDF(x) = 1 - exp(-lam * x) at the uniform draw u.
    return -np.log(1 - u) / lam
"""
Explanation: Random variates
When we have an analytic CDF, we can sometimes invert it to generate random values. The following function generates values from an exponential distribution.
End of explanation
"""
t = [expovariate(lam=2) for _ in range(1000)]
"""
Explanation: We can test it by generating a sample.
End of explanation
"""
cdf = thinkstats2.Cdf(t)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='Exponential variate', ylabel='CCDF', yscale='log')
"""
Explanation: And plotting the CCDF on a log-y scale.
End of explanation
"""
import scipy.stats
"""
Explanation: A straight line is consistent with an exponential distribution.
As an exercise, write a function that generates a Pareto variate.
Exercises
Exercise: In the BRFSS (see Section 5.4), the distribution of heights is roughly normal with parameters µ = 178 cm and σ = 7.7 cm for men, and µ = 163 cm and σ = 7.3 cm for women.
In order to join Blue Man Group, you have to be male between 5’10” and 6’1” (see http://bluemancasting.com). What percentage of the U.S. male population is in this range? Hint: use scipy.stats.norm.cdf.
scipy.stats contains objects that represent analytic distributions
End of explanation
"""
mu = 178
sigma = 7.7
dist = scipy.stats.norm(loc=mu, scale=sigma)
type(dist)
"""
Explanation: For example <tt>scipy.stats.norm</tt> represents a normal distribution.
End of explanation
"""
dist.mean(), dist.std()
"""
Explanation: A "frozen random variable" can compute its mean and standard deviation.
End of explanation
"""
dist.cdf(mu-sigma)
"""
Explanation: It can also evaluate its CDF. How many people are more than one standard deviation below the mean? About 16%
End of explanation
"""
# Solution goes here
short = dist.cdf(178)
tall = dist.cdf(185)
print(tall - short)
"""
Explanation: How many people are between 5'10" and 6'1"?
End of explanation
"""
alpha = 1.7
xmin = 1 # meter
dist = scipy.stats.pareto(b=alpha, scale=xmin)
dist.median()
"""
Explanation: Exercise: To get a feel for the Pareto distribution, let’s see how different the world would be if the distribution of human height were Pareto. With the parameters xm = 1 m and α = 1.7, we get a distribution with a reasonable minimum, 1 m, and median, 1.5 m.
Plot this distribution. What is the mean human height in Pareto world? What fraction of the population is shorter than the mean? If there are 7 billion people in Pareto world, how many do we expect to be taller than 1 km? How tall do we expect the tallest person to be?
scipy.stats.pareto represents a pareto distribution. In Pareto world, the distribution of human heights has parameters alpha=1.7 and xmin=1 meter. So the shortest person is 100 cm and the median is 150.
End of explanation
"""
# Solution goes here
dist.mean()
"""
Explanation: What is the mean height in Pareto world?
End of explanation
"""
# Solution goes here
dist.cdf(dist.mean())
"""
Explanation: What fraction of people are shorter than the mean?
End of explanation
"""
# Solution goes here
# 1 km or 1 m?
# for 1 km
(1 - dist.cdf(1000)) * 7e9
"""
Explanation: Out of 7 billion people, how many do we expect to be taller than 1 km? You could use <tt>dist.cdf</tt> or <tt>dist.sf</tt>.
End of explanation
"""
# max(dist)
dist.sf(6e5) * 7e9
# Solution goes here
# Solution goes here
# the height should be 600km
"""
Explanation: How tall do we expect the tallest person to be?
End of explanation
"""
sample = [random.weibullvariate(2, 1) for _ in range(1000)]
cdf = thinkstats2.Cdf(sample)
thinkplot.Cdf(cdf, transform='weibull')
thinkplot.Config(xlabel='Weibull variate', ylabel='CCDF')
"""
Explanation: Exercise: The Weibull distribution is a generalization of the exponential distribution that comes up in failure analysis (see http://wikipedia.org/wiki/Weibull_distribution). Its CDF is
$\mathrm{CDF}(x) = 1 − \exp[−(x / λ)^k]$
Can you find a transformation that makes a Weibull distribution look like a straight line? What do the slope and intercept of the line indicate?
Use random.weibullvariate to generate a sample from a Weibull distribution and use it to test your transformation.
Generate a sample from a Weibull distribution and plot it using a transform that makes a Weibull distribution look like a straight line.
thinkplot.Cdf provides a transform that makes the CDF of a Weibull distribution look like a straight line.
End of explanation
"""
import analytic
df = analytic.ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
n = len(diffs)
lam = 44.0 / 24 / 60
sample = [random.expovariate(lam) for _ in range(n)]
1/lam, np.mean(sample)
# Solution goes here
# make 44 values from exp dist
# give them same mean as data aka 33 min
# plot distribution of random values
rand_cdf = thinkstats2.Cdf(sample, label='random values')
thinkplot.Cdf(rand_cdf)
thinkplot.Show()
thinkplot.Cdfs([rand_cdf, cdf], complement=True)
thinkplot.Config(xlabel='time between births', yscale='log')
# Solution goes here
"""
Explanation: Exercise: For small values of n, we don’t expect an empirical distribution to fit an analytic distribution exactly. One way to evaluate the quality of fit is to generate a sample from an analytic distribution and see how well it matches the data.
For example, in Section 5.1 we plotted the distribution of time between births and saw that it is approximately exponential. But the distribution is based on only 44 data points. To see whether the data might have come from an exponential distribution, generate 44 values from an exponential distribution with the same mean as the data, about 33 minutes between births.
Plot the distribution of the random values and compare it to the actual distribution. You can use random.expovariate to generate the values.
End of explanation
"""
import hinc
df = hinc.ReadData()
df.head()
"""
Explanation: Worked Example: The distributions of wealth and income are sometimes modeled using lognormal and Pareto distributions. To see which is better, let’s look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau of Labor Statistics and the Census Bureau to study income and related variables. Data collected in 2013 is available from http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm. I downloaded hinc06.xls, which is an Excel spreadsheet with information about household income, and converted it to hinc06.csv, a CSV file you will find in the repository for this book. You will also find hinc.py, which reads this file.
Extract the distribution of incomes from this dataset. Are any of the analytic distributions in this chapter a good model of the data?
End of explanation
"""
xs, ps = df.income.values, df.ps.values
cdf = thinkstats2.Cdf(xs, ps, label='data')
cdf_log = thinkstats2.Cdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='household income',
ylabel='CDF')
"""
Explanation: Here's what the CDF looks like on a linear scale.
End of explanation
"""
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log',
loc='lower left')
"""
Explanation: To check whether a Pareto model describes the data well, I plot the CCDF on a log-log scale.
I found parameters for the Pareto model that match the tail of the distribution.
End of explanation
"""
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
"""
Explanation: For the lognormal model I estimate mu and sigma using percentile-based statistics (median and IQR).
End of explanation
"""
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 household income',
ylabel='CDF')
"""
Explanation: Here's what the distribution, and fitted model, look like on a log-x scale.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/art_and_science_of_ml/solutions/export_data_from_bq_to_gcs.ipynb | apache-2.0 | # Run the chown command to change the ownership of the repository
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Install the Google Cloud BigQuery library
%pip install google-cloud-bigquery==1.25.0
"""
Explanation: Exporting data from BigQuery to Google Cloud Storage
In this notebook, we export BigQuery data to GCS so that we can reuse our Keras model that was developed on CSV data.
End of explanation
"""
# Importing necessary tensorflow library and printing the TF version.
import tensorflow as tf
print("Tensorflow version: ",tf.__version__)
# The OS module in python provides functions for interacting with the operating system.
import os
from google.cloud import bigquery
"""
Explanation: Please ignore any incompatibility warnings and errors.
Restart the kernel to use updated packages. (On the Notebook menu, select Kernel > Restart Kernel > Restart).
End of explanation
"""
# Change with your own bucket and project below:
BUCKET = "<BUCKET>"
PROJECT = "<PROJECT>"
OUTDIR = "gs://{bucket}/taxifare/data".format(bucket=BUCKET)
os.environ['BUCKET'] = BUCKET
os.environ['OUTDIR'] = OUTDIR
os.environ['PROJECT'] = PROJECT
"""
Explanation: Change the following cell as necessary:
End of explanation
"""
# Initialize a BigQuery client.
bq = bigquery.Client(project = PROJECT)
dataset = bigquery.Dataset(bq.dataset("taxifare"))
# Create a new dataset with the `create_dataset()` method.
try:
bq.create_dataset(dataset)
print("Dataset created")
except:
print("Dataset already exists")
"""
Explanation: Create BigQuery tables
If you haven not already created a BigQuery dataset for our data, run the following cell:
End of explanation
"""
%%bigquery
CREATE OR REPLACE TABLE taxifare.feateng_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 1000)) = 1
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
"""
Explanation: Let's create a table with 1 million examples.
Note that the order of columns is exactly what was in our CSV files.
End of explanation
"""
%%bigquery
CREATE OR REPLACE TABLE taxifare.feateng_valid_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
"""
Explanation: Make the validation dataset be 1/10 the size of the training dataset.
End of explanation
"""
%%bash
echo "Deleting current contents of $OUTDIR"
gsutil -m -q rm -rf $OUTDIR
echo "Extracting training data to $OUTDIR"
bq --location=US extract \
--destination_format CSV \
--field_delimiter "," --noprint_header \
taxifare.feateng_training_data \
$OUTDIR/taxi-train-*.csv
echo "Extracting validation data to $OUTDIR"
bq --location=US extract \
--destination_format CSV \
--field_delimiter "," --noprint_header \
taxifare.feateng_valid_data \
$OUTDIR/taxi-valid-*.csv
# With `-l` option, gsutil will output additional information about each matching provider, bucket, subdirectory, or object.
gsutil ls -l $OUTDIR
# The cat command outputs the contents of one or more URLs to stdout.
!gsutil cat gs://$BUCKET/taxifare/data/taxi-train-000000000000.csv | head -2
"""
Explanation: Export the tables as CSV files
End of explanation
"""
|
massimo-nocentini/simulation-methods | notes/set-based-type-system/set-based-type-system.ipynb | mit | from itertools import repeat
from sympy import *
#from type_system import *
%run ../../src/commons.py
%run ./type-system.py
"""
Explanation: <p>
<img src="http://www.cerm.unifi.it/chianti/images/logo%20unifi_positivo.jpg"
alt="UniFI logo" style="float: left; width: 20%; height: 20%;">
<div align="right">
Massimo Nocentini<br>
</div>
</p>
<br>
<div align="center">
<b>Abstract</b><br>
In this document we collect a naive <i>type system</i> based on sets.
</div>
End of explanation
"""
init_printing()
x,y,m,n,t,z = symbols('x y m n t z', commutative=True)
alpha, beta, gamma, eta = symbols(r'\alpha \beta \gamma \eta', commutative=True)
f,g = Function('f'), Function('g')
"""
Explanation:
End of explanation
"""
((1/(1-w[0]*z))*(1/(1-w[1]*z))).diff(z).series(z, n=6)
define(f(z), z/((1-z)**2),ctor=FEq).series(z,n=10)
define(f(z), 1/(1-alpha*z), ctor=FEq).series(z,n=10)
define(f(z), 1/(1-(u[0]+u[1])*z), ctor=FEq).series(z,n=4)
define(f(z), 1/(1-(o[0]+o[1])*z), ctor=FEq).series(z,n=4)
"""
Explanation: Non-commutative symbols
End of explanation
"""
define(f(z), z*(1/(1-z))*(1/(1-z)), ctor=FEq).series(z,n=10)
define(f(z), z**3,ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), exp(z),ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), z*exp(z), ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), z**2*exp(z)/factorial(2,evaluate=False),
ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), z**3*exp(z)/factorial(3, evaluate=False),
ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), (exp(z)+exp(-z))/2, ctor=FEq).series(z, n=20, kernel='exponential')
define(f(z), exp(m*z), ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), (exp(z)-1)/z, ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), 1/(1-z), ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), (1/(1-z))*(1/(1-z)), ctor=FEq).series(z, n=10, kernel='exponential')
define(f(z), exp(z)**2, ctor=FEq).series(z, n=10, kernel='exponential')
"""
Explanation: Exponential gf recap
End of explanation
"""
tyvar(x).gf()
(tyvar(u[0]) * tyvar(u[1]) * tyvar(u[2])).gf()
(tyvar(o[0]) * tyvar(o[1]) * tyvar(o[2])).gf()
(tyvar(u[0]) | tyvar(u[1]) | tyvar(u[2])).gf()
(tyvar(o[0]) | tyvar(o[1]) | tyvar(o[2])).gf()
truth.gf() + falsehood.gf()
boolean.gf()
maybe(tyvar(alpha)[z]).gf()
"""
Explanation: Linear types
End of explanation
"""
nel = 4
syms=[u[i] for i in range(nel)]
occ_prb, = cp(maybe(tyvar(u[i]*z)) for i in range(nel)).gf() # here we can use the `[z]` notation too.
occ_prb
occupancy(occ_prb, syms, objects='unlike', boxes='unlike').series(z)
occupancy(occ_prb, syms, objects='unlike', boxes='like').series(z)
occupancy(occ_prb, syms, objects='like', boxes='unlike').series(z)
occupancy(occ_prb, syms, objects='like', boxes='like').series(z)
"""
Explanation: occupancies
End of explanation
"""
u_hat = symbols(r'␣_0:10')
nel = 3
occ_prb, = cp(tyvar(z*(sum(u[j] for j in range(nel) if j != i))) | tyvar(u_hat[i])
for i in range(nel)).gf()
occ_prb
syms=[u[i] for i in range(nel)]+[u_hat[i] for i in range(nel)]
occupancy(occ_prb, syms, objects='unlike', boxes='unlike').series(z)
occupancy(occ_prb, syms, objects='unlike', boxes='like').series(z)
occupancy(occ_prb, syms, objects='like', boxes='unlike').series(z)
occupancy(occ_prb, syms, objects='like', boxes='like').series(z)
"""
Explanation:
End of explanation
"""
occupancy_problem, = cp(maybe(du(tyvar((u[i]*z)**(j+1)) for j in range(i+1)))
for i in range(3)).gf()
occupancy_problem
occupancy(occupancy_problem, syms=[u[i] for i in range(3)], objects='unlike', boxes='unlike').series(z)
occupancy(occupancy_problem, syms=[u[i] for i in range(3)], objects='unlike', boxes='like').series(z)
occupancy(occupancy_problem, syms=[u[i] for i in range(3)], objects='like', boxes='unlike').series(z)
occupancy(occupancy_problem, syms=[u[i] for i in range(3)], objects='like', boxes='like').series(z)
((1+t)*(1+t+t**2)*(1+t+t**2+t**3)).series(t,n=10) # just for checking
"""
Explanation:
End of explanation
"""
def sums_of_powers(boxes, base):
    """Generating function whose z-exponents are the integers representable
    as sums of distinct powers base**i, for i in [0, boxes).

    Each power can be taken or left (`cp() | tyvar(...)`), so the product of
    the `boxes` factors enumerates all subsets of {base**0, ..., base**(boxes-1)}.
    """
    # NOTE(review): '\space' contains an invalid escape sequence; it is a
    # runtime string (symbol name) so it is left untouched here.
    p = IndexedBase('\space')
    # the second for-clause `for j in [...]` emulates a let-binding j = base**i
    return cp(cp() | tyvar(p[j]*z**(base**i))
              for i in range(0,boxes)
              for j in [Pow(base,i,evaluate=False)] # implicit let
             ).gf()
occupancy, = sums_of_powers(boxes=4, base=2)
occupancy.series(z, n=32)
occupancy, = sums_of_powers(boxes=4, base=3)
occupancy.series(z, n=100)
occupancy, = sums_of_powers(boxes=4, base=5)
occupancy.series(z, n=200)
occupancy, = sums_of_powers(boxes=4, base=7)
occupancy.series(z, n=500)
assert 393 == 7**0 + 7**2 + 7**3 # _.rhs.rhs.coeff(z, 393)
"""
Explanation:
End of explanation
"""
difference = (cp() | tyvar(-gamma*z))
ones = nats * difference
ones_gf, = ones.gf()
ones_gf
ones_gf(z,1,1,1).series(z, n=10) # check!
one_gf, = (ones * difference).gf()
one_gf.series(z, n=10).rhs.rhs.subs({w[0]:1, w[1]:1, gamma:1})
"""
Explanation: Differences
End of explanation
"""
l = IndexedBase(r'\circ')
def linear_comb_of_powers(boxes, base):
    """Generating function for non-negative integer linear combinations of the
    powers base**i, i in [0, boxes): each power may be used any number of
    times (`lst(...)`), unlike `sums_of_powers` where it is used at most once.
    """
    # `for j in [...]` is the same implicit let-binding trick: j = base**i
    return cp(lst(tyvar(Mul(l[j], z**(base**i), evaluate=False)))
              for i in range(boxes)
              for j in [Pow(base,i,evaluate=False)]).gf()
occupancy, = linear_comb_of_powers(boxes=4, base=Integer(2))
occupancy.series(z, n=8)
occupancy, = linear_comb_of_powers(boxes=4, base=3)
occupancy.series(z, n=9)
occupancy, = linear_comb_of_powers(boxes=4, base=5)
occupancy.series(z, n=10)
def uniform_rv(n):
    """Uniform random variable over `n` outcomes: each atom carries weight 1/n.

    Bug fix: the original body ignored its `n` parameter and read the stale
    module-level `nel` (left at a different value by earlier cells) instead
    of the requested `n`.
    """
    return tyvar(S(1)/n) * lst(tyvar(x))
occupancy, = uniform_rv(n=10).gf()
occupancy.series(x,n=10)
class lst_structure_w(rec):
    """Recursive species satisfying L_w(alpha) = alpha | lst(L_w)."""
    def definition(self, alpha):
        # `me` is the recursive placeholder standing for this species itself
        me = self.me()
        return alpha | lst(me)
    def label(self):
        # LaTeX name used when rendering the generating function
        return r'\mathcal{L}_{w}' # NOTE(review): subscript here is `w`; the original "`_s` stands for structure" comment looked copy-pasted from the class below
lst_structure_w(tyvar(alpha)).gf()
[gf.series(alpha) for gf in _]
class lst_structure(rec):
    """Recursive species satisfying L_s(alpha) = alpha | (lst(L_s) * L_s * L_s)."""
    def definition(self, alpha):
        me = self.me()
        return alpha | (lst(me) * me * me)
    def label(self):
        return r'\mathcal{L}_{s}' # `_s` stands for "structure"
lst_structure(tyvar(alpha)).gf()
_[0].series(alpha, n=10)
class structure(rec):
    """Recursive species satisfying S(alpha) = alpha | (bin_tree(S) * S * S)."""
    def definition(self, alpha):
        me = self.me()
        return alpha | (bin_tree(me) * me * me)
    def label(self):
        return r'\mathcal{S}'
structure(tyvar(alpha)).gf()
gf = _[0]
gf.simplify()
nel = 7
s = gf.simplify().series(alpha, n=nel).rhs.rhs
[s.coeff(alpha, n=i).subs({pow(-1,S(1)/3):-1}).radsimp().powsimp() for i in range(nel)]
class structure(rec):
    """Redefinition of `structure`: S(alpha) = alpha | (nnbin_tree(S) * S).

    NOTE(review): `nnbin_tree` presumably denotes non-empty binary trees —
    confirm against its definition elsewhere in the notebook.
    """
    def definition(self, alpha):
        me = self.me()
        return alpha | (nnbin_tree(me) * me)
    def label(self):
        return r'\mathcal{S}'
structure(tyvar(alpha)).gf()
gf = _[0]
gf.simplify()
nel = 20
s = gf.simplify().series(alpha, n=nel).rhs.rhs
[s.coeff(alpha, n=i).subs({pow(-1,S(1)/3):-1}).radsimp().powsimp() for i in range(nel)]
class nn_structure(rec):
    """Recursive species: an atom times a bin_tree of nnbin_trees of itself."""
    def definition(self, alpha):
        me = self.me()
        return alpha * bin_tree(nnbin_tree(me))
    def label(self):
        return r'\mathcal{L}_{s}^{+}' # `_s` stands for "structure"
nn_structure(tyvar(alpha)).gf()
_[0].series(alpha, n=10)
class nnlst_structure(rec):
    """Recursive species: an atom times a list of non-empty lists of itself.

    NOTE(review): shares the LaTeX label with `nn_structure`; verify that is
    intentional.
    """
    def definition(self, alpha):
        me = self.me()
        return alpha * lst(nnlst(me))
    def label(self):
        return r'\mathcal{L}_{s}^{+}' # `_s` stands for "structure"
nnlst_structure(tyvar(alpha)).gf()
_[0].series(alpha, n=10)
class tree(rec):
    """Rooted trees: a root atom carrying a (possibly empty) list of subtrees."""
    def definition(self, alpha):
        return alpha * lst(self.me())
    def label(self):
        return r'\mathcal{T}'
tree(tyvar(alpha)).gf()
_[0].series(alpha, n=10)
class combination(rec):
    """Recursive species: a single atom or an ordered pair of this species
    (the grammar C = alpha | C*C of full binary trees with alpha-leaves)."""
    def definition(self, alpha):
        me = self.me()
        return alpha | (me * me)
    def label(self):
        return r'\mathcal{C}'
combination(tyvar(alpha)).gf()
_[0].series(alpha, n=10)
class ab_tree(rec):
    """Binary trees with beta-leaves and alpha-internal nodes: T = beta | alpha*T*T."""
    def definition(self, alpha, beta):
        me = self.me()
        return beta | (alpha * me * me)
    def label(self):
        return r'\mathcal{T}_{a,b}'
ab_tree_gfs = ab_tree(tyvar(alpha), tyvar(beta)).gf()
ab_tree_gfs
ab_tree_gf = ab_tree_gfs[0]
fab_eq = FEq(ab_tree_gf.lhs, ab_tree_gf.rhs.series(beta, n=20).removeO(), evaluate=False)
fab_eq
fab_eq(x,x)
(_*alpha).expand()
#with lift_to_Lambda(fab_eq) as F:
B = fab_eq(x,1)
A = fab_eq(1,x)
A,B,
(A+B).expand()
((1+x)*A).expand()
class dyck(rec):
    """Dyck grammar D = epsilon | alpha * D * beta * D (balanced alpha/beta pairs)."""
    def definition(self, alpha, beta):
        me = self.me()
        return cp() | (alpha * me * beta * me)
    def label(self):
        return r'\mathcal{D}'
dyck_gfs = dyck(tyvar(alpha*x), tyvar(beta*x)).gf()
dyck_gfs
dyck_gf = dyck_gfs[0]
dyck_gf.series(x,n=10)
class motzkin(rec):
    """Motzkin grammar M = epsilon | alpha*M*beta*M | gamma*M
    (arches plus level steps gamma)."""
    def definition(self, alpha, beta, gamma):
        me = self.me()
        return cp() | (alpha * me * beta * me) | (gamma * me)
    def label(self):
        return r'\mathcal{M}'
motzkin_gfs = motzkin(tyvar(alpha*x), tyvar(beta*x), tyvar(gamma*x),).gf()
motzkin_gfs
motzkin_gf = motzkin_gfs[0]
motzkin_gf.series(x,n=10)
motzkin_gf(x,1,1,1).series(x,n=10)
class motzkin_p(rec):
    """Motzkin variant with a second kind of level step:
    M+ = epsilon | alpha*M+*beta*M+ | gamma*M+ | eta*M+."""
    def definition(self, alpha, beta, gamma, eta):
        me = self.me()
        return cp() | (alpha * me * beta * me) | (gamma * me) | (eta * me)
    def label(self):
        return r'\mathcal{M}^{+}'
motzkinp_gfs = motzkin_p(tyvar(alpha*x), tyvar(beta*x), tyvar(gamma*x), tyvar(eta*x),).gf()
motzkinp_gfs
motzkinp_gf = motzkinp_gfs[0]
motzkinp_gf.series(x,n=6)
motzkinp_gf(x,1,1,1,1).series(x,n=10)
class fibo(rec):
    """Fibonacci-style grammar F = epsilon | alpha | (beta | alpha*beta) * F."""
    def definition(self, alpha, beta):
        me = self.me()
        return cp() | alpha | ((beta | (alpha * beta)) * me)
    def label(self):
        return r'\mathcal{F}'
fibo_gf, = fibo(tyvar(alpha*x), tyvar(beta*x),).gf()
fibo_gf
fibo_gf.series(x,n=10)
fibo_gf(1,x,1).series(x,n=10)
lst_of_truth_gf, = lst(tyvar(x)).gf()
lst_of_truth_gf.series(x, n=10, is_exp=True)
lst_of_boolean_gf.series(x,n=10,is_exp=True)
_.rhs.rhs.subs({w[0]:1,w[1]:1})
sum((_.rhs.rhs.coeff(x,i)/factorial(i))*x**i for i in range(1,10))
class powerset(ty):
    """Set-of construction: the gf of the powerset of F is exp(F) (EGF sets)."""
    def gf_rhs(self, ty):
        # NOTE(review): the parameter `ty` shadows the base-class name `ty`
        return [exp(self.mulfactor() * gf.rhs) for gf in ty.gf()]
    def mulfactor(self):
        # +1 for the plain powerset; `ipowerset` below overrides this with -1
        return 1
    def label(self):
        return r'\mathcal{P}'
powerset_of_tyvar_gf, = (2**(nnlst(tyvar(alpha)))).gf()
powerset_of_tyvar_gf
powerset_of_tyvar_gf.series(alpha, n=10, is_exp=True)
powerset_of_tyvar_gf, = (2**(nnlst(boolean))).gf()
powerset_of_tyvar_gf
powerset_of_tyvar_gf.series(x, n=5, is_exp=True)
_.rhs.rhs.subs({w[0]:1,w[1]:1})
powerset_of_tyvar_gf, _ = (2**(bin_tree(tyvar(alpha)))).gf()
powerset_of_tyvar_gf
powerset_of_tyvar_gf.series(alpha, n=10, is_exp=True)
l, = (2**(2**(nnlst(tyvar(alpha))))).gf()
define(l.lhs, l.rhs.ratsimp(), ctor=FEq).series(alpha,n=8,is_exp=True)
class cycle(ty):
    """Cycle construction: the gf of cycles of F is log(gf(F))."""
    def gf_rhs(self, ty):
        # NOTE(review): the parameter `ty` shadows the base-class name `ty`
        return [log(gf.rhs) for gf in ty.gf()]
    def label(self):
        return r'\mathcal{C}'
cycle_of_tyvar_gf, = (~(lst(tyvar(alpha)))).gf()
cycle_of_tyvar_gf
cycle_of_tyvar_gf.series(alpha, n=10, is_exp=True)
cycle_of_tyvar_gf, = (~(lst(boolean))).gf()
cycle_of_tyvar_gf
cycle_of_tyvar_gf.series(x, n=8, is_exp=True)
_.rhs.rhs.subs({w[0]:1,w[1]:1})
Pstar_gf, = (2**(~(lst(tyvar(alpha))))).gf()
Pstar_gf.series(alpha, n=10, is_exp=True)
class ipowerset(powerset):
    """Powerset variant with negated exponent, gf = exp(-F);
    used above for derangement-style counts."""
    def mulfactor(self):
        return -1
derangements_gf, = ((-2)**tyvar(alpha)).gf()
derangements_gf.series(alpha, n=10, is_exp=True)
derangements_gf, = ((-2)**nnlst(tyvar(alpha))).gf()
derangements_gf.series(alpha, n=10, is_exp=True)
[1,2][1:]
def foldr(f, l, i):
    """Right fold: combine the elements of `l` from the right, seeded with `i`.

    foldr(f, [a, b, c], i) == f(a, f(b, f(c, i))).
    """
    if l:
        first, *rest = l
        return f(first, foldr(f, rest, i))
    return i
class arrow(ty):
    """Arrow (function) type: its gf is built by right-folding the component gfs.

    NOTE(review): the original body contained an unused ``v = Dummy()`` and a
    second, unreachable ``return`` folding with ``acc**gf.rhs``; both were dead
    code and have been removed. Runtime behavior is unchanged.
    """
    def label(self):
        return r'\rightarrow'
    def gf_rhs(self, alpha, beta):
        # Fold from the right: wrap the last gf in a Lambda, nest each earlier
        # gf's rhs inside successive Lambda applications, then apply to x.
        return [foldr(lambda gf, acc: Lambda([x], acc(gf.rhs)),
                      gfs[:-1],
                      Lambda([x], gfs[-1].rhs))(x)
                for gfs in self.gfs_space()]
arr, = arrow(boolean, boolean).gf()
arr
arr.series(x,n=5,is_exp=False)
_.rhs.rhs.removeO().subs({w[0]:1,w[1]:1})
arr, = arrow(lst(boolean), lst(boolean)).gf()
arr
arr.series(x,n=5,is_exp=False)
_.rhs.rhs.removeO().subs({w[0]:1,w[1]:1})
"""
Explanation:
End of explanation
"""
lamda_gf = lamda(tyvar(x)).gf_rhs(tyvar(x))
lamda_gf
lamda_gf.rhs.series(x,n=10)
"""
Explanation:
End of explanation
"""
|
Hamstard/RVMs | Tutorial.ipynb | mit | %matplotlib inline
from linear_model import RelevanceVectorMachine, distribution_wrapper, GaussianFeatures, \
FourierFeatures, repeated_regression, plot_summary
from sklearn import preprocessing
import numpy as np
from scipy import stats
import matplotlib#
import matplotlib.pylab as plt
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
"""
Explanation: Tutorial on RVM Regression
In this tutorial we play around with linear regression in form of Relevance Vector Machines (RVMs) using linear and localized kernels. And heeeere we go!
End of explanation
"""
# training inputs on [-pi, pi]; the prediction grid extends beyond the
# training range so the plots below show extrapolation behavior
x = np.linspace(-np.pi,np.pi,100)
x_pred = np.linspace(-1.5*np.pi,1.5*np.pi,200)
# additive zero-mean Gaussian observation noise with sd 0.01
epsilon = stats.norm(loc=0,scale=0.01)
noise = epsilon.rvs(size=x.shape[0])
# targets: a Gaussian bump exp(-x^2) corrupted by the noise
t = np.exp(-x**2) + noise
# quick scatter plot of the generated dataset
fig = plt.figure(figsize=(5,5))
plt.plot(x,t,'ro',markerfacecolor="None",label="data")
plt.xlabel("input")
plt.ylabel("output")
plt.legend(loc=0)
plt.show()
"""
Explanation: First things first, let's set up the database to regress.
End of explanation
"""
# choosing the feature space
# degree-5 polynomial basis: columns 1, x, x^2, ..., x^5
k = 5
trafo = preprocessing.PolynomialFeatures(k)
X = trafo.fit_transform(x.reshape((-1,1)))
# initializing hyperparameters
init_beta = 1./ np.var(t) # (that's the default start)
init_alphas = np.ones(X.shape[1])
# NOTE(review): alpha = inf presumably marks a basis function as pruned, so
# the model starts with only the first (bias) column active — confirm
# against RelevanceVectorMachine's documentation
init_alphas[1:] = np.inf
# setting up the model regression class
model = RelevanceVectorMachine(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,
                               init_alphas=init_alphas)
# regress
model.fit(X,t)
# predict
# predictions on the wider grid, with predictive standard deviations
X_pred = trafo.fit_transform(x_pred.reshape((-1,1)))
y, yerr = model.predict(X_pred,return_std=True)
# left panel: data, posterior mean and ~95% band; right panel: MSE per iteration
fig = plt.figure()
ax = fig.add_subplot(121)
ax.plot(x,t,'ro',label="data",markerfacecolor="None")
ax.fill_between(x_pred,y-2*yerr,y+2*yerr,alpha=.5,label="95\%")
ax.plot(x_pred,y,'-',label="estimate")
plt.legend(loc=0)
ax.set_xlabel("input")
ax.set_ylabel("output")
ax1 = fig.add_subplot(122)
ax1.plot(model.mse_,'-')
ax1.set_xlabel("iteration")
ax1.set_ylabel("MSE")
plt.tight_layout()
plt.show()
"""
Explanation: 1. Single Regression
1.1 Linear Kernel
Neat now let's test whether we can regress that data using a polynomial feature space.
End of explanation
"""
# choosing the feature space
trafo = GaussianFeatures(k=30,mu0=-3,dmu=.2)
X = trafo.fit_transform(x.reshape((-1,1)))
# initializing hyperparameters
init_beta = 1./ np.var(t) # (that's the default start)
init_alphas = np.ones(X.shape[1])
init_alphas[1:] = np.inf
# setting up the model regression class
model = RelevanceVectorMachine(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,
init_alphas=init_alphas)
# regress
model.fit(X,t)
# predict
X_pred = trafo.fit_transform(x_pred.reshape((-1,1)))
y, yerr = model.predict(X_pred,return_std=True)
fig = plt.figure()
ax = fig.add_subplot(121)
ax.plot(x,t,'ro',label="data",markerfacecolor="None")
ax.fill_between(x_pred,y-2*yerr,y+2*yerr,alpha=.5,label="95\%")
ax.plot(x_pred,y,'-',label="estimate")
plt.legend(loc=0)
ax.set_xlabel("input")
ax.set_ylabel("output")
ax1 = fig.add_subplot(122)
ax1.plot(model.mse_,'-')
ax1.set_xlabel("iteration")
ax1.set_ylabel("MSE")
plt.tight_layout()
plt.show()
"""
Explanation: 1.2 Localized Kernel
Indeed that seemed to work. But what about a Gaussian feature space, will it be able to fit the Gaussian?
End of explanation
"""
# choosing the feature space
# degree-5 polynomial basis, as in the single-regression cell above
k = 5
trafo = preprocessing.PolynomialFeatures(k)
X = trafo.fit_transform(x.reshape((-1,1)))
base_trafo = trafo.fit_transform
# initializing hyperparameters using callable distributions giving new hyperparameters
# with every call (useful for repeated regression)
init_beta = distribution_wrapper(stats.halfnorm(scale=1),size=1,single=True)
init_alphas = distribution_wrapper(stats.halfnorm(scale=1),single=False)
model_type = RelevanceVectorMachine
model_kwargs = dict(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,
                    init_alphas=init_alphas,fit_intercept=False)
# repeat the fit 100 times, each run drawing fresh initial hyperparameters
Nruns = 100
runtimes, coefs, models = repeated_regression(x,base_trafo,model_type,t=t,
                                              model_kwargs=model_kwargs,Nruns=Nruns,
                                              return_coefs=True,return_models=True)
plot_summary(models,noise,x,t,X,coefs,base_trafo)
"""
Explanation: 2. Repeated Regressions
Indeed using a Gaussian basis set, for some mysterious reason, gave a closer estimate to the real data with tighter confidence intervals. Now let's do the same again for both kernels but multiple times, initializing the hyperparameters such that we sample them from distributions as well.
2.1 Linear Kernel
End of explanation
"""
# choosing the feature space
trafo = GaussianFeatures(k=30,mu0=-3,dmu=.2)
base_trafo = trafo.fit_transform
# initializing hyperparameters using callable distributions giving new hyperparameters
# with every call (useful for repeated regression)
init_beta = distribution_wrapper(stats.halfnorm(scale=1),size=1,single=True)
init_alphas = distribution_wrapper(stats.halfnorm(scale=1),single=False)
model_type = RelevanceVectorMachine
model_kwargs = dict(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,
init_alphas=init_alphas,fit_intercept=False)
Nruns = 100
runtimes, coefs, models = repeated_regression(x,base_trafo,model_type,t=t,
model_kwargs=model_kwargs,Nruns=Nruns,
return_coefs=True,return_models=True)
X = base_trafo(x.reshape((-1,1)))
plot_summary(models,noise,x,t,X,coefs,base_trafo)
"""
Explanation: 2.2 Localized kernel
End of explanation
"""
|
sdpython/ensae_teaching_cs | _doc/notebooks/td2a_eco2/td2a_Seance_7_Analyse_de_textes_correction.ipynb | mit | from jyquickhelper import add_notebook_menu
add_notebook_menu()
"""
Explanation: TD7 - Analyse de texte - correction
Analyse de texte, TF-IDF, LDA, moteur de recherche, expressions régulières (correction).
End of explanation
"""
from pyensae.datasource import download_data
download_data("df_pocket.zip")
"""
Explanation: Récupération des données
Il est possible de télécharger les données df_pocket.zip.
End of explanation
"""
import json
from pprint import pprint
# raw pocket export: {item_id: {resolved_url, resolved_title, excerpt, tags, ...}}
with open('./data_pocket.json') as fp:
    dict_pocket = json.load(fp)
dict_to_df = {}
keys = ['resolved_url', 'resolved_title', 'excerpt', 'tags']
# keep only the four fields of interest for each entry
# NOTE(review): if a field is missing from `v`, zip shifts the later values
# onto the wrong keys — consider {key: v[key] for key in keys if key in v}
for (k,v) in dict_pocket.items():
    dict_to_df[k] = dict(zip(keys, [v[key] for key in keys if key in v]))
import pandas as p
df_pocket = p.DataFrame.from_dict(dict_to_df, orient = "index")
df_pocket.head()
import pandas as p
import ast
# NOTE(review): this re-load from CSV overwrites the DataFrame built just above
df_pocket = p.read_csv('./df_pocket.csv')
df_pocket.head()
# the CSV stores dicts/lists as their string repr; literal_eval restores them,
# except entries marked "scraper banned" which stay as that sentinel string
df_pocket['html_soup'] = df_pocket['html_soup'].apply(lambda x : ast.literal_eval(x) if x != "scraper banned" else x)
# x == x is False only for NaN, so missing tags are left as NaN
df_pocket['tags'] = df_pocket['tags'].apply(lambda x : ast.literal_eval(x) if x == x else x)
df_pocket.head()
df_pocket.head()
def nan_to_string(x):
    """Return `x` unchanged, or the empty string when `x` is NaN.

    Relies on NaN being the only value that is not equal to itself.
    """
    return x if x == x else ''
title_string = ' '.join(df_pocket['title'].apply( lambda x: nan_to_string(x)))
import re
def url_cleaning(url):
    """Turn a URL into a space-separated token string by replacing each of
    the delimiters '/', '.', ':', '-', '?' with a single space."""
    return re.sub(r'[/.:\-?]', ' ', url)
url_string = ' '.join(df_pocket['url'].apply(lambda x : url_cleaning(x)))
def hx_cleaning(d, hx):
    """Join the heading entries stored under key `hx` ('h1'/'h2'/'h3') with
    spaces; return '' when the key is absent.

    Uses a plain membership test (not dict.get) so non-dict values such as
    the "scraper banned" sentinel string are handled gracefully too.
    """
    key = str(hx)
    if key not in d:
        return ''
    return ' '.join(d[key])
h1_string = ' '.join(df_pocket['html_soup'].apply(lambda x : hx_cleaning(x,'h1')))
h2_string = ' '.join(df_pocket['html_soup'].apply(lambda x : hx_cleaning(x,'h2')))
h3_string = ' '.join(df_pocket['html_soup'].apply(lambda x : hx_cleaning(x, 'h3')))
excerpt_string = ' '.join(df_pocket['excerpt'].apply( lambda x: nan_to_string(x)))
def p_cleaning(x):
    """Join the <p> paragraph list of a parsed page with spaces.

    Returns '' when `x` is not a dict (e.g. the "scraper banned" sentinel or
    a NaN placeholder) or when it has no 'p' key.
    """
    # `and` short-circuits, so `'p' in x` is evaluated only for dicts;
    # the original bitwise `&` evaluated both operands and raised TypeError
    # for non-container inputs such as NaN.
    if isinstance(x, dict) and 'p' in x:
        return ' '.join(x['p'])
    return ''
p_string = ' '.join(df_pocket['html_soup'].apply(lambda x : p_cleaning(x)))
words = ' '.join([title_string,url_string,h1_string,h2_string,h3_string,excerpt_string])
# on ne conserve que les mots
words_string = re.sub('[^A-Za-z ]','', words)
#on "nettoie les espaces"
words_string = re.sub('\s+',' ', words_string)
words_string[:1000] + '...'
! pip install nltk
"""
Explanation: Nettoyer les données (regexp et nltk)
Mais avant tout, nous devrions augmenter la qualité de nos données (en améliorant les parsers et la liste des stopwords). C'est l'objet de cette section.
End of explanation
"""
from wordcloud import WordCloud, STOPWORDS
stopwords = set(STOPWORDS)
print(STOPWORDS)
for wds in ['http', 'https', 'www', 'fr', 'com', 'io', 'org', 'co', 'jo', 'edu', 'news', 'html', 'htm',\
'github', 'youtube', 'google', 'blog', 'watch', 'de', 'le', 'la', 'en', 'sur', 'vous', 'les', \
'ajouter', 'README', 'md', 'et', 'PROCESS', 'CMYK', 'des', 'chargement', 'playlists', 'endobj', \
'obj','est', 'use', 'using', 'will', 'web', 'first','pour', 'du', 'une', 'que']:
stopwords.add(wds)
import nltk
stopwords_fr_ntlk = set(nltk.corpus.stopwords.words('french'))
stopwords_en_ntlk = set(nltk.corpus.stopwords.words('english'))
stopwords_clean = [ l.lower() for l in list(stopwords.union(stopwords_fr_ntlk).union(stopwords_en_ntlk))]
stopwords_clean[:50] + ['...']
%matplotlib inline
wordcloud = WordCloud(stopwords=stopwords, background_color="white")
wordcloud.generate(words_string)
import matplotlib.pyplot as plt
plt.imshow(wordcloud)
plt.axis('off');
"""
Explanation: Nltk contient un corpus de stopwords en plusieurs langues. On peut enrichir la liste déjà créée.
End of explanation
"""
def words_cleaning(url,title,excerpt,html_soup):
    """Build one cleaned bag-of-words string for a pocket entry.

    Concatenates the tokenised url, the title, the excerpt and the h1/h2/h3
    and <p> contents of the parsed page, keeps ASCII letters and spaces only,
    collapses whitespace, drops stopwords and lowercases the result.
    """
    url_clean = url_cleaning(url)
    title_clean = nan_to_string(title)
    excerpt_clean = nan_to_string(excerpt)
    h1_clean = hx_cleaning(html_soup,'h1')
    h2_clean = hx_cleaning(html_soup,'h2')
    h3_clean = hx_cleaning(html_soup,'h3')
    p_clean = p_cleaning(html_soup)
    words = ' '.join([url_clean, title_clean, excerpt_clean, h1_clean, h2_clean, h3_clean, p_clean])
    # keep ASCII letters and spaces only (NOTE(review): this also drops
    # accented French characters), then collapse runs of whitespace
    words_clean = re.sub(r'[^A-Za-z ]','', words)
    words_clean = re.sub(r'\s+',' ', words_clean)
    words_list = words_clean.split(' ')
    # the stopword list is lowercase, so compare the lowercased token;
    # the original tested the raw token, letting capitalised stopwords
    # ("The", "De", ...) slip through.  A set makes membership O(1).
    stopset = set(stopwords_clean)
    return ' '.join([w.lower() for w in words_list if w.lower() not in stopset])
import numpy as np
df_pocket['words_string'] = np.vectorize(words_cleaning)(df_pocket['url'], \
df_pocket['title'], \
df_pocket['excerpt'], \
df_pocket['html_soup'])
df_pocket.head()
"""
Explanation: On applique tout ça à df_pocket.
End of explanation
"""
corpus = {
'a' : "Mr. Green killed Colonel Mustard in the study with the candlestick. "
"Mr. Green is not a very nice fellow.",
'b' : "Professor Plum has a green plant in his study.",
'c' : "Miss Scarlett watered Professor Plum's green plant while he was away "
"from his office last week."
}
terms = {
'a' : [ i.lower() for i in corpus['a'].split() ],
'b' : [ i.lower() for i in corpus['b'].split() ],
'c' : [ i.lower() for i in corpus['c'].split() ]
}
from math import log
QUERY_TERMS = ['mr.', 'green']
def tf(term, doc, normalize=True):
    """Term frequency of `term` in `doc`.

    The document is lowercased and split on whitespace.  With normalize=True
    the count is divided by the number of tokens; otherwise the raw count is
    returned as a float.
    """
    tokens = doc.lower().split()
    hits = tokens.count(term.lower())
    denominator = float(len(tokens)) if normalize else 1.0
    return hits / denominator
def idf(term, corpus):
    """Smoothed inverse document frequency of `term` over `corpus`.

    Returns 1 + log(len(corpus) / n_docs_containing_term); falls back to 1.0
    when the term occurs in no document (avoiding a division by zero).
    """
    term_lower = term.lower()
    # count matching documents with a generator instead of materialising a
    # throwaway list of True values
    num_texts_with_term = sum(1 for text in corpus
                              if term_lower in text.lower().split())
    try:
        return 1.0 + log(float(len(corpus)) / num_texts_with_term)
    except ZeroDivisionError:
        return 1.0
def tf_idf(term, doc, corpus):
    """TF-IDF score of `term` for `doc` relative to the whole `corpus`."""
    frequency = tf(term, doc)
    rarity = idf(term, corpus)
    return frequency * rarity
for (k, v) in sorted(corpus.items()):
print(k, ':', v)
print('\n')
query_scores = {'a': 0, 'b': 0, 'c': 0}
for term in [t.lower() for t in QUERY_TERMS]:
for doc in sorted(corpus):
print('TF({}): {}'.format(doc, term), tf(term, corpus[doc]))
print('IDF: {}'.format(term, ), idf(term, corpus.values()))
print('\n')
for doc in sorted(corpus):
score = tf_idf(term, corpus[doc], corpus.values())
print('TF-IDF({}): {}'.format(doc, term), score)
query_scores[doc] += score
print('\n')
print("Score TF-IDF total pour le terme '{}'".format(' '.join(QUERY_TERMS), ))
for (doc, score) in sorted(query_scores.items()):
print(doc, score)
"""
Explanation: A présent la base df_pocket est nettoyée et prête à être utilisée pour les analyses de textes.
Analyse des données textuelles - TD-IDF, similarité cosine et n-grams
Le calcul tf-idf (term frequency–inverse document frequency) permet de calculer un score de proximité entre un terme de recherche et un document (c'est ce que font les moteurs de recherche).
La partie tf calcule une fonction croissante de la fréquence du terme de recherche dans le document à l'étude, la partie idf calcule une fonction inversement proportionnelle à la fréquence du terme dans l'ensemble des documents (ou corpus).
Le score total, obtenu en multipliant les deux composantes, permet ainsi de donner un score d'autant plus élevé que le terme est surréprésenté dans un document (par rapport à l'ensemble des documents). Il existe plusieurs fonctions, qui pénalisent plus ou moins les documents longs, ou qui sont plus ou moins smooth.
End of explanation
"""
from math import log
def tf(term, doc, normalize=True):
    """Term frequency of `term` in `doc`.

    The document is lowercased and split on whitespace.  With normalize=True
    the count is divided by the number of tokens; otherwise the raw count is
    returned as a float.
    """
    tokens = doc.lower().split()
    hits = tokens.count(term.lower())
    denominator = float(len(tokens)) if normalize else 1.0
    return hits / denominator
def idf(term, corpus):
    """Smoothed inverse document frequency of `term` over `corpus`.

    Returns 1 + log(len(corpus) / n_docs_containing_term); falls back to 1.0
    when the term occurs in no document (avoiding a division by zero).
    """
    term_lower = term.lower()
    # count matching documents with a generator instead of materialising a
    # throwaway list of True values
    num_texts_with_term = sum(1 for text in corpus
                              if term_lower in text.lower().split())
    try:
        return 1.0 + log(float(len(corpus)) / num_texts_with_term)
    except ZeroDivisionError:
        return 1.0
def tf_idf(term, doc, corpus):
    """TF-IDF score of `term` for `doc` relative to the whole `corpus`."""
    frequency = tf(term, doc)
    rarity = idf(term, corpus)
    return frequency * rarity
QUERY_TERMS = ['green', 'plant']
query_scores = {'a': 0, 'b': 0, 'c': 0}
for term in [t.lower() for t in QUERY_TERMS]:
for doc in sorted(corpus):
score = tf_idf(term, corpus[doc], corpus.values())
query_scores[doc] += score
print("Score TF-IDF total pour le terme '{}'".format(' '.join(QUERY_TERMS), ))
for (doc, score) in sorted(query_scores.items()):
print(doc, score)
"""
Explanation: Exercice guidé - Calcul de TF-IDF
Quel document est le plus proche du terme "green plant" ? Calculer les scores TF-IDF pour le terme "green plant". Cela correspond-il à vos attentes ? Que se passe-t-il avec "green" seul ?
Green plant
End of explanation
"""
QUERY_TERMS = ['green']
term = [t.lower() for t in QUERY_TERMS]
term = 'green'
query_scores = {'a': 0, 'b': 0, 'c': 0}
for doc in sorted(corpus):
score = tf_idf(term, corpus[doc], corpus.values())
query_scores[doc] += score
print("Score TF-IDF total pour le terme '{}'".format(term))
for (doc, score) in sorted(query_scores.items()):
print(doc, score)
"""
Explanation: Deux documents possibles : b ou c (a ne contient pas le mot « plant »). B est plus court : donc green plant « pèse » plus.
Green
End of explanation
"""
import re, nltk
#Tokenisation naïve sur les espaces entre les mots => on obtient une liste de mots
tokens = re.split('\s+', ' '.join(df_pocket['words_string']))
#On transforme cette liste en objet nltk "Text" (objet chaine de caractère qui conserve la notion de tokens, et qui
#comprend un certain nombre de méthodes utiles pour explorer les données.
text = nltk.Text(tokens)
## la méthode vocab permet d'obtenir pour chaque terme présent dans la liste text nltk, le nombre d'occurence des termes
## ici on crée le dictionnaire fdist
fdist = text.vocab()
list(fdist.items())[:50]
# Une autre méthode "concordance" : montre les occurences d'un mot dans son contexte
print("Exemples d'occurences du terme 'python' :")
text.concordance("python")
print('\n')
"""
Explanation: Exercice 1 - TF-IDF sur des données pocket
Prenez 5 articles enregistrés dans pocket et déterminer leur score pour les mots python, data et science.
Exercice 2 - Création d'un moteur de recherche pour les données pocket
L'idée de cet exercice est de créer un moteur de recherche pour ['python','data','science'].
Le but : trouver les 5 articles les plus pertinents pour ces termes.
1) La première étape sera de calculer pour chaque article de la base le score td-idf.
2) La seconde étape sera de trier ces scores du plus élevé au moins fort.
Exercice 3 - Catégorisation automatique avec des méthodes non supervisées
Avec ce que vous venez d'apprendre (le tf-idf), il est possible de créer un modèle de catégorisation automatique "non-supervisé". Ce terme barbare signifie que l'on peut créér des tags à partir des seules variables explicatives, sans utiliser de "label", c'est-à-dire de données qui valident si la prédiction (ici, présence de tel ou tel mot dans les tags) est correcte. Normalement, on utilise ce genre de méthode quand on a pas de labels et que l'on cherche à faire ressortir des régularités (des patterns) dans les données. D'autres méthodes de machine learning non-supervisées connues sont : le clustering, les ACP.
Pour bien comprendre le tf-idf, on vous l'a fait coder "à la main". En réalité, c'est tellement classique, qu'il existe des librairies qui l'ont déjà codé. Voir scikitlearn.feature_extraction.text
L'idée est la suivante : on va retenir comme "tags", les 3 mots les plus "caractéristiques" d'un document. C'est-à-dire, les mots correspondants aux 3 scores tf-idf les plus élevés.
Les étapes à suivre :
- transformer les mots en vecteurs. L'idée est de créer une matrice, avec en ligne les documents, en colonne les mots possibles (prendre tous le smots uniques présents dans l'ensemble des documents). Cela se fait en 3 lignes de code, voir la documentation scikitlearn.feature_extraction.text
- calculer les tf-idf normalisés
- récupérer les indices dont les mots ont le score le plus élevé : voir la méthode argsort
- récupérer la correspondance mots et indices
- récupérer les 3 mots les plus caractéristiques, et comparer aux tags de la table df_pocket
Approche contextuelle
Les approches bag-of-words, bien que simplistes, permettent de créer, d'indexer et de comparer des documents. La prise en compte des suites de 2, 3 ou plus mots serait un moyen d'affiner de tels modèles. Cela permet aussi de mieux comprendre le sens des homonymes, et des phrases (d'une manière générale, la sémantique).
nltk offre des methodes pour tenir compte du contexte : pour ce faire, nous calculons les n-grams, c'est-à-dire l'ensemble des co-occurrences successives de mots deux-à-deux (bigrams), trois-à-trois (tri-grams), etc.
En général, on se contente de bi-grams, au mieux de tri-grams :
- les modèles de classification, analyse du sentiment, comparaison de documents, etc. qui comparent des n-grams avec n trop grands sont rapidement confrontés au problème de données sparse, cela réduit la capacité prédictive des modèles ;
- les performances décroissent très rapidement en fonction de n, et les coûts de stockage des données augmentent rapidement (environ n fois plus élevé que la base de donnée initiale).
référence : introduction à nltk
Dans cette partie, nous allons nous intéresser au nombre d'occurences et de co-occurences des termes dans les articles de la base pocket. Pour cela, nous utilisons les méthodes disponibles dans le package nltk
End of explanation
"""
|
CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC2.ipynb | mit | %matplotlib inline
import numpy as np
from IPython.core.pylabtools import figsize
import matplotlib.pyplot as plt
figsize(12.5, 5)
import pymc as pm
# draw three independent Poisson(4.5) sequences and plot their running averages
sample_size = 100000
# for a Poisson random variable the expected value equals its parameter lambda
expected_value = lambda_ = 4.5
poi = pm.rpoisson
# evaluate the running average every 100 samples
N_samples = range(1, sample_size, 100)
for k in range(3):
    samples = poi(lambda_, size=sample_size)
    # mean of the first i samples, recomputed from scratch for each i
    # (O(n^2) overall, but fine at this size)
    partial_average = [samples[:i].mean() for i in N_samples]
    plt.plot(N_samples, partial_average, lw=1.5, label="average \
of $n$ samples; seq. %d" % k)
# horizontal dashed reference line at the true expected value 4.5
plt.plot(N_samples, expected_value * np.ones_like(partial_average),
         ls="--", label="true expected value", c="k")
plt.ylim(4.35, 4.65)
plt.title("Convergence of the average of \n random variables to its \
expected value")
plt.ylabel("average of $n$ samples")
plt.xlabel("# of samples, $n$")
plt.legend();
"""
Explanation: Chapter 4
The greatest theorem never told
This chapter focuses on an idea that is always bouncing around our minds, but is rarely made explicit outside books devoted to statistics. In fact, we've been using this simple idea in every example thus far.
The Law of Large Numbers
Let $Z_i$ be $N$ independent samples from some probability distribution. According to the Law of Large numbers, so long as the expected value $E[Z]$ is finite, the following holds,
$$\frac{1}{N} \sum_{i=1}^N Z_i \rightarrow E[ Z ], \;\;\; N \rightarrow \infty.$$
In words:
The average of a sequence of random variables from the same distribution converges to the expected value of that distribution.
This may seem like a boring result, but it will be the most useful tool you use.
Intuition
If the above Law is somewhat surprising, it can be made clearer by examining a simple example.
Consider a random variable $Z$ that can take only two values, $c_1$ and $c_2$. Suppose we have a large number of samples of $Z$, denoting a specific sample $Z_i$. The Law says that we can approximate the expected value of $Z$ by averaging over all samples. Consider the average:
$$ \frac{1}{N} \sum_{i=1}^N \;Z_i $$
By construction, $Z_i$ can only take on $c_1$ or $c_2$, hence we can partition the sum over these two values:
\begin{align}
\frac{1}{N} \sum_{i=1}^N \;Z_i
& =\frac{1}{N} \big( \sum_{ Z_i = c_1}c_1 + \sum_{Z_i=c_2}c_2 \big) \\[5pt]
& = c_1 \sum_{ Z_i = c_1}\frac{1}{N} + c_2 \sum_{ Z_i = c_2}\frac{1}{N} \\[5pt]
& = c_1 \times \text{ (approximate frequency of $c_1$) } \\
& \;\;\;\;\;\;\;\;\; + c_2 \times \text{ (approximate frequency of $c_2$) } \\[5pt]
& \approx c_1 \times P(Z = c_1) + c_2 \times P(Z = c_2 ) \\[5pt]
& = E[Z]
\end{align}
Equality holds in the limit, but we can get closer and closer by using more and more samples in the average. This Law holds for almost any distribution, minus some important cases we will encounter later.
Example
Below is a diagram of the Law of Large numbers in action for three different sequences of Poisson random variables.
We sample sample_size = 100000 Poisson random variables with parameter $\lambda = 4.5$. (Recall the expected value of a Poisson random variable is equal to its parameter.) We calculate the average for the first $n$ samples, for $n=1$ to sample_size.
End of explanation
"""
figsize(12.5, 4)
N_Y = 250 # use this many to approximate D(N)
N_array = np.arange(1000, 50000, 2500) # use this many samples in the approx. to the variance.
D_N_results = np.zeros(len(N_array))
lambda_ = 4.5
expected_value = lambda_ # for X ~ Poi(lambda) , E[ X ] = lambda
def D_N(n):
    """
    Approximate D(N): the root-mean-square distance between the average of
    n Poisson(lambda_) samples and the true expected value.

    Draws N_Y independent length-n sample sequences (poi, lambda_, N_Y and
    expected_value are globals defined in this cell), takes each sequence's
    mean, and returns the RMS deviation of those means from expected_value.
    """
    Z = poi(lambda_, size=(n, N_Y))   # shape (n, N_Y): N_Y sequences of n draws
    average_Z = Z.mean(axis=0)        # per-sequence sample means, shape (N_Y,)
    return np.sqrt(((average_Z - expected_value) ** 2).mean())
for i, n in enumerate(N_array):
D_N_results[i] = D_N(n)
plt.xlabel("$N$")
plt.ylabel("expected squared-distance from true value")
plt.plot(N_array, D_N_results, lw=3,
label="expected distance between\n\
expected value and \naverage of $N$ random variables.")
plt.plot(N_array, np.sqrt(expected_value) / np.sqrt(N_array), lw=2, ls="--",
label=r"$\frac{\sqrt{\lambda}}{\sqrt{N}}$")
plt.legend()
plt.title("How 'fast' is the sample average converging? ");
"""
Explanation: Looking at the above plot, it is clear that when the sample size is small, there is greater variation in the average (compare how jagged and jumpy the average is initially, and how it then smooths out). All three paths approach the value 4.5, but just flirt with it as $N$ gets large. Mathematicians and statisticians have another name for flirting: convergence.
Another very relevant question we can ask is how quickly am I converging to the expected value? Let's plot something new. For a specific $N$, let's do the above trials thousands of times and compute how far away we are from the true expected value, on average. But wait — compute on average? This is simply the law of large numbers again! For example, we are interested in, for a specific $N$, the quantity:
$$D(N) = \sqrt{ \;E\left[\;\; \left( \frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \;\right)^2 \;\;\right] \;\;}$$
The above formula is interpretable as a distance away from the true value (on average), for some $N$. (We take the square root so the dimensions of the above quantity and our random variables are the same). As the above is an expected value, it can be approximated using the law of large numbers: instead of averaging $Z_i$, we calculate the following multiple times and average them:
$$ Y_k = \left( \;\frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \; \right)^2 $$
By computing the above many, $N_Y$, times (remember, it is random), and averaging them:
$$ \frac{1}{N_Y} \sum_{k=1}^{N_Y} Y_k \rightarrow E[ Y_k ] = E\;\left[\;\; \left( \frac{1}{N}\sum_{i=1}^NZ_i - 4.5 \;\right)^2 \right]$$
Finally, taking the square root:
$$ \sqrt{\frac{1}{N_Y} \sum_{k=1}^{N_Y} Y_k} \approx D(N) $$
End of explanation
"""
import pymc as pm

N = 10000
# The sample frequency of the event {Z > 10} among N exponential draws
# approximates P(Z > 10) — the indicator-function form of the Law of
# Large Numbers described above.
exceeded = [pm.rexponential(0.5) > 10 for _ in range(N)]
print(np.mean(exceeded))
"""
Explanation: As expected, the expected distance between our sample average and the actual expected value shrinks as $N$ grows large. But also notice that the rate of convergence decreases, that is, we need only 10 000 additional samples to move from 0.020 to 0.015, a difference of 0.005, but 20 000 more samples to again decrease from 0.015 to 0.010, again only a 0.005 decrease.
It turns out we can measure this rate of convergence. Above I have plotted a second line, the function $\sqrt{\lambda}/\sqrt{N}$. This was not chosen arbitrarily. In most cases, given a sequence of random variable distributed like $Z$, the rate of converge to $E[Z]$ of the Law of Large Numbers is
$$ \frac{ \sqrt{ \; Var(Z) \; } }{\sqrt{N} }$$
This is useful to know: for a given large $N$, we know (on average) how far away we are from the estimate. On the other hand, in a Bayesian setting, this can seem like a useless result: Bayesian analysis is OK with uncertainty so what's the statistical point of adding extra precise digits? Though drawing samples can be so computationally cheap that having a larger $N$ is fine too.
How do we compute $Var(Z)$ though?
The variance is simply another expected value that can be approximated! Consider the following, once we have the expected value (by using the Law of Large Numbers to estimate it, denote it $\mu$), we can estimate the variance:
$$ \frac{1}{N}\sum_{i=1}^N \;(Z_i - \mu)^2 \rightarrow E[ \;( Z - \mu)^2 \;] = Var( Z )$$
Expected values and probabilities
There is an even less explicit relationship between expected value and estimating probabilities. Define the indicator function
$$\mathbb{1}_A(x) =
\begin{cases} 1 & x \in A \\
0 & else
\end{cases}
$$
Then, by the law of large numbers, if we have many samples $X_i$, we can estimate the probability of an event $A$, denoted $P(A)$, by:
$$ \frac{1}{N} \sum_{i=1}^N \mathbb{1}_A(X_i) \rightarrow E[\mathbb{1}_A(X)] = P(A) $$
Again, this is fairly obvious after a moment's thought: the indicator function is only 1 if the event occurs, so we are summing only the times the event occurs and dividing by the total number of trials (consider how we usually approximate probabilities using frequencies). For example, suppose we wish to estimate the probability that a $Z \sim Exp(.5)$ is greater than 10, and we have many samples from a $Exp(.5)$ distribution.
$$ P( Z > 10 ) = \frac{1}{N} \sum_{i=1}^N \mathbb{1}_{z > 10 }(Z_i) $$
End of explanation
"""
figsize(12.5, 4)

# Parameters of the (identical) height distribution in every county.
std_height = 15
mean_height = 150
n_counties = 5000

pop_generator = pm.rdiscrete_uniform
norm = pm.rnormal

# generate some artificial population numbers
population = pop_generator(100, 1500, size=n_counties)

average_across_county = np.zeros(n_counties)
for county in range(n_counties):
    # generate some individuals and take the mean
    heights = norm(mean_height, 1. / std_height ** 2,
                   size=population[county])
    average_across_county[county] = heights.mean()

# locate the counties with the apparently most extreme average heights
i_min = np.argmin(average_across_county)
i_max = np.argmax(average_across_county)

# plot population size vs. recorded average
plt.scatter(population, average_across_county, alpha=0.5, c="#7A68A6")

# circle the two extreme counties
extreme_pops = [population[i_min], population[i_max]]
extreme_avgs = [average_across_county[i_min], average_across_county[i_max]]
plt.scatter(extreme_pops, extreme_avgs,
            s=60, marker="o", facecolors="none",
            edgecolors="#A60628", linewidths=1.5,
            label="extreme heights")

plt.xlim(100, 1500)
plt.title("Average height vs. County Population")
plt.xlabel("County Population")
plt.ylabel("Average height in county")
plt.plot([100, 1500], [150, 150], color="k", label="true expected height",
         ls="--")
plt.legend(scatterpoints=1);
"""
Explanation: What does this all have to do with Bayesian statistics?
Point estimates, to be introduced in the next chapter, in Bayesian inference are computed using expected values. In more analytical Bayesian inference, we would have been required to evaluate complicated expected values represented as multi-dimensional integrals. No longer. If we can sample from the posterior distribution directly, we simply need to evaluate averages. Much easier. If accuracy is a priority, plots like the ones above show how fast you are converging. And if further accuracy is desired, just take more samples from the posterior.
When is enough enough? When can you stop drawing samples from the posterior? That is the practitioner's decision, and also dependent on the variance of the samples (recall from above that a high variance means the average will converge slower).
We also should understand when the Law of Large Numbers fails. As the name implies, and comparing the graphs above for small $N$, the Law is only true for large sample sizes. Without this, the asymptotic result is not reliable. Knowing in what situations the Law fails can give us confidence in how unconfident we should be. The next section deals with this issue.
The Disorder of Small Numbers
The Law of Large Numbers is only valid as $N$ gets infinitely large: never truly attainable. While the law is a powerful tool, it is foolhardy to apply it liberally. Our next example illustrates this.
Example: Aggregated geographic data
Often data comes in aggregated form. For instance, data may be grouped by state, county, or city level. Of course, the population numbers vary per geographic area. If the data is an average of some characteristic of each the geographic areas, we must be conscious of the Law of Large Numbers and how it can fail for areas with small populations.
We will observe this on a toy dataset. Suppose there are five thousand counties in our dataset. Furthermore, population numbers in each county are uniformly distributed between 100 and 1500. The way the population numbers are generated is irrelevant to the discussion, so we do not justify this. We are interested in measuring the average height of individuals per county. Unbeknownst to us, height does not vary across county, and each individual, regardless of the county he or she is currently living in, has the same distribution of what their height may be:
$$ \text{height} \sim \text{Normal}(150, 15 ) $$
We aggregate the individuals at the county level, so we only have data for the average in the county. What might our dataset look like?
End of explanation
"""
# Show the populations of the ten most extreme counties at each end of the
# average-height ranking; small populations should dominate both lists.
shortest_ten = np.argsort(average_across_county)[:10]
tallest_ten = np.argsort(-average_across_county)[:10]

print("Population sizes of 10 'shortest' counties: ")
print(population[shortest_ten])
print("\nPopulation sizes of 10 'tallest' counties: ")
print(population[tallest_ten])
"""
Explanation: What do we observe? Without accounting for population sizes we run the risk of making an enormous inference error: if we ignored population size, we would say that the county with the shortest and tallest individuals have been correctly circled. But this inference is wrong for the following reason. These two counties do not necessarily have the most extreme heights. The error results from the calculated average of smaller populations not being a good reflection of the true expected value of the population (which in truth should be $\mu =150$). The sample size/population size/$N$, whatever you wish to call it, is simply too small to invoke the Law of Large Numbers effectively.
We provide more damning evidence against this inference. Recall the population numbers were uniformly distributed over 100 to 1500. Our intuition should tell us that the counties with the most extreme population heights should also be uniformly spread over 100 to 1500, and certainly independent of the county's population. Not so. Below are the population sizes of the counties with the most extreme heights.
End of explanation
"""
figsize(12.5, 6.5)

# column 0: mail-back rate (0-100); column 1: block-group population
data = np.genfromtxt("./data/census_data.csv", skip_header=1,
                     delimiter=",")
rates = data[:, 0]
populations = data[:, 1]

plt.scatter(populations, rates, alpha=0.5, c="#7A68A6")
plt.title("Census mail-back rate vs Population")
plt.ylabel("Mail-back rate")
plt.xlabel("population of block-group")
plt.xlim(-100, 15e3)
plt.ylim(-5, 105)

# highlight the block groups with the lowest and highest observed rates
i_min, i_max = np.argmin(rates), np.argmax(rates)
plt.scatter([populations[i_min], populations[i_max]],
            [rates[i_min], rates[i_max]],
            s=60, marker="o", facecolors="none",
            edgecolors="#A60628", linewidths=1.5,
            label="most extreme points")
plt.legend(scatterpoints=1);
"""
Explanation: Not at all uniform over 100 to 1500. This is an absolute failure of the Law of Large Numbers.
Example: Kaggle's U.S. Census Return Rate Challenge
Below is data from the 2010 US census, which partitions populations beyond counties to the level of block groups (which are aggregates of city blocks or equivalents). The dataset is from a Kaggle machine learning competition some colleagues and I participated in. The objective was to predict the census letter mail-back rate of a group block, measured between 0 and 100, using census variables (median income, number of females in the block-group, number of trailer parks, average number of children etc.). Below we plot the census mail-back rate versus block group population:
End of explanation
"""
# adding a number to the end of the %run call will get the ith top post.
# NOTE: %run is an IPython magic; the script below defines `top_post`,
# `contents`, and `votes` in this notebook's namespace (see the docstring
# that follows).
%run top_showerthoughts_submissions.py 2
print("Post contents: \n")
print(top_post)
"""
contents: an array of the text from the last 100 top submissions to a subreddit
votes: a 2d numpy array of upvotes, downvotes for each submission.
"""
n_submissions = len(votes)
# Pick four random submissions to display alongside their vote counts.
submissions = np.random.randint( n_submissions, size=4)
print("Some Submissions (out of %d total) \n-----------"%n_submissions)
for i in submissions:
    print('"' + contents[i] + '"')
    print("upvotes/downvotes: ",votes[i,:], "\n")
"""
Explanation: The above is a classic phenomenon in statistics. I say classic referring to the "shape" of the scatter plot above. It follows a classic triangular form, that tightens as we increase the sample size (as the Law of Large Numbers becomes more exact).
I am perhaps overstressing the point and maybe I should have titled the book "You don't have big data problems!", but here again is an example of the trouble with small datasets, not big ones. Simply, small datasets cannot be processed using the Law of Large Numbers. Compare with applying the Law without hassle to big datasets (ex. big data). I mentioned earlier that paradoxically big data prediction problems are solved by relatively simple algorithms. The paradox is partially resolved by understanding that the Law of Large Numbers creates solutions that are stable, i.e. adding or subtracting a few data points will not affect the solution much. On the other hand, adding or removing data points to a small dataset can create very different results.
For further reading on the hidden dangers of the Law of Large Numbers, I would highly recommend the excellent manuscript The Most Dangerous Equation.
Example: How to order Reddit submissions
You may have disagreed with the original statement that the Law of Large numbers is known to everyone, but only implicitly in our subconscious decision making. Consider ratings on online products: how often do you trust an average 5-star rating if there is only 1 reviewer? 2 reviewers? 3 reviewers? We implicitly understand that with such few reviewers that the average rating is not a good reflection of the true value of the product.
This has created flaws in how we sort items, and more generally, how we compare items. Many people have realized that sorting online search results by their rating, whether the objects be books, videos, or online comments, return poor results. Often the seemingly top videos or comments have perfect ratings only from a few enthusiastic fans, and truly more quality videos or comments are hidden in later pages with falsely-substandard ratings of around 4.8. How can we correct this?
Consider the popular site Reddit (I purposefully did not link to the website as you would never come back). The site hosts links to stories or images, and a very popular part of the site are the comments associated with each link. Redditors can vote up or down on each submission (called upvotes and downvotes). Reddit, by default, will sort submissions to a given subreddit by Hot, that is, the submissions that have the most upvotes recently.
<img src="http://i.imgur.com/3v6bz9f.png" />
How would you determine which submissions are the best? There are a number of ways to achieve this:
Popularity: A submission is considered good if it has many upvotes. A problem with this model is a submission with hundreds of upvotes, but thousands of downvotes: while being very popular, the submission is likely more controversial than best.
Difference: Using the difference of upvotes and downvotes. This solves the above problem, but fails when we consider the temporal nature of submissions. Depending on when a submission is posted, the website may be experiencing high or low traffic. The difference method will bias the Top submissions to be those made during high traffic periods, which have accumulated more upvotes than submissions that were not so graced, but are not necessarily the best.
Time adjusted: Consider using Difference divided by the age of the submission. This creates a rate, something like difference per second, or per minute. An immediate counter-example is, if we use per second, a 1 second old submission with 1 upvote would be better than a 100 second old submission with 99 upvotes. One can avoid this by only considering at least t second old submission. But what is a good t value? Does this mean no submission younger than t is good? We end up comparing unstable quantities with stable quantities (young vs. old submissions).
Ratio: Rank submissions by the ratio of upvotes to total number of votes (upvotes plus downvotes). This solves the temporal issue, such that new submissions who score well can be considered Top just as likely as older submissions, provided they have many upvotes to total votes. The problem here is that a submission with a single upvote (ratio = 1.0) will beat a submission with 999 upvotes and 1 downvote (ratio = 0.999), but clearly the latter submission is more likely to be better.
I used the phrase more likely for good reason. It is possible that the former submission, with a single upvote, is in fact a better submission than the latter with 999 upvotes. The hesitation to agree with this is because we have not seen the other 999 potential votes the former submission might get. Perhaps it will achieve an additional 999 upvotes and 0 downvotes and be considered better than the latter, though not likely.
What we really want is an estimate of the true upvote ratio. Note that the true upvote ratio is not the same as the observed upvote ratio: the true upvote ratio is hidden, and we only observe upvotes vs. downvotes (one can think of the true upvote ratio as "what is the underlying probability someone gives this submission a upvote, versus a downvote"). So the 999 upvote/1 downvote submission probably has a true upvote ratio close to 1, which we can assert with confidence thanks to the Law of Large Numbers, but on the other hand we are much less certain about the true upvote ratio of the submission with only a single upvote. Sounds like a Bayesian problem to me.
One way to determine a prior on the upvote ratio is to look at the historical distribution of upvote ratios. This can be accomplished by scraping Reddit's submissions and determining a distribution. There are a few problems with this technique though:
Skewed data: The vast majority of submissions have very few votes, hence there will be many submissions with ratios near the extremes (see the "triangular plot" in the above Kaggle dataset), effectively skewing our distribution to the extremes. One could try to only use submissions with votes greater than some threshold. Again, problems are encountered. There is a tradeoff between number of submissions available to use and a higher threshold with associated ratio precision.
Biased data: Reddit is composed of different subpages, called subreddits. Two examples are r/aww, which posts pics of cute animals, and r/politics. It is very likely that the user behaviour towards submissions of these two subreddits are very different: visitors are likely to be more friendly and affectionate in the former, and would therefore upvote submissions more, compared to the latter, where submissions are likely to be controversial and disagreed upon. Therefore not all submissions are the same.
In light of these, I think it is better to use a Uniform prior.
With our prior in place, we can find the posterior of the true upvote ratio. The Python script top_showerthoughts_submissions.py will scrape the best posts from the showerthoughts community on Reddit. This is a text-only community so the title of each post is the post. Below is the top post as well as some other sample posts:
End of explanation
"""
import pymc as pm


def posterior_upvote_ratio(upvotes, downvotes, samples=20000):
    """
    Sample the posterior of a submission's true upvote ratio.

    Accepts the number of upvotes and downvotes a particular submission
    received, and the number of posterior samples to return to the user.
    Assumes a Uniform(0, 1) prior on the ratio and a Binomial likelihood.
    Returns a 1-d array of posterior samples (the first quarter is
    discarded as burn-in).
    """
    N = upvotes + downvotes
    upvote_ratio = pm.Uniform("upvote_ratio", 0, 1)
    observations = pm.Binomial("obs", N, upvote_ratio, value=upvotes, observed=True)
    # do the fitting; first do a MAP as it is cheap and useful.
    pm.MAP([upvote_ratio, observations]).fit()
    mcmc = pm.MCMC([upvote_ratio, observations])
    # Use floor division: `samples / 4` is a float in Python 3 and the
    # burn-in argument must be an integer count of samples to discard.
    mcmc.sample(samples, samples // 4)
    return mcmc.trace("upvote_ratio")[:]
"""
Explanation: For a given true upvote ratio $p$ and $N$ votes, the number of upvotes will look like a Binomial random variable with parameters $p$ and $N$. (This is because of the equivalence between upvote ratio and probability of upvoting versus downvoting, out of $N$ possible votes/trials). We create a function that performs Bayesian inference on $p$, for a particular comment's upvote/downvote pair.
End of explanation
"""
figsize(11., 8)

posteriors = []
# Cycle through the palette with `% 5` so the cell still works if more
# than five submissions are sampled (previously one of the two hist calls
# indexed `colours[i]` directly and would raise IndexError past i == 4).
colours = ["#348ABD", "#A60628", "#7A68A6", "#467821", "#CF4457"]
for i in range(len(submissions)):
    j = submissions[i]
    # draw posterior samples of the true upvote ratio for submission j
    posteriors.append(posterior_upvote_ratio(votes[j, 0], votes[j, 1]))
    # outline of the posterior density
    plt.hist(posteriors[i], bins=18, density=True, alpha=.9,
             histtype="step", color=colours[i % 5], lw=3,
             label='(%d up:%d down)\n%s...' % (votes[j, 0], votes[j, 1], contents[j][:50]))
    # light fill under the same density
    plt.hist(posteriors[i], bins=18, density=True, alpha=.2,
             histtype="stepfilled", color=colours[i % 5], lw=3, )

plt.legend(loc="upper left")
plt.xlim(0, 1)
plt.title("Posterior distributions of upvote ratios on different submissions");
"""
Explanation: Below are the resulting posterior distributions.
End of explanation
"""
N = posteriors[0].shape[0]
lower_limits = []

for i in range(len(submissions)):
    j = submissions[i]
    # outline and light fill of each posterior, as in the previous cell
    # (`% 5` keeps the palette index in range for any number of submissions)
    plt.hist(posteriors[i], bins=20, density=True, alpha=.9,
             histtype="step", color=colours[i % 5], lw=3,
             label='(%d up:%d down)\n%s...' % (votes[j, 0], votes[j, 1], contents[j][:50]))
    plt.hist(posteriors[i], bins=20, density=True, alpha=.2,
             histtype="stepfilled", color=colours[i % 5], lw=3, )

    # 95% least plausible value: only 5% of the posterior mass lies below it
    v = np.sort(posteriors[i])[int(0.05 * N)]
    plt.vlines(v, 0, 10, color=colours[i % 5], linestyles="--", linewidths=3)
    lower_limits.append(v)

plt.legend(loc="upper left")
plt.title("Posterior distributions of upvote ratios on different submissions");

# rank submissions by their conservative lower bound, best first
order = np.argsort(-np.array(lower_limits))
print(order, lower_limits)
"""
Explanation: Some distributions are very tight, others have very long tails (relatively speaking), expressing our uncertainty with what the true upvote ratio might be.
Sorting!
We have been ignoring the goal of this exercise: how do we sort the submissions from best to worst? Of course, we cannot sort distributions, we must sort scalar numbers. There are many ways to distill a distribution down to a scalar: expressing the distribution through its expected value, or mean, is one way. Choosing the mean is a bad choice though. This is because the mean does not take into account the uncertainty of distributions.
I suggest using the 95% least plausible value, defined as the value such that there is only a 5% chance the true parameter is lower (think of the lower bound on the 95% credible region). Below are the posterior distributions with the 95% least-plausible value plotted:
End of explanation
"""
def intervals(u, d):
    """Return (posterior mean, approximate error) for an upvote/downvote pair.

    With a uniform prior, the posterior of the true upvote ratio is
    Beta(1 + u, 1 + d); this uses the normal approximation to that Beta.
    The 1.65 factor is the z-value whose lower tail covers ~95% of a
    standard normal, so `mean - err` approximates the 95% lower bound.
    """
    alpha = 1. + u
    beta = 1. + d
    total = alpha + beta
    mu = alpha / total
    variance = (alpha * beta) / (total ** 2 * (total + 1.))
    std_err = 1.65 * np.sqrt(variance)
    return (mu, std_err)
print("Approximate lower bounds:")
posterior_mean, std_err = intervals(votes[:, 0], votes[:, 1])
lb = posterior_mean - std_err
print(lb)
print("\n")
print("Top 40 Sorted according to approximate lower bounds:")
print("\n")

# best-first ordering by the conservative lower bound
order = np.argsort(-lb)
top_forty = order[:40]
ordered_contents = [contents[idx] for idx in top_forty]
for idx in top_forty:
    print(votes[idx, 0], votes[idx, 1], contents[idx])
    print("-------------")
"""
Explanation: The best submissions, according to our procedure, are the submissions that are most-likely to score a high percentage of upvotes. Visually those are the submissions with the 95% least plausible value close to 1.
Why is sorting based on this quantity a good idea? By ordering by the 95% least plausible value, we are being the most conservative with what we think is best. When using the lower-bound of the 95% credible interval, we believe with high certainty that the 'true upvote ratio' is at the very least equal to this value (or greater), thereby ensuring that the best submissions are still on top. Under this ordering, we impose the following very natural properties:
given two submissions with the same observed upvote ratio, we will assign the submission with more votes as better (since we are more confident it has a higher ratio).
given two submissions with the same number of votes, we still assign the submission with more upvotes as better.
But this is too slow for real-time!
I agree, computing the posterior of every submission takes a long time, and by the time you have computed it, likely the data has changed. I delay the mathematics to the appendix, but I suggest using the following formula to compute the lower bound very fast.
$$ \frac{a}{a + b} - 1.65\sqrt{ \frac{ab}{ (a+b)^2(a + b +1 ) } }$$
where
\begin{align}
& a = 1 + u \\
& b = 1 + d \\
\end{align}
$u$ is the number of upvotes, and $d$ is the number of downvotes. The formula is a shortcut in Bayesian inference, which will be further explained in Chapter 6 when we discuss priors in more detail.
End of explanation
"""
# Plot the top 40, worst at the bottom, sorted by the lower bound.
r_order = order[::-1][-40:]
plt.errorbar(posterior_mean[r_order], np.arange(len(r_order)),
             xerr=std_err[r_order], capsize=0, fmt="o",
             color="#7A68A6")
plt.xlim(0.3, 1)
# Materialize the labels into a list: in Python 3, `map` returns a lazy
# iterator, which older matplotlib versions reject as yticks labels.
labels = [content[:30].replace("\n", "") for content in ordered_contents]
plt.yticks(np.arange(len(r_order) - 1, -1, -1), labels);
"""
Explanation: We can view the ordering visually by plotting the posterior mean and bounds, and sorting by the lower bound. In the plot below, notice that the left error-bar is sorted (as we suggested this is the best way to determine an ordering), so the means, indicated by dots, do not follow any strong pattern.
End of explanation
"""
# Enter code here
import scipy.stats as stats

exp = stats.expon(scale=4)  # Z ~ Exp with mean 4 (scipy's scale = 1/rate)
N = 10 ** 5
X = exp.rvs(size=N)  # N i.i.d. draws to average over
# ...
"""
Explanation: In the graphic above, you can see why sorting by mean would be sub-optimal.
Extension to Starred rating systems
The above procedure works well for upvote-downvotes schemes, but what about systems that use star ratings, e.g. 5 star rating systems. Similar problems apply with simply taking the average: an item with two perfect ratings would beat an item with thousands of perfect ratings, but a single sub-perfect rating.
We can consider the upvote-downvote problem above as binary: 0 is a downvote, 1 if an upvote. A $N$-star rating system can be seen as a more continuous version of above, and we can set $n$ stars rewarded is equivalent to rewarding $\frac{n}{N}$. For example, in a 5-star system, a 2 star rating corresponds to 0.4. A perfect rating is a 1. We can use the same formula as before, but with $a,b$ defined differently:
$$ \frac{a}{a + b} - 1.65\sqrt{ \frac{ab}{ (a+b)^2(a + b +1 ) } }$$
where
\begin{align}
& a = 1 + S \\
& b = 1 + N - S \\
\end{align}
where $N$ is the number of users who rated, and $S$ is the sum of all the ratings, under the equivalence scheme mentioned above.
Example: Counting Github stars
What is the average number of stars a Github repository has? How would you calculate this? There are over 6 million repositories, so there is more than enough data to invoke the Law of Large numbers. Let's start pulling some data. TODO
Conclusion
While the Law of Large Numbers is cool, it is only true so much as its name implies: with large sample sizes only. We have seen how our inference can be affected by not considering how the data is shaped.
By (cheaply) drawing many samples from the posterior distributions, we can ensure that the Law of Large Number applies as we approximate expected values (which we will do in the next chapter).
Bayesian inference understands that with small sample sizes, we can observe wild randomness. Our posterior distribution will reflect this by being more spread rather than tightly concentrated. Thus, our inference should be correctable.
There are major implications of not considering the sample size, and trying to sort objects that are unstable leads to pathological orderings. The method provided above solves this problem.
Appendix
Derivation of sorting comments formula
Basically what we are doing is using a Beta prior (with parameters $a=1, b=1$, which is a uniform distribution), and using a Binomial likelihood with observations $u, N = u+d$. This means our posterior is a Beta distribution with parameters $a' = 1 + u, b' = 1 + (N - u) = 1+d$. We then need to find the value, $x$, such that 0.05 probability is less than $x$. This is usually done by inverting the CDF (Cumulative Distribution Function), but the CDF of the beta, for integer parameters, is known but is a large sum [3].
We instead use a Normal approximation. The mean of the Beta is $\mu = a'/(a'+b')$ and the variance is
$$\sigma^2 = \frac{a'b'}{ (a' + b')^2(a'+b'+1) }$$
Hence we solve the following equation for $x$ and have an approximate lower bound.
$$ 0.05 = \Phi\left( \frac{(x - \mu)}{\sigma}\right) $$
$\Phi$ being the cumulative distribution for the normal distribution
Exercises
1. How would you estimate the quantity $E\left[ \cos{X} \right]$, where $X \sim \text{Exp}(4)$? What about $E\left[ \cos{X} | X \lt 1\right]$, i.e. the expected value given we know $X$ is less than 1? Would you need more samples than the original samples size to be equally accurate?
End of explanation
"""
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS stylesheet and return it as rich HTML.

    Uses a context manager so the file handle is closed deterministically;
    the original version leaked the handle to the garbage collector.
    """
    with open("../styles/custom.css", "r") as style_file:
        return HTML(style_file.read())
css_styling()
"""
Explanation: 2. The following table was located in the paper "Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression" [2]. The table ranks football field-goal kickers by their percent of non-misses. What mistake have the researchers made?
Kicker Careers Ranked by Make Percentage
<table><tbody><tr><th>Rank </th><th>Kicker </th><th>Make % </th><th>Number of Kicks</th></tr><tr><td>1 </td><td>Garrett Hartley </td><td>87.7 </td><td>57</td></tr><tr><td>2</td><td> Matt Stover </td><td>86.8 </td><td>335</td></tr><tr><td>3 </td><td>Robbie Gould </td><td>86.2 </td><td>224</td></tr><tr><td>4 </td><td>Rob Bironas </td><td>86.1 </td><td>223</td></tr><tr><td>5</td><td> Shayne Graham </td><td>85.4 </td><td>254</td></tr><tr><td>… </td><td>… </td><td>…</td><td> </td></tr><tr><td>51</td><td> Dave Rayner </td><td>72.2 </td><td>90</td></tr><tr><td>52</td><td> Nick Novak </td><td>71.9 </td><td>64</td></tr><tr><td>53 </td><td>Tim Seder </td><td>71.0 </td><td>62</td></tr><tr><td>54 </td><td>Jose Cortez </td><td>70.7</td><td> 75</td></tr><tr><td>55 </td><td>Wade Richey </td><td>66.1</td><td> 56</td></tr></tbody></table>
In August 2013, a popular post on the average income per programmer of different languages was trending. Here's the summary chart: (reproduced without permission, cause when you lie with stats, you gunna get the hammer). What do you notice about the extremes?
Average household income by programming language
<table >
<tr><td>Language</td><td>Average Household Income ($)</td><td>Data Points</td></tr>
<tr><td>Puppet</td><td>87,589.29</td><td>112</td></tr>
<tr><td>Haskell</td><td>89,973.82</td><td>191</td></tr>
<tr><td>PHP</td><td>94,031.19</td><td>978</td></tr>
<tr><td>CoffeeScript</td><td>94,890.80</td><td>435</td></tr>
<tr><td>VimL</td><td>94,967.11</td><td>532</td></tr>
<tr><td>Shell</td><td>96,930.54</td><td>979</td></tr>
<tr><td>Lua</td><td>96,930.69</td><td>101</td></tr>
<tr><td>Erlang</td><td>97,306.55</td><td>168</td></tr>
<tr><td>Clojure</td><td>97,500.00</td><td>269</td></tr>
<tr><td>Python</td><td>97,578.87</td><td>2314</td></tr>
<tr><td>JavaScript</td><td>97,598.75</td><td>3443</td></tr>
<tr><td>Emacs Lisp</td><td>97,774.65</td><td>355</td></tr>
<tr><td>C#</td><td>97,823.31</td><td>665</td></tr>
<tr><td>Ruby</td><td>98,238.74</td><td>3242</td></tr>
<tr><td>C++</td><td>99,147.93</td><td>845</td></tr>
<tr><td>CSS</td><td>99,881.40</td><td>527</td></tr>
<tr><td>Perl</td><td>100,295.45</td><td>990</td></tr>
<tr><td>C</td><td>100,766.51</td><td>2120</td></tr>
<tr><td>Go</td><td>101,158.01</td><td>231</td></tr>
<tr><td>Scala</td><td>101,460.91</td><td>243</td></tr>
<tr><td>ColdFusion</td><td>101,536.70</td><td>109</td></tr>
<tr><td>Objective-C</td><td>101,801.60</td><td>562</td></tr>
<tr><td>Groovy</td><td>102,650.86</td><td>116</td></tr>
<tr><td>Java</td><td>103,179.39</td><td>1402</td></tr>
<tr><td>XSLT</td><td>106,199.19</td><td>123</td></tr>
<tr><td>ActionScript</td><td>108,119.47</td><td>113</td></tr>
</table>
References
Wainer, Howard. The Most Dangerous Equation. American Scientist, Volume 95.
Clark, Torin K., Aaron W. Johnson, and Alexander J. Stimpson. "Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression." (2013): n. page. Web. 20 Feb. 2013.
http://en.wikipedia.org/wiki/Beta_function#Incomplete_beta_function
End of explanation
"""
|
zingale/pyreaclib | library-examples.ipynb | bsd-3-clause | import pynucastro as pyna
library_file = '20180319default2'
mylibrary = pyna.rates.Library(library_file)
"""
Explanation: Selecting Rates from a Library
The Library class in pynucastro provides a high level interface for reading files containing one or more Reaclib rates and then filtering these rates based on user-specified criteria for the nuclei involved in the reactions. We can then use the resulting rates to build a network.
This example uses a Reaclib snapshot (20180319default2) downloaded from:
https://groups.nscl.msu.edu/jina/reaclib/db/library.php?action=viewsnapshots
That snapshot is found in the library search path.
Reading a rate snapshot
The Library class will look for the library file in the working directory or in the pynucastro/library subdirectory of the pynucastro package.
When the constructor is supplied a file name, pynucastro will read the contents of this file and interpret them as Reaclib rates in either the Reaclib 1 or 2 formats. The Library then stores the rates from the file as Rate objects.
End of explanation
"""
all_nuclei = ["p", "he4", "c12", "n13", "c13", "o14", "n14", "o15", "n15"]
"""
Explanation: Specifying Desired Nuclei
This example constructs a CNO network like the one constructed from a set of Reaclib rate files in the "pynucastro usage examples" section of this documentation.
This time, however, we will specify the nuclei we want in the network and allow the Library class to find all the rates linking only nuclei in the set we specified.
We can specify these nuclei by their abbreviations in the form, e.g. "he4":
End of explanation
"""
cno_library = mylibrary.linking_nuclei(all_nuclei, with_reverse=False)
"""
Explanation: Now we use the Library.linking_nuclei() function to return a smaller Library object containing only the rates that link these nuclei.
We can pass with_reverse=False to restrict linking_nuclei to only include forward rates from the Reaclib library, as pynucastro does not yet implement partition functions for reverse rates.
End of explanation
"""
cno_network = pyna.networks.PythonNetwork(libraries=cno_library)
"""
Explanation: Now we can create a network (PythonNetwork or StarKillerNetwork) as:
End of explanation
"""
cno_network.plot()
"""
Explanation: In the above, we construct a network from a Library object by passing the Library object to the libraries argument of the network constructor. To construct a network from multiple libraries, the libraries argument can also take a list of Library objects.
We can show the structure of the network by plotting a network diagram.
End of explanation
"""
cno_network.write_network('network_module.py')
"""
Explanation: Note that the above network also includes the triple-alpha rate from Reaclib.
If we wanted to generate the python code to calculate the right-hand side we could next do:
End of explanation
"""
c12_inexact_filter = pyna.rates.RateFilter(reactants=['c12'], exact=False)
"""
Explanation: And we could run this together with the burn driver program in examples/burn.py
Filtering the Library
This example introduces the RateFilter class which allows us to define a set of reactants and products to search for in a Library object.
Inexact Filtering
Inexact filtering is like using wildcards: in the following example, the rate filter we define will match any rates in which $\mathrm{^{12}C}$ is a reactant.
End of explanation
"""
c12_inexact_library = mylibrary.filter(c12_inexact_filter)
print(c12_inexact_library)
"""
Explanation: Once we construct a RateFilter object, we can apply it to our Library by passing it to the Library.filter function.
Library.filter returns a new Library object containing the rates that match our RateFilter.
We can print a Library to see the rates it contains. In parentheses the rate identifier is printed, showing the Reaclib rate label as well as whether the rate is forward or reverse.
End of explanation
"""
cago = c12_inexact_library.get_rate('c12 + he4 --> o16 <nac2_reaclib__>')
cago.plot()
"""
Explanation: The rate identifiers above can be used to access individual Rate objects within a Library as follows:
End of explanation
"""
c12_exact_filter = pyna.rates.RateFilter(reactants=['c12', 'c12'])
c12_exact_library = mylibrary.filter(c12_exact_filter)
print(c12_exact_library)
"""
Explanation: Exact Filtering
Exact filtering is useful when you have a specific rate in mind or a specific combination of reactants or products. In the following example, we look for all rates of the form $\mathrm{^{12}C + ^{12}C \rightarrow \ldots}$
To use exact filtering, omit the exact keyword to the RateFilter constructor, as it is turned on by default.
Exact filtering does not mean all the nuclei involved in the rate must be specified, it means that all filtering options passed to the RateFilter constructor are strictly applied. In this case, the filter will return rates with exactly two reactants, both of which are $\mathrm{^{12}C}$. However, the filter places no constraint on the products or number of products in the rate.
End of explanation
"""
# Accumulate the alpha-chain rates into an (initially empty) library.
alpha_library = pyna.rates.Library()
capture = pyna.rates.Nucleus('he4')  # the captured particle (an alpha)
seed = pyna.rates.Nucleus('c12')     # starting point of the chain
while True:
    # Forward rate: seed + alpha -> (at most one heavier product).
    ac_filter = pyna.rates.RateFilter(reactants=[capture, seed], max_products=1)
    ac_library = mylibrary.filter(ac_filter)
    alpha_library = alpha_library + ac_library
    # The heaviest nucleus among the filtered rates is the capture product.
    heavy = ac_library.heaviest()
    # Reverse (photodisintegration) rate: product -> seed + alpha.
    ac_filter_inv = pyna.rates.RateFilter(reactants=[heavy], products=[capture, seed])
    ac_inv_library = mylibrary.filter(ac_filter_inv)
    alpha_library = alpha_library + ac_inv_library
    print(heavy)
    # Stop once the chain reaches mass number 56 (56Ni); otherwise the
    # capture product becomes the seed for the next alpha capture.
    if heavy.A == 56:
        break
    else:
        seed = heavy
"""
Explanation: Example: Building an Alpha Capture Network
In the next example, we use rate filtering to iteratively construct a Library containing the alpha capture rates linking $\mathrm{^{12}C}$ to $\mathrm{^{56}Ni}$.
After finding each successive link in the alpha capture chain, we call Library.heaviest() to find the heaviest nucleus in the filtered rates. This corresponds to the nucleus with the largest mass number, and in case of a tie between isobars, this returns the isobar with the smallest atomic number. We use this feature to find the reverse rate for each alpha capture reaction.
In the example below, we add each filtered library to our alpha capture library alpha_library, initialized as an empty Library. The Library class supports the addition operator by returning a new library containing the rates in the two libraries we added together.
This example also introduces the max_products keyword, which specifies we are looking for reactions producing at most max_products product nuclei.
Similarly, the RateFilter constructor supports the following keywords constraining the number of reactants and products:
min_reactants
max_reactants
min_products
max_products
Because we have omitted the argument exact=False, the filter constraints we apply are exact.
End of explanation
"""
print(alpha_library)
"""
Explanation: We will next print out the library we constructed, seeing that we have both forward and reverse rates for the alpha chain.
Note that at this time pynucastro has not yet implemented nuclear partition functions, so these reverse rates are calculated only from detailed balance in the Reaclib library.
End of explanation
"""
alpha_network = pyna.networks.PythonNetwork(libraries=alpha_library)
"""
Explanation: Next we can create a reaction network from our filtered alpha capture library by passing our library to a network constructor using the libraries keyword.
End of explanation
"""
alpha_network.plot()
"""
Explanation: And finally we can make Z-N plot of the nuclei linked via the reactions we selected.
End of explanation
"""
|
miguelfrde/stanford-cs231n | assignment3/NetworkVisualization-PyTorch.ipynb | mit | import torch
from torch.autograd import Variable
import torchvision
import torchvision.transforms as T
import random
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
import matplotlib.pyplot as plt
from cs231n.image_utils import SQUEEZENET_MEAN, SQUEEZENET_STD
from PIL import Image
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
"""
Explanation: Network Visualization (PyTorch)
In this notebook we will explore the use of image gradients for generating new images.
When training a model, we define a loss function which measures our current unhappiness with the model's performance; we then use backpropagation to compute the gradient of the loss with respect to the model parameters, and perform gradient descent on the model parameters to minimize the loss.
Here we will do something slightly different. We will start from a convolutional neural network model which has been pretrained to perform image classification on the ImageNet dataset. We will use this model to define a loss function which quantifies our current unhappiness with our image, then use backpropagation to compute the gradient of this loss with respect to the pixels of the image. We will then keep the model fixed, and perform gradient descent on the image to synthesize a new image which minimizes the loss.
In this notebook we will explore three techniques for image generation:
Saliency Maps: Saliency maps are a quick way to tell which part of the image influenced the classification decision made by the network.
Fooling Images: We can perturb an input image so that it appears the same to humans, but will be misclassified by the pretrained network.
Class Visualization: We can synthesize an image to maximize the classification score of a particular class; this can give us some sense of what the network is looking for when it classifies images of that class.
This notebook uses PyTorch; we have provided another notebook which explores the same concepts in TensorFlow. You only need to complete one of these two notebooks.
End of explanation
"""
def preprocess(img, size=224):
    """Convert a PIL image into a normalized (1, 3, size, size) float tensor.

    Resizes the image, converts it to a tensor, normalizes with the
    per-channel SqueezeNet mean/std, and prepends a batch dimension.
    """
    transform = T.Compose([
        T.Scale(size),  # NOTE(review): T.Scale is the old torchvision name for T.Resize
        T.ToTensor(),
        T.Normalize(mean=SQUEEZENET_MEAN.tolist(),
                    std=SQUEEZENET_STD.tolist()),
        T.Lambda(lambda x: x[None]),  # add a leading batch dimension
    ])
    return transform(img)
def deprocess(img, should_rescale=True):
    """Invert `preprocess`: turn a (1, 3, H, W) tensor back into a PIL image.

    Drops the batch dimension, undoes the normalization in two steps
    (divide out the std, then add back the mean), optionally rescales the
    values into [0, 1], and converts to a PIL image.
    """
    transform = T.Compose([
        T.Lambda(lambda x: x[0]),  # strip the batch dimension
        T.Normalize(mean=[0, 0, 0], std=(1.0 / SQUEEZENET_STD).tolist()),
        T.Normalize(mean=(-SQUEEZENET_MEAN).tolist(), std=[1, 1, 1]),
        T.Lambda(rescale) if should_rescale else T.Lambda(lambda x: x),
        T.ToPILImage(),
    ])
    return transform(img)
def rescale(x):
    """Linearly map the values of a tensor onto the interval [0, 1]."""
    lo = x.min()
    span = x.max() - lo
    return (x - lo) / span
def blur_image(X, sigma=1):
    """Gaussian-blur a (N, C, H, W) tensor in place along its H and W axes.

    The smoothing runs on a NumPy copy and the result is written back
    into X, which is also returned for convenience.
    """
    smoothed = X.cpu().clone().numpy()
    for axis in (2, 3):
        smoothed = gaussian_filter1d(smoothed, sigma, axis=axis)
    X.copy_(torch.Tensor(smoothed).type_as(X))
    return X
"""
Explanation: Helper Functions
Our pretrained model was trained on images that had been preprocessed by subtracting the per-color mean and dividing by the per-color standard deviation. We define a few helper functions for performing and undoing this preprocessing. You don't need to do anything in this cell.
End of explanation
"""
# Download and load the pretrained SqueezeNet model.
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
param.requires_grad = False
"""
Explanation: Pretrained Model
For all of our image generation experiments, we will start with a convolutional neural network which was pretrained to perform image classification on ImageNet. We can use any model here, but for the purposes of this assignment we will use SqueezeNet [1], which achieves accuracies comparable to AlexNet but with a significantly reduced parameter count and computational complexity.
Using SqueezeNet rather than AlexNet or VGG or ResNet means that we can easily perform all image generation experiments on CPU.
[1] Iandola et al, "SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and < 0.5MB model size", arXiv 2016
End of explanation
"""
from cs231n.data_utils import load_imagenet_val
X, y, class_names = load_imagenet_val(num=5)
plt.figure(figsize=(12, 6))
for i in range(5):
plt.subplot(1, 5, i + 1)
plt.imshow(X[i])
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
"""
Explanation: Load some ImageNet images
We have provided a few example images from the validation set of the ImageNet ILSVRC 2012 Classification dataset. To download these images, change to cs231n/datasets/ and run get_imagenet_val.sh.
Since they come from the validation set, our pretrained model did not see these images during training.
Run the following cell to visualize some of these images, along with their ground-truth labels.
End of explanation
"""
# Example of using gather to select one entry from each row in PyTorch
def gather_example():
    """Demonstrate torch.gather: pick one column per row of a random matrix."""
    num_rows, num_cols = 4, 5
    scores = torch.randn(num_rows, num_cols)
    indices = torch.LongTensor([1, 2, 1, 3])
    print(scores)
    print(indices)
    # For each row i, select scores[i, indices[i]].
    print(scores.gather(1, indices.view(-1, 1)).squeeze())
def compute_saliency_maps(X, y, model):
    """
    Compute a class saliency map using the model for images X and labels y.

    Input:
    - X: Input images; Tensor of shape (N, 3, H, W)
    - y: Labels for X; LongTensor of shape (N,)
    - model: A pretrained CNN that will be used to compute the saliency map.

    Returns:
    - saliency: A Tensor of shape (N, H, W) giving the saliency maps for the input
      images.
    """
    # Make sure the model is in "test" mode
    model.eval()

    # Wrap the input tensors in Variables
    X_var = Variable(X, requires_grad=True)
    y_var = Variable(y)

    # Forward pass: compute class scores, then pick out the score of the
    # correct class for every image in the batch.
    scores = model(X_var)
    correct_class_scores = scores.gather(1, y_var.view(-1, 1)).squeeze()

    # Backward pass: gradient of the summed correct-class scores with
    # respect to the input pixels.  Summing is valid because each image's
    # score depends only on its own pixels.
    correct_class_scores.sum().backward()

    # Saliency = max over the color channels of the absolute pixel gradients.
    saliency, _ = X_var.grad.data.abs().max(dim=1)
    return saliency
"""
Explanation: Saliency Maps
Using this pretrained model, we will compute class saliency maps as described in Section 3.1 of [2].
A saliency map tells us the degree to which each pixel in the image affects the classification score for that image. To compute it, we compute the gradient of the unnormalized score corresponding to the correct class (which is a scalar) with respect to the pixels of the image. If the image has shape (3, H, W) then this gradient will also have shape (3, H, W); for each pixel in the image, this gradient tells us the amount by which the classification score will change if the pixel changes by a small amount. To compute the saliency map, we take the absolute value of this gradient, then take the maximum value over the 3 input channels; the final saliency map thus has shape (H, W) and all entries are nonnegative.
[2] Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. "Deep Inside Convolutional Networks: Visualising
Image Classification Models and Saliency Maps", ICLR Workshop 2014.
Hint: PyTorch gather method
Recall in Assignment 1 you needed to select one element from each row of a matrix; if s is an numpy array of shape (N, C) and y is a numpy array of shape (N,) containing integers 0 <= y[i] < C, then s[np.arange(N), y] is a numpy array of shape (N,) which selects one element from each element in s using the indices in y.
In PyTorch you can perform the same operation using the gather() method. If s is a PyTorch Tensor or Variable of shape (N, C) and y is a PyTorch Tensor or Variable of shape (N,) containing longs in the range 0 <= y[i] < C, then
s.gather(1, y.view(-1, 1)).squeeze()
will be a PyTorch Tensor (or Variable) of shape (N,) containing one entry from each row of s, selected according to the indices in y.
run the following cell to see an example.
You can also read the documentation for the gather method
and the squeeze method.
End of explanation
"""
def show_saliency_maps(X, y):
    """Plot each image in X above its saliency map for label y.

    X is a batch of images as a numpy array (each element is fed to
    Image.fromarray — presumably (H, W, 3) uint8; confirm against the
    loader) and y the matching class indices.  Uses the module-level
    `model` and `class_names`.
    """
    # Convert X and y from numpy arrays to Torch Tensors
    X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
    y_tensor = torch.LongTensor(y)

    # Compute saliency maps for images in X
    saliency = compute_saliency_maps(X_tensor, y_tensor, model)

    # Convert the saliency map from Torch Tensor to numpy array and show images
    # and saliency maps together.
    saliency = saliency.numpy()
    N = X.shape[0]
    for i in range(N):
        # Top row: the original image with its ground-truth label.
        plt.subplot(2, N, i + 1)
        plt.imshow(X[i])
        plt.axis('off')
        plt.title(class_names[y[i]])
        # Bottom row: the corresponding saliency map.
        plt.subplot(2, N, N + i + 1)
        plt.imshow(saliency[i], cmap=plt.cm.hot)
        plt.axis('off')
        plt.gcf().set_size_inches(12, 5)
    plt.show()
show_saliency_maps(X, y)
"""
Explanation: Once you have completed the implementation in the cell above, run the following to visualize some class saliency maps on our example images from the ImageNet validation set:
End of explanation
"""
def make_fooling_image(X, target_y, model):
    """
    Generate a fooling image that is close to X, but that the model classifies
    as target_y.

    Inputs:
    - X: Input image; Tensor of shape (1, 3, 224, 224)
    - target_y: An integer in the range [0, 1000)
    - model: A pretrained CNN

    Returns:
    - X_fooling: An image that is close to X, but that is classified as
      target_y by the model.
    """
    # Initialize our fooling image to the input image, and wrap it in a Variable.
    X_fooling = X.clone()
    X_fooling_var = Variable(X_fooling, requires_grad=True)

    learning_rate = 1
    # Gradient ascent on the target-class score.  The gradient is normalized
    # to unit L2 norm so the step size is controlled solely by learning_rate.
    for _ in range(100):
        scores = model(X_fooling_var)
        _, pred_y = scores.data.max(1)
        if int(pred_y.view(-1)[0]) == target_y:
            # The model is fooled; stop early.
            break
        target_score = scores[0, target_y]
        target_score.backward()
        g = X_fooling_var.grad.data
        X_fooling_var.data += learning_rate * g / g.norm()
        # Zero the gradient so steps do not accumulate across iterations.
        X_fooling_var.grad.data.zero_()
    return X_fooling_var.data
"""
Explanation: Fooling Images
We can also use image gradients to generate "fooling images" as discussed in [3]. Given an image and a target class, we can perform gradient ascent over the image to maximize the target class, stopping when the network classifies the image as the target class. Implement the following function to generate fooling images.
[3] Szegedy et al, "Intriguing properties of neural networks", ICLR 2014
End of explanation
"""
idx = 0
target_y = 6
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
X_fooling = make_fooling_image(X_tensor[idx:idx+1], target_y, model)
scores = model(Variable(X_fooling))
assert target_y == scores.data.max(1)[1][0, 0], 'The model is not fooled!'
"""
Explanation: Run the following cell to generate a fooling image:
End of explanation
"""
X_fooling_np = deprocess(X_fooling.clone())
X_fooling_np = np.asarray(X_fooling_np).astype(np.uint8)
plt.subplot(1, 4, 1)
plt.imshow(X[idx])
plt.title(class_names[y[idx]])
plt.axis('off')
plt.subplot(1, 4, 2)
plt.imshow(X_fooling_np)
plt.title(class_names[target_y])
plt.axis('off')
plt.subplot(1, 4, 3)
X_pre = preprocess(Image.fromarray(X[idx]))
diff = np.asarray(deprocess(X_fooling - X_pre, should_rescale=False))
plt.imshow(diff)
plt.title('Difference')
plt.axis('off')
plt.subplot(1, 4, 4)
diff = np.asarray(deprocess(10 * (X_fooling - X_pre), should_rescale=False))
plt.imshow(diff)
plt.title('Magnified difference (10x)')
plt.axis('off')
plt.gcf().set_size_inches(12, 5)
plt.show()
"""
Explanation: After generating a fooling image, run the following cell to visualize the original image, the fooling image, as well as the difference between them.
End of explanation
"""
def jitter(X, ox, oy):
    """
    Helper function to randomly jitter an image.

    Inputs
    - X: PyTorch Tensor of shape (N, C, H, W)
    - ox, oy: Integers giving number of pixels to jitter along W and H axes

    Returns: A new PyTorch Tensor of shape (N, C, H, W)
    """
    if ox != 0:
        # Circularly shift along the width axis: move the last ox columns
        # to the front.
        X = torch.cat([X[:, :, :, -ox:], X[:, :, :, :-ox]], dim=3)
    if oy != 0:
        # Circularly shift along the height axis: move the last oy rows
        # to the top.
        X = torch.cat([X[:, :, -oy:], X[:, :, :-oy]], dim=2)
    return X
def create_class_visualization(target_y, model, dtype, **kwargs):
    """
    Generate an image to maximize the score of target_y under a pretrained model.

    Inputs:
    - target_y: Integer in the range [0, 1000) giving the index of the class
    - model: A pretrained CNN that will be used to generate the image
    - dtype: Torch datatype to use for computations

    Keyword arguments:
    - l2_reg: Strength of L2 regularization on the image
    - learning_rate: How big of a step to take
    - num_iterations: How many iterations to use
    - blur_every: How often to blur the image as an implicit regularizer
    - max_jitter: How much to jitter the image as an implicit regularizer
    - show_every: How often to show the intermediate result
    """
    model.type(dtype)
    l2_reg = kwargs.pop('l2_reg', 1e-3)
    learning_rate = kwargs.pop('learning_rate', 25)
    num_iterations = kwargs.pop('num_iterations', 100)
    blur_every = kwargs.pop('blur_every', 10)
    max_jitter = kwargs.pop('max_jitter', 16)
    show_every = kwargs.pop('show_every', 25)

    # Randomly initialize the image as a PyTorch Tensor, and also wrap it in
    # a PyTorch Variable.
    img = torch.randn(1, 3, 224, 224).mul_(1.0).type(dtype)
    img_var = Variable(img, requires_grad=True)

    for t in range(num_iterations):
        # Randomly jitter the image a bit; this gives slightly nicer results
        ox, oy = random.randint(0, max_jitter), random.randint(0, max_jitter)
        img.copy_(jitter(img, ox, oy))

        # Gradient ascent on s_y(I) - l2_reg * ||I||_2^2: compute the target
        # class score, backprop to the pixels, and step the image along
        # (ds/dI - 2 * l2_reg * I).
        score = model(img_var)[0, target_y]
        score.backward()
        g = img_var.grad.data - 2 * l2_reg * img
        img.add_(learning_rate * g)
        # Clear the accumulated gradient before the next iteration.
        img_var.grad.data.zero_()

        # Undo the random jitter
        img.copy_(jitter(img, -ox, -oy))

        # As regularizer, clamp and periodically blur the image
        for c in range(3):
            lo = float(-SQUEEZENET_MEAN[c] / SQUEEZENET_STD[c])
            hi = float((1.0 - SQUEEZENET_MEAN[c]) / SQUEEZENET_STD[c])
            img[:, c].clamp_(min=lo, max=hi)
        if t % blur_every == 0:
            blur_image(img, sigma=0.5)

        # Periodically show the image
        if t == 0 or (t + 1) % show_every == 0 or t == num_iterations - 1:
            plt.imshow(deprocess(img.clone().cpu()))
            class_name = class_names[target_y]
            plt.title('%s\nIteration %d / %d' % (class_name, t + 1, num_iterations))
            plt.gcf().set_size_inches(4, 4)
            plt.axis('off')
            plt.show()

    return deprocess(img.cpu())
"""
Explanation: Class visualization
By starting with a random noise image and performing gradient ascent on a target class, we can generate an image that the network will recognize as the target class. This idea was first presented in [2]; [3] extended this idea by suggesting several regularization techniques that can improve the quality of the generated image.
Concretely, let $I$ be an image and let $y$ be a target class. Let $s_y(I)$ be the score that a convolutional network assigns to the image $I$ for class $y$; note that these are raw unnormalized scores, not class probabilities. We wish to generate an image $I^*$ that achieves a high score for the class $y$ by solving the problem
$$
I^* = \arg\max_I s_y(I) - R(I)
$$
where $R$ is a (possibly implicit) regularizer (note the sign of $R(I)$ in the argmax: we want to minimize this regularization term). We can solve this optimization problem using gradient ascent, computing gradients with respect to the generated image. We will use (explicit) L2 regularization of the form
$$
R(I) = \lambda \|I\|_2^2
$$
and implicit regularization as suggested by [3] by periodically blurring the generated image. We can solve this problem using gradient ascent on the generated image.
In the cell below, complete the implementation of the create_class_visualization function.
[2] Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. "Deep Inside Convolutional Networks: Visualising
Image Classification Models and Saliency Maps", ICLR Workshop 2014.
[3] Yosinski et al, "Understanding Neural Networks Through Deep Visualization", ICML 2015 Deep Learning Workshop
End of explanation
"""
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor # Uncomment this to use GPU
model.type(dtype)
target_y = 76 # Tarantula
# target_y = 78 # Tick
# target_y = 187 # Yorkshire Terrier
# target_y = 683 # Oboe
# target_y = 366 # Gorilla
# target_y = 604 # Hourglass
out = create_class_visualization(target_y, model, dtype)
"""
Explanation: Once you have completed the implementation in the cell above, run the following cell to generate an image of a Tarantula:
End of explanation
"""
# target_y = 78 # Tick
# target_y = 187 # Yorkshire Terrier
# target_y = 683 # Oboe
# target_y = 366 # Gorilla
# target_y = 604 # Hourglass
target_y = np.random.randint(1000)
print(class_names[target_y])
X = create_class_visualization(target_y, model, dtype)
"""
Explanation: Try out your class visualization on other classes! You should also feel free to play with various hyperparameters to try and improve the quality of the generated image, but this is not required.
End of explanation
"""
|
kirichoi/tellurium | examples/notebooks/core/tellurium_utility.ipynb | apache-2.0 | %matplotlib inline
from __future__ import print_function
import tellurium as te
# to get the tellurium version use
print('te.__version__')
print(te.__version__)
# or
print('te.getTelluriumVersion()')
print(te.getTelluriumVersion())
# to print the full version info use
print('-' * 80)
te.printVersionInfo()
print('-' * 80)
"""
Explanation: Back to the main Index
Version information
Tellurium's version can be obtained via te.__version__. .printVersionInfo() also returns information from certain constituent packages.
End of explanation
"""
from builtins import range
# Load a simple one-reaction model from an Antimony string.
r = te.loada("""
model test
     J0: X0 -> X1; k1*X0;
     X0 = 10; X1=0;
     k1 = 0.2
end
""")
import matplotlib.pyplot as plt
# Turn off notices so they don't clutter the output
te.noticesOff()
# Re-run the simulation 20 times, bumping the rate constant k1 each time,
# and overlay all trajectories on one plot (show=False defers rendering).
for i in range(0, 20):
    result = r.simulate (0, 10)
    r.reset()
    r.plot(result, loc=None, show=False,
           linewidth=2.0, linestyle='-', color='black', alpha=0.8)
    r.k1 = r.k1 + 0.2
# Turn the notices back on
te.noticesOn()
"""
Explanation: Repeat simulation without notification
End of explanation
"""
# Create a temporary file to hold the exported MATLAB code.
import tempfile
ftmp = tempfile.NamedTemporaryFile(suffix=".xml")
# Load a one-reaction model from an Antimony string.
r = te.loada('S1 -> S2; k1*S1; k1 = 0.1; S1 = 10')
# Save the MATLAB translation of the model to the temporary file ...
te.saveToFile(ftmp.name, r.getMatlab())
# ... or, equivalently, export it directly from the roadrunner instance.
r.exportToMatlab(ftmp.name)
# Read the file back as a string and print it.
sbmlstr = te.readFromFile(ftmp.name)
print('%' + '*'*80)
print('Converted MATLAB code')
print('%' + '*'*80)
print(sbmlstr)
"""
Explanation: File helpers for reading and writing
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.18/_downloads/2187adaa95700a6de5f9ba2004254a87/plot_sensor_noise_level.ipynb | bsd-3-clause | # Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import mne
data_path = mne.datasets.sample.data_path()
raw_erm = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample',
'ernoise_raw.fif'), preload=True)
"""
Explanation: Show noise levels from empty room data
This shows how to use :meth:mne.io.Raw.plot_psd to examine noise levels
of systems. See [1]_ for an example.
References
.. [1] Khan S, Cohen D (2013). Note: Magnetic noise from the inner wall of
a magnetically shielded room. Review of Scientific Instruments 84:56101.
https://doi.org/10.1063/1.4802845
End of explanation
"""
raw_erm.plot_psd(tmax=10., average=True, dB=False, xscale='log')
"""
Explanation: We can plot the absolute noise levels:
End of explanation
"""
|
StefanoAllesina/ISC | python/solutions/Lahti2014_solution.ipynb | gpl-2.0 | import csv
"""
Explanation: Solution of Lahti et al. 2014
Write a function that takes as input a dictionary of constraints and returns a dictionary tabulating the BMI group for all the records matching the constraints. For example, calling:
get_BMI_count({'Age': '28', 'Sex': 'female'})
should return:
{'NA': 3, 'lean': 8, 'overweight': 2, 'underweight': 1}
Import csv for reading the file.
End of explanation
"""
def get_BMI_count(dict_constraints):
    """Tabulate BMI groups for all records matching the given constraints.

    Parameters
    ----------
    dict_constraints : dict
        Maps a metadata column name to its required value,
        e.g. {'Age': '28', 'Sex': 'female'}.

    Returns
    -------
    dict
        Maps each BMI_group value found to the number of matching records,
        e.g. {'NA': 3, 'lean': 8, 'overweight': 2, 'underweight': 1}.
    """
    # We use a dictionary to store the results
    BMI_count = {}
    # Open the file, build a csv DictReader
    with open('../data/Lahti2014/Metadata.tab') as f:
        csvr = csv.DictReader(f, delimiter = '\t')
        for row in csvr:
            # Keep the record only if every constraint is satisfied.
            if all(row[col] == wanted for col, wanted in dict_constraints.items()):
                my_BMI = row['BMI_group']
                # dict.get(..., 0) replaces the manual "seen before?" test.
                BMI_count[my_BMI] = BMI_count.get(my_BMI, 0) + 1
    return BMI_count
get_BMI_count({'Nationality': 'US', 'Sex': 'female'})
"""
Explanation: Now write the function. For each row in the file, you need to make sure all the constraints are matching the desired ones. If so, keep track of the BMI group using a dictionary.
End of explanation
"""
import scipy # For log10
def get_abundance_by_BMI(dict_constraints, genus = 'Aerococcus'):
    """Print the mean log10 abundance of *genus* for each BMI group.

    Records are restricted to those whose metadata matches every
    constraint in `dict_constraints` (column name -> required value).

    Parameters
    ----------
    dict_constraints : dict
        e.g. {'Time': '0', 'Nationality': 'US'}
    genus : str
        Column name in HITChip.tab whose abundance is averaged.
    """
    # Map each relevant SampleID to its BMI group, so the abundance pass
    # below needs a single O(1) dict lookup per row (the original scanned
    # one Python list per BMI group for every HITChip row).
    sample_to_group = {}
    with open('../data/Lahti2014/Metadata.tab') as f:
        csvr = csv.DictReader(f, delimiter = '\t')
        for row in csvr:
            # Keep the record only if every constraint is satisfied.
            if all(row[col] == wanted for col, wanted in dict_constraints.items()):
                sample_to_group[row['SampleID']] = row['BMI_group']
    # Accumulate (abundance total, record count) per BMI group.
    abundance = {}
    with open('../data/Lahti2014/HITChip.tab') as f:
        csvr = csv.DictReader(f, delimiter = '\t')
        for row in csvr:
            g = sample_to_group.get(row['SampleID'])
            if g is not None:
                total, count = abundance.get(g, (0.0, 0))
                abundance[g] = (total + float(row[genus]), count + 1)
    # Finally, calculate means (in log10), and print results in a fixed order.
    print("____________________________________________________________________")
    print("Abundance of " + genus + " In sub-population:")
    print("____________________________________________________________________")
    for key, value in dict_constraints.items():
        print(key, "->", value)
    print("____________________________________________________________________")
    for ab in ['NA', 'underweight', 'lean', 'overweight',
               'obese', 'severeobese', 'morbidobese']:
        if ab in abundance:
            total, count = abundance[ab]
            # NOTE(review): scipy.log10 is removed in modern SciPy releases;
            # kept here to match the file's `import scipy  # For log10`.
            print(round(scipy.log10(total / count), 2), '\t', ab)
    print("____________________________________________________________________")
    print("")
get_abundance_by_BMI({'Time': '0', 'Nationality': 'US'},
'Clostridium difficile et rel.')
"""
Explanation: Write a function that takes as input the constraints (as above), and a bacterial "genus". The function returns the average abundance (in logarithm base 10) of the genus for each group of BMI in the sub-population. For example, calling:
get_abundance_by_BMI({'Time': '0', 'Nationality': 'US'}, 'Clostridium difficile et rel.')
should return:
```
Abundance of Clostridium difficile et rel. In sub-population:
Nationality -> US
Time -> 0
3.08 NA
3.31 underweight
3.84 lean
2.89 overweight
3.31 obese
3.45 severeobese
```
End of explanation
"""
def get_all_genera():
    """Return the genus column names from the HITChip table header."""
    with open('../data/Lahti2014/HITChip.tab') as f:
        columns = f.readline().strip().split('\t')
    # The first column is SampleID, not a genus, so drop it.
    return columns[1:]
"""
Explanation: Repeat this analysis for all genera, and for the records having Time = 0.
A simple function for extracting all the genera in the database:
End of explanation
"""
get_all_genera()[:6]
"""
Explanation: Testing:
End of explanation
"""
for g in get_all_genera()[:5]:
get_abundance_by_BMI({'Time': '0'}, g)
"""
Explanation: Now use the function we wrote above to print the results for all genera:
End of explanation
"""
|
qingshuimonk/STA663 | docs/vae-Bohao.ipynb | mit | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(0)
tf.set_random_seed(0)
# Load MNIST data in a format suited for tensorflow.
# The script input_data is available under this URL:
# https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/mnist/input_data.py
import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
n_samples = mnist.train.num_examples
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier/Glorot uniform initialization for a weight matrix.

    Samples a (fan_in, fan_out) tensor from U(-limit, limit) with
    limit = constant * sqrt(6 / (fan_in + fan_out)).
    https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    """
    limit = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-limit, maxval=limit,
                             dtype=tf.float32)
"""
Explanation: Variational Autoencoder in TensorFlow
The main motivation for this post was that I wanted to get more experience with both Variational Autoencoders (VAEs) and with Tensorflow. Thus, implementing the former in the latter sounded like a good idea for learning about both at the same time. This post summarizes the result.
Note: The post was updated on December 7th 2015:
* a bug in the computation of the latent_loss was fixed (removed an erroneous factor 2). Thanks Colin Fang for pointing this out.
* Using a Bernoulli distribution rather than a Gaussian distribution in the generator network
Note: The post was updated on January 3rd 2017:
* changes required for supporting TensorFlow v0.12 and Python 3 support
Let us first do the necessary imports, load the data (MNIST), and define some helper functions.
End of explanation
"""
class VariationalAutoencoder(object):
    """ Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.

    This implementation uses probabilistic encoders and decoders using Gaussian
    distributions and realized by multi-layer perceptrons. The VAE can be learned
    end-to-end.

    See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
    """
    def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
                 learning_rate=0.001, batch_size=100):
        # network_architecture: dict with keys n_hidden_recog_1/2 (encoder),
        # n_hidden_gener_1/2 (decoder), n_input, and n_z (latent dimension).
        self.network_architecture = network_architecture
        # Activation function shared by all hidden layers.
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        self.batch_size = batch_size

        # tf Graph input
        self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])

        # Create autoencoder network
        self._create_network()
        # Define loss function based variational upper-bound and
        # corresponding optimizer
        self._create_loss_optimizer()

        # Initializing the tensor flow variables
        init = tf.global_variables_initializer()

        # Launch the session
        self.sess = tf.InteractiveSession()
        self.sess.run(init)

    def _create_network(self):
        """Build the encoder, the sampled latent z, and the decoder graph."""
        # Initialize autoencode network weights and biases
        network_weights = self._initialize_weights(**self.network_architecture)

        # Use recognition network to determine mean and
        # (log) variance of Gaussian distribution in latent
        # space
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"],
                                      network_weights["biases_recog"])

        # Draw one sample z from Gaussian distribution
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1,
                               dtype=tf.float32)
        # z = mu + sigma*epsilon  (the "reparameterization trick")
        self.z = tf.add(self.z_mean,
                        tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        # Use generator to determine mean of
        # Bernoulli distribution of reconstructed input
        self.x_reconstr_mean = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])

    def _initialize_weights(self, n_hidden_recog_1, n_hidden_recog_2,
                            n_hidden_gener_1, n_hidden_gener_2,
                            n_input, n_z):
        """Create all Variables: Xavier-initialized weights and zero biases,
        grouped by encoder ("recog") and decoder ("gener")."""
        all_weights = dict()
        all_weights['weights_recog'] = {
            'h1': tf.Variable(xavier_init(n_input, n_hidden_recog_1)),
            'h2': tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2)),
            'out_mean': tf.Variable(xavier_init(n_hidden_recog_2, n_z)),
            'out_log_sigma': tf.Variable(xavier_init(n_hidden_recog_2, n_z))}
        all_weights['biases_recog'] = {
            'b1': tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32)),
            'b2': tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32)),
            'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32)),
            'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32))}
        all_weights['weights_gener'] = {
            'h1': tf.Variable(xavier_init(n_z, n_hidden_gener_1)),
            'h2': tf.Variable(xavier_init(n_hidden_gener_1, n_hidden_gener_2)),
            'out_mean': tf.Variable(xavier_init(n_hidden_gener_2, n_input)),
            'out_log_sigma': tf.Variable(xavier_init(n_hidden_gener_2, n_input))}
        all_weights['biases_gener'] = {
            'b1': tf.Variable(tf.zeros([n_hidden_gener_1], dtype=tf.float32)),
            'b2': tf.Variable(tf.zeros([n_hidden_gener_2], dtype=tf.float32)),
            'out_mean': tf.Variable(tf.zeros([n_input], dtype=tf.float32)),
            'out_log_sigma': tf.Variable(tf.zeros([n_input], dtype=tf.float32))}
        return all_weights

    def _recognition_network(self, weights, biases):
        # Generate probabilistic encoder (recognition network), which
        # maps inputs onto a normal distribution in latent space.
        # The transformation is parametrized and can be learned.
        layer_1 = self.transfer_fct(tf.add(tf.matmul(self.x, weights['h1']),
                                           biases['b1']))
        layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
                                           biases['b2']))
        z_mean = tf.add(tf.matmul(layer_2, weights['out_mean']),
                        biases['out_mean'])
        z_log_sigma_sq = \
            tf.add(tf.matmul(layer_2, weights['out_log_sigma']),
                   biases['out_log_sigma'])
        return (z_mean, z_log_sigma_sq)

    def _generator_network(self, weights, biases):
        # Generate probabilistic decoder (decoder network), which
        # maps points in latent space onto a Bernoulli distribution in data space.
        # The transformation is parametrized and can be learned.
        layer_1 = self.transfer_fct(tf.add(tf.matmul(self.z, weights['h1']),
                                           biases['b1']))
        layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
                                           biases['b2']))
        # Sigmoid keeps the reconstruction mean in (0, 1), matching the
        # Bernoulli likelihood used in the loss.
        x_reconstr_mean = \
            tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['out_mean']),
                                 biases['out_mean']))
        return x_reconstr_mean

    def _create_loss_optimizer(self):
        # The loss is composed of two terms:
        # 1.) The reconstruction loss (the negative log probability
        #     of the input under the reconstructed Bernoulli distribution
        #     induced by the decoder in the data space).
        #     This can be interpreted as the number of "nats" required
        #     for reconstructing the input when the activation in latent
        #     is given.
        # Adding 1e-10 to avoid evaluation of log(0.0)
        reconstr_loss = \
            -tf.reduce_sum(self.x * tf.log(1e-10 + self.x_reconstr_mean)
                           + (1-self.x) * tf.log(1e-10 + 1 - self.x_reconstr_mean),
                           1)
        # 2.) The latent loss, which is defined as the Kullback Leibler divergence
        #     between the distribution in latent space induced by the encoder on
        #     the data and some prior. This acts as a kind of regularizer.
        #     This can be interpreted as the number of "nats" required
        #     for transmitting the latent space distribution given the prior.
        latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                           - tf.square(self.z_mean)
                                           - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + latent_loss)   # average over batch
        # Use ADAM optimizer
        self.optimizer = \
            tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)

    def partial_fit(self, X):
        """Train model based on mini-batch of input data.

        Return cost of mini-batch.
        """
        opt, cost = self.sess.run((self.optimizer, self.cost),
                                  feed_dict={self.x: X})
        return cost

    def transform(self, X):
        """Transform data by mapping it into the latent space."""
        # Note: This maps to mean of distribution, we could alternatively
        # sample from Gaussian distribution
        return self.sess.run(self.z_mean, feed_dict={self.x: X})

    def generate(self, z_mu=None):
        """ Generate data by sampling from latent space.

        If z_mu is not None, data for this point in latent space is
        generated. Otherwise, z_mu is drawn from prior in latent
        space.
        """
        if z_mu is None:
            # NOTE(review): this draws a single (n_z,) vector, while the rest
            # of the graph uses a (batch_size, n_z) batch dimension — confirm
            # that feeding a 1-D z works for the intended TF version/use.
            z_mu = np.random.normal(size=self.network_architecture["n_z"])
        # Note: This maps to mean of distribution, we could alternatively
        # sample from Gaussian distribution
        return self.sess.run(self.x_reconstr_mean,
                             feed_dict={self.z: z_mu})

    def reconstruct(self, X):
        """ Use VAE to reconstruct given data. """
        return self.sess.run(self.x_reconstr_mean,
                             feed_dict={self.x: X})
"""
Explanation: Based on this, we define now a class "VariationalAutoencoder" with a sklearn-like interface that can be trained incrementally with mini-batches using partial_fit. The trained model can be used to reconstruct unseen input, to generate new samples, and to map inputs to the latent space.
End of explanation
"""
def train(network_architecture, learning_rate=0.001,
          batch_size=100, training_epochs=10, display_step=5):
    """Train a VariationalAutoencoder on MNIST with mini-batch updates.

    Prints the running average per-sample cost every `display_step`
    epochs and returns the trained model.
    """
    vae = VariationalAutoencoder(network_architecture,
                                 learning_rate=learning_rate,
                                 batch_size=batch_size)
    # Number of mini-batches per epoch is invariant, so compute it once.
    total_batch = int(n_samples / batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        for _ in range(total_batch):
            batch_xs, _ = mnist.train.next_batch(batch_size)
            # One optimizer step on this mini-batch; returns its cost.
            cost = vae.partial_fit(batch_xs)
            # Fold the batch cost into the per-sample average.
            avg_cost += cost / n_samples * batch_size
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", "{:.9f}".format(avg_cost))
    return vae
"""
Explanation: In general, implementing a VAE in TensorFlow is relatively straightforward (in particular since we do not need to code the gradient computation). Potentially confusing is that all the logic happens at initialization of the class (where the graph is generated), while the actual sklearn interface methods are very simple one-liners.
We can now define a simple function which trains the VAE using mini-batches:
End of explanation
"""
network_architecture = \
dict(n_hidden_recog_1=500, # 1st layer encoder neurons
n_hidden_recog_2=500, # 2nd layer encoder neurons
n_hidden_gener_1=500, # 1st layer decoder neurons
n_hidden_gener_2=500, # 2nd layer decoder neurons
n_input=784, # MNIST data input (img shape: 28*28)
n_z=20) # dimensionality of latent space
vae = train(network_architecture, training_epochs=75)
"""
Explanation: Illustrating reconstruction quality
We can now train a VAE on MNIST by just specifying the network topology. We start with training a VAE with a 20-dimensional latent space.
End of explanation
"""
x_sample = mnist.test.next_batch(100)[0]
x_reconstruct = vae.reconstruct(x_sample)
plt.figure(figsize=(8, 12))
for i in range(5):
plt.subplot(5, 2, 2*i + 1)
plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.title("Test input")
plt.colorbar()
plt.subplot(5, 2, 2*i + 2)
plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.title("Reconstruction")
plt.colorbar()
plt.tight_layout()
"""
Explanation: Based on this we can sample some test inputs and visualize how well the VAE can reconstruct those. In general the VAE does really well.
End of explanation
"""
network_architecture = \
dict(n_hidden_recog_1=500, # 1st layer encoder neurons
n_hidden_recog_2=500, # 2nd layer encoder neurons
n_hidden_gener_1=500, # 1st layer decoder neurons
n_hidden_gener_2=500, # 2nd layer decoder neurons
n_input=784, # MNIST data input (img shape: 28*28)
n_z=2) # dimensionality of latent space
vae_2d = train(network_architecture, training_epochs=75)
x_sample, y_sample = mnist.test.next_batch(5000)
z_mu = vae_2d.transform(x_sample)
plt.figure(figsize=(8, 6))
plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
plt.colorbar()
plt.grid()
"""
Explanation: Illustrating latent space
Next, we train a VAE with 2d latent space and illustrates how the encoder (the recognition network) encodes some of the labeled inputs (collapsing the Gaussian distribution in latent space to its mean). This gives us some insights into the structure of the learned manifold (latent space)
End of explanation
"""
# Sweep a 20x20 grid over the 2-D latent space and tile the decoder's
# reconstruction for each latent point onto one big canvas.
nx = ny = 20
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)

canvas = np.empty((28*ny, 28*nx))
for i, yi in enumerate(x_values):
    for j, xi in enumerate(y_values):
        # BUG FIX: use the 2-D model's own batch size (was `vae.batch_size`,
        # which only worked because both models happened to use 100).
        z_mu = np.array([[xi, yi]]*vae_2d.batch_size)
        x_mean = vae_2d.generate(z_mu)
        # Row index is flipped so increasing yi moves up the image.
        canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0].reshape(28, 28)

plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.tight_layout()
"""
Explanation: An other way of getting insights into the latent space is to use the generator network to plot reconstrunctions at the positions in the latent space for which they have been generated:
End of explanation
"""
%load_ext watermark
%watermark -a "Jan Hendrik Metzen" -d -v -m -p numpy,scikit-learn,tensorflow
"""
Explanation: Summary
In summary, tensorflow is well suited to rapidly implement a prototype of machine learning models like VAE. The resulting code could be easily executed on GPUs as well (requiring just that tensorflow with GPU support was installed). VAE allows learning probabilistic encoders and decoders of data in an end-to-end fashion.
End of explanation
"""
|
harishkrao/Machine-Learning | Titanic - Machine Learning from Disaster - Understanding the data.ipynb | mit | sns.barplot(x='Pclass',y='Survived',data=train, hue='Sex')
"""
Explanation: The plot shows that the number of female survivors were significantly more than the male survivors. There were more survivors overall in first class than in any other class.
There were also less survivors overall in third class than in any other class.
Male survivors were twice in first class than in second or third class. Female survivors in first class were twice that of third class.
End of explanation
"""
sns.barplot(x='Sex',y='Survived',data=train, hue='Pclass')
"""
Explanation: The plot explains the above facts in a different representation.
End of explanation
"""
sns.swarmplot(x='Survived',y='Age',hue='Pclass',data=train)
"""
Explanation: The plot explains the distribution of survivors across age and class. More red on the lower part of the left swarm indicates that younger passengers in the third class had the least chance to survive.
More blue spots on the top part of the right swarm meant that elderly passengers from the first class had the best chance to survive.
Distribution of blue spots on the right swarm is uniform - indicating that, irrespective of age, the first class had better chances of survival.
End of explanation
"""
sns.swarmplot(x='Survived',y='Age',hue='Sex',data=train)
"""
Explanation: The plot shows that male passengers had the least chance of survival and female passengers had the best chance of survival.
End of explanation
"""
sns.swarmplot(x='Sex',y='Age',data=train)
"""
Explanation: Same data with a different representation.
End of explanation
"""
sns.pointplot(x='Pclass',y='Fare',data=train)
"""
Explanation: Plot showing distribution of fares among classes of travel. A first class ticket is about 4 times a second class ticket.
A third class ticket costs about 3/4 of a second class ticket.
End of explanation
"""
sns.barplot(x='Embarked',y='Fare',data=train)
"""
Explanation: The plot shows differences in fares based on the point of embarkation.
Fares from Cherbourg were the highest, in fact costing about twice as the fares from Southampton and about three times as the fares from Queenstown.
Fares from Southampton costed twice that of Queenstown.
C = Cherbourg, Q = Queenstown, S = Southampton
End of explanation
"""
|
mne-tools/mne-tools.github.io | dev/_downloads/58e35821e0f211b843d5ead3e33d8849/20_sensors_time_frequency.ipynb | bsd-3-clause | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet, psd_multitaper, psd_welch
from mne.datasets import somato
"""
Explanation: Frequency and time-frequency sensor analysis
The objective is to show you how to explore the spectral content
of your data (frequency and time-frequency). Here we'll work on Epochs.
We will use this dataset: somato-dataset. It contains so-called event
related synchronizations (ERS) / desynchronizations (ERD) in the beta band.
End of explanation
"""
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
# crop and resample just to reduce computation time
raw.crop(120, 360).load_data().resample(200)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
"""
Explanation: Set parameters
End of explanation
"""
epochs.plot_psd(fmin=2., fmax=40., average=True, spatial_colors=False)
"""
Explanation: Frequency analysis
We start by exploring the frequency content of our epochs.
Let's first check out all channel types by averaging across epochs.
End of explanation
"""
epochs.plot_psd_topomap(ch_type='grad', normalize=False)
"""
Explanation: Now, let's take a look at the spatial distributions of the PSD, averaged
across epochs and frequency bands.
End of explanation
"""
f, ax = plt.subplots()
psds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=None)
psds = 10 * np.log10(psds) # convert to dB
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
ylabel='Power Spectral Density (dB)')
plt.show()
"""
Explanation: Alternatively, you can also create PSDs from ~mne.Epochs with functions
that start with psd_ such as
:func:mne.time_frequency.psd_multitaper and
:func:mne.time_frequency.psd_welch.
<div class="alert alert-info"><h4>Note</h4><p>In contrast to the methods for visualization, those ``psd_*`` functions do
**not** scale the data from SI units to more "convenient" values. So when
e.g. calculating the PSD of gradiometers via
:func:`~mne.time_frequency.psd_multitaper`, you will get the power as
``(T/m)²/Hz`` (instead of ``(fT/cm)²/Hz`` via
:meth:`~mne.Epochs.plot_psd`).</p></div>
End of explanation
"""
# Estimate PSDs based on "mean" and "median" averaging for comparison.
kwargs = dict(fmin=2, fmax=40, n_jobs=None)
psds_welch_mean, freqs_mean = psd_welch(epochs, average='mean', **kwargs)
psds_welch_median, freqs_median = psd_welch(epochs, average='median', **kwargs)
# Convert power to dB scale.
psds_welch_mean = 10 * np.log10(psds_welch_mean)
psds_welch_median = 10 * np.log10(psds_welch_median)
# We will only plot the PSD for a single sensor in the first epoch.
ch_name = 'MEG 0122'
ch_idx = epochs.info['ch_names'].index(ch_name)
epo_idx = 0
_, ax = plt.subplots()
ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
ls='-', label='mean of segments')
ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
ls='--', label='median of segments')
ax.set(title='Welch PSD ({}, Epoch {})'.format(ch_name, epo_idx),
xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
ax.legend(loc='upper right')
plt.show()
"""
Explanation: Notably, :func:mne.time_frequency.psd_welch supports the keyword argument
average, which specifies how to estimate the PSD based on the individual
windowed segments. The default is average='mean', which simply calculates
the arithmetic mean across segments. Specifying average='median', in
contrast, returns the PSD based on the median of the segments (corrected for
bias relative to the mean), which is a more robust measure.
End of explanation
"""
psds_welch_unagg, freqs_unagg = psd_welch(epochs, average=None, **kwargs)
print(psds_welch_unagg.shape)
"""
Explanation: Lastly, we can also retrieve the unaggregated segments by passing
average=None to :func:mne.time_frequency.psd_welch. The dimensions of
the returned array are (n_epochs, n_sensors, n_freqs, n_segments).
End of explanation
"""
freqs = np.logspace(*np.log10([6, 35]), num=8)
n_cycles = freqs / 2. # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=None)
"""
Explanation: Time-frequency analysis: power and inter-trial coherence
We now compute time-frequency representations (TFRs) from our Epochs.
We'll look at power and inter-trial coherence (ITC).
To this we'll use the function :func:mne.time_frequency.tfr_morlet
but you can also use :func:mne.time_frequency.tfr_multitaper
or :func:mne.time_frequency.tfr_stockwell.
<div class="alert alert-info"><h4>Note</h4><p>The ``decim`` parameter reduces the sampling rate of the time-frequency
decomposition by the defined factor. This is usually done to reduce
memory usage. For more information refer to the documentation of
:func:`mne.time_frequency.tfr_morlet`.</p></div>
define frequencies of interest (log-spaced)
End of explanation
"""
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', show=False)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', show=False)
mne.viz.tight_layout()
plt.show()
"""
Explanation: Inspect power
<div class="alert alert-info"><h4>Note</h4><p>The generated figures are interactive. In the topo you can click
on an image to visualize the data for one sensor.
You can also select a portion in the time-frequency plane to
obtain a topomap for a certain time-frequency region.</p></div>
End of explanation
"""
power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,
timefreqs=[(.5, 10), (1.3, 8)])
"""
Explanation: Joint Plot
You can also create a joint plot showing both the aggregated TFR
across channels and topomaps at specific times and frequencies to obtain
a quick overview regarding oscillatory effects across time and space.
End of explanation
"""
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
"""
Explanation: Inspect ITC
End of explanation
"""
|
AtmaMani/pyChakras | udemy_ml_bootcamp/Machine Learning Sections/K-Nearest-Neighbors/K Nearest Neighbors with Python.ipynb | mit | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
"""
Explanation: <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
K Nearest Neighbors with Python
You've been given a classified data set from a company! They've hidden the feature column names but have given you the data and the target classes.
We'll try to use KNN to create a model that directly predicts a class for a new data point based off of the features.
Let's grab it and use it!
Import Libraries
End of explanation
"""
df = pd.read_csv("Classified Data",index_col=0)
df.head()
"""
Explanation: Get the Data
Set index_col=0 to use the first column as the index.
End of explanation
"""
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df.drop('TARGET CLASS',axis=1))
scaled_features = scaler.transform(df.drop('TARGET CLASS',axis=1))
df_feat = pd.DataFrame(scaled_features,columns=df.columns[:-1])
df_feat.head()
"""
Explanation: Standardize the Variables
Because the KNN classifier predicts the class of a given test observation by identifying the observations that are nearest to it, the scale of the variables matters. Any variables that are on a large scale will have a much larger effect on the distance between the observations, and hence on the KNN classifier, than variables that are on a small scale.
End of explanation
"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(scaled_features,df['TARGET CLASS'],
test_size=0.30)
"""
Explanation: Train Test Split
End of explanation
"""
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
"""
Explanation: Using KNN
Remember that we are trying to come up with a model to predict whether someone will TARGET CLASS or not. We'll start with k=1.
End of explanation
"""
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(y_test,pred))
print(classification_report(y_test,pred))
"""
Explanation: Predictions and Evaluations
Let's evaluate our KNN model!
End of explanation
"""
# Elbow method: record the test-set error rate for K = 1..39.
error_rate = []

# Will take some time
for i in range(1,40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train,y_train)
    pred_i = knn.predict(X_test)
    # Fraction of test points this K-NN model misclassifies.
    error_rate.append(np.mean(pred_i != y_test))

plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed', marker='o',
         markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
Explanation: Choosing a K Value
Let's go ahead and use the elbow method to pick a good K Value:
End of explanation
"""
# FIRST A QUICK COMPARISON TO OUR ORIGINAL K=1
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
print('WITH K=1')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))
# NOW WITH K=23
knn = KNeighborsClassifier(n_neighbors=23)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
print('WITH K=23')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))
"""
Explanation: Here we can see that after around K>23 the error rate tends to hover around 0.05-0.06. Let's retrain the model with that value and check the classification report!
End of explanation
"""
|
TakayukiSakai/tensorflow | tensorflow/examples/udacity/1_notmnist.ipynb | apache-2.0 | # These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import SGDClassifier
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
from sklearn.metrics import accuracy_score
# Config the matlotlib backend as plotting inline in IPython
%matplotlib inline
"""
Explanation: Deep Learning
Assignment 1
The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later.
This notebook uses the notMNIST dataset to be used with python experiments. This dataset is designed to look like the classic MNIST dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.
End of explanation
"""
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None

def download_progress_hook(count, blockSize, totalSize):
  """A hook to report download progress, intended for slow connections.

  Prints a marker whenever the integer percentage changes: the number at
  every 5% milestone, a dot otherwise.
  """
  global last_percent_reported
  percent = int(count * blockSize * 100 / totalSize)
  if last_percent_reported == percent:
    return
  if percent % 5 == 0:
    sys.stdout.write("%s%%" % percent)
  else:
    sys.stdout.write(".")
  sys.stdout.flush()
  last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
  """Download a file if not present, and make sure it's the right size.

  Fetches `filename` from the module-level `url` base, reporting progress
  via download_progress_hook, then verifies the on-disk size against
  `expected_bytes`. Returns the local filename; raises if the size differs.
  """
  if force or not os.path.exists(filename):
    print('Attempting to download:', filename)
    # reporthook is invoked for every retrieved chunk to print progress.
    filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
    print('\nDownload Complete!')
  statinfo = os.stat(filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified', filename)
  else:
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
"""
Explanation: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine.
End of explanation
"""
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
  """Extract the .tar.gz archive into a sibling directory (skipped if present).

  Returns the sorted list of per-class folders found under the extracted
  root; raises if the folder count differs from the module-level num_classes.
  """
  root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
  if os.path.isdir(root) and not force:
    # You may override by setting force=True.
    print('%s already present - Skipping extraction of %s.' % (root, filename))
  else:
    print('Extracting data for %s. This may take a while. Please wait.' % root)
    tar = tarfile.open(filename)
    sys.stdout.flush()
    tar.extractall()
    tar.close()
  # One folder per letter class; sorted for a deterministic label ordering.
  data_folders = [
    os.path.join(root, d) for d in sorted(os.listdir(root))
    if os.path.isdir(os.path.join(root, d))]
  if len(data_folders) != num_classes:
    raise Exception(
      'Expected %d folders, one per class. Found %d instead.' % (
        num_classes, len(data_folders)))
  print(data_folders)
  return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
"""
Explanation: Extract the dataset from the compressed .tar.gz file.
This should give you a set of directories, labelled A through J.
End of explanation
"""
# Problem 1: display one sample image per class ('A'..'J') from the small set.
# Loop variable renamed -- the original shadowed the builtin `str`.
root = 'notMNIST_small'
for letter in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']:
    # First file in the class folder; assumes each folder is non-empty.
    path = os.listdir('{}/{}'.format(root, letter))[0]
    display(Image('{}/{}/{}'.format(root, letter, path)))
"""
Explanation: Problem 1
Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
End of explanation
"""
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
  """Load the data for a single letter label.

  Reads every image file in `folder`, normalizes pixel values to roughly
  [-0.5, 0.5], and stacks them into a float32 ndarray of shape
  (num_images, image_size, image_size). Unreadable files are skipped;
  raises if fewer than `min_num_images` usable images remain.
  """
  image_files = os.listdir(folder)
  # Over-allocate one slot per file; trimmed below to the images actually read.
  dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                       dtype=np.float32)
  print(folder)
  num_images = 0
  for image in image_files:
    image_file = os.path.join(folder, image)
    try:
      # Center around zero and scale by the pixel depth (255).
      image_data = (ndimage.imread(image_file).astype(float) -
                    pixel_depth / 2) / pixel_depth
      if image_data.shape != (image_size, image_size):
        raise Exception('Unexpected image shape: %s' % str(image_data.shape))
      dataset[num_images, :, :] = image_data
      num_images = num_images + 1
    except IOError as e:
      # A few files in notMNIST are corrupt; skipping them is expected.
      print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
  dataset = dataset[0:num_images, :, :]
  if num_images < min_num_images:
    raise Exception('Many fewer images than expected: %d < %d' %
                    (num_images, min_num_images))
  print('Full dataset tensor:', dataset.shape)
  print('Mean:', np.mean(dataset))
  print('Standard deviation:', np.std(dataset))
  return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
  """Pickle each class folder to '<folder>.pickle' (skipped when present).

  Returns the list of pickle filenames, one per class, in folder order.
  A failed dump is reported but does not abort the remaining classes.
  """
  dataset_names = []
  for folder in data_folders:
    set_filename = folder + '.pickle'
    dataset_names.append(set_filename)
    if os.path.exists(set_filename) and not force:
      # You may override by setting force=True.
      print('%s already present - Skipping pickling.' % set_filename)
    else:
      print('Pickling %s.' % set_filename)
      dataset = load_letter(folder, min_num_images_per_class)
      try:
        with open(set_filename, 'wb') as f:
          pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
      except Exception as e:
        # Best-effort: report and continue with the next class.
        print('Unable to save data to', set_filename, ':', e)
  return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
"""
Explanation: Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.
We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road.
A few images might not be readable, we'll just skip them.
End of explanation
"""
with open(test_datasets[0], 'rb') as f:
a = pickle.load(f)
plt.imshow(a[0])
"""
Explanation: Problem 2
Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.
End of explanation
"""
def make_arrays(nb_rows, img_size):
  """Allocate an (images, labels) pair of empty ndarrays.

  Returns (None, None) when nb_rows is falsy (e.g. a zero-sized split).
  """
  if not nb_rows:
    return None, None
  images = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
  labels = np.ndarray(nb_rows, dtype=np.int32)
  return images, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
  """Merge the per-class pickles into balanced train/validation arrays.

  Each class contributes train_size // num_classes training rows and
  valid_size // num_classes validation rows; letters are shuffled within a
  class before being split. Labels are the class indices (0..num_classes-1).
  Returns (valid_dataset, valid_labels, train_dataset, train_labels); the
  validation pair is (None, None) when valid_size == 0.
  """
  num_classes = len(pickle_files)
  valid_dataset, valid_labels = make_arrays(valid_size, image_size)
  train_dataset, train_labels = make_arrays(train_size, image_size)
  vsize_per_class = valid_size // num_classes
  tsize_per_class = train_size // num_classes
  # Running write offsets into the merged output arrays.
  start_v, start_t = 0, 0
  end_v, end_t = vsize_per_class, tsize_per_class
  end_l = vsize_per_class+tsize_per_class
  for label, pickle_file in enumerate(pickle_files):
    try:
      with open(pickle_file, 'rb') as f:
        letter_set = pickle.load(f)
        # let's shuffle the letters to have random validation and training set
        np.random.shuffle(letter_set)
        if valid_dataset is not None:
          valid_letter = letter_set[:vsize_per_class, :, :]
          valid_dataset[start_v:end_v, :, :] = valid_letter
          valid_labels[start_v:end_v] = label
          start_v += vsize_per_class
          end_v += vsize_per_class
        # Training rows come from the slice after the validation rows.
        train_letter = letter_set[vsize_per_class:end_l, :, :]
        train_dataset[start_t:end_t, :, :] = train_letter
        train_labels[start_t:end_t] = label
        start_t += tsize_per_class
        end_t += tsize_per_class
    except Exception as e:
      print('Unable to process data from', pickle_file, ':', e)
      raise
  return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
"""
Explanation: Problem 3
Another check: we expect the data to be balanced across classes. Verify that.
Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9.
Also create a validation dataset for hyperparameter tuning.
End of explanation
"""
def randomize(dataset, labels):
  """Shuffle images and labels together, preserving their pairing."""
  order = np.random.permutation(labels.shape[0])
  return dataset[order, :, :], labels[order]
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
"""
Explanation: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
End of explanation
"""
pickle_file = 'notMNIST.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
"""
Explanation: Problem 4
Convince yourself that the data is still good after shuffling!
Finally, let's save the data for later reuse:
End of explanation
"""
with open(pickle_file, 'rb') as f:
data = pickle.load(f)
train_dataset = data['train_dataset']
valid_dataset = data['valid_dataset']
test_dataset = data['test_dataset']
train_labels = data['train_labels']
valid_labels = data['valid_labels']
test_labels = data['test_labels']
train_0 = train_dataset[0]
train_1 = train_dataset[1]
diff = train_1 - train_0
plt.imshow(train_0)
plt.imshow(train_1)
plt.imshow(diff)
np.mean(np.abs(diff))
np.array_equal(train_0, train_1)
import hashlib
class MyImage:
    """Hashable wrapper around an image ndarray, so images can live in sets."""

    def __init__(self, image):
        # Keep a reference to the wrapped pixel array.
        self.image = image

    def __eq__(self, other):
        # Two wrappers are equal when their pixel arrays match elementwise.
        return np.array_equal(self.image, other.image)

    def __hash__(self):
        # Digest of the raw array buffer. NOTE(review): assumes compared
        # arrays share dtype/layout -- equal-valued arrays with different
        # dtypes would hash differently; confirm for this dataset (all
        # arrays here are float32 from the same pipeline).
        digest = hashlib.sha1(self.image).hexdigest()
        return hash(digest)
train_myimages = set(list(map(lambda x: MyImage(x), train_dataset)))
valid_myimages = set(list(map(lambda x: MyImage(x), valid_dataset)))
valid_myimages_no_overlap = valid_myimages - train_myimages
print(len(valid_myimages))
print(len(valid_myimages_no_overlap))
valid_dataset_no_overlap = np.array(list(map(lambda x: x.image, valid_myimages_no_overlap)))
valid_dataset_no_overlap.shape
"""
Explanation: Problem 5
By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but are actually ok if you expect to see training samples recur when you use it.
Measure how much overlap there is between training, validation and test samples.
Optional questions:
- What about near duplicates between datasets? (images that are almost identical)
- Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.
End of explanation
"""
train_labels
def reshape_dataset(dataset):
    """Flatten each 2-D image into a 1-D feature vector (n_samples, h*w)."""
    n_samples, height, width = dataset.shape
    return dataset.reshape(n_samples, height * width)
reshape_dataset(valid_dataset).shape
model = SGDClassifier(loss='log', alpha=0.01, penalty='l2')
model.fit(reshape_dataset(train_dataset), train_labels)
pred = model.predict(reshape_dataset(valid_dataset))
accuracy_score(pred, valid_labels)
pred = model.predict(reshape_dataset(test_dataset))
accuracy_score(pred, test_labels)
"""
Explanation: Problem 6
Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it.
Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model.
Optional question: train an off-the-shelf model on all the data!
End of explanation
"""
|
RainFool/Udacity_Anwser_RainFool | Project0/titanic_survival_exploration.ipynb | mit | # 检查你的Python版本
from sys import version_info
if version_info.major != 2 and version_info.minor != 7:
raise Exception('请使用Python 2.7来完成此项目')
import numpy as np
import pandas as pd
# 数据可视化代码
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# 加载数据集
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# 显示数据列表中的前几项乘客数据
display(full_data.head())
"""
Explanation: 机器学习工程师纳米学位
机器学习基础
项目 0: 预测泰坦尼克号乘客生还率
1912年,泰坦尼克号在第一次航行中就与冰山相撞沉没,导致了大部分乘客和船员身亡。在这个入门项目中,我们将探索部分泰坦尼克号旅客名单,来确定哪些特征可以最好地预测一个人是否会生还。为了完成这个项目,你将需要实现几个基于条件的预测并回答下面的问题。我们将根据代码的完成度和对问题的解答来对你提交的项目的进行评估。
提示:这样的文字将会指导你如何使用 iPython Notebook 来完成项目。
点击这里查看本文件的英文版本。
了解数据
当我们开始处理泰坦尼克号乘客数据时,会先导入我们需要的功能模块以及将数据加载到 pandas DataFrame。运行下面区域中的代码加载数据,并使用 .head() 函数显示前几项乘客数据。
提示:你可以通过单击代码区域,然后使用键盘快捷键 Shift+Enter 或 Shift+ Return 来运行代码。或者在选择代码后使用播放(run cell)按钮执行代码。像这样的 MarkDown 文本可以通过双击编辑,并使用这些相同的快捷键保存。Markdown 允许你编写易读的纯文本并且可以转换为 HTML。
End of explanation
"""
# 从数据集中移除 'Survived' 这个特征,并将它存储在一个新的变量中。
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# 显示已移除 'Survived' 特征的数据集
display(data.head())
"""
Explanation: 从泰坦尼克号的数据样本中,我们可以看到船上每位旅客的特征
Survived:是否存活(0代表否,1代表是)
Pclass:社会阶级(1代表上层阶级,2代表中层阶级,3代表底层阶级)
Name:船上乘客的名字
Sex:船上乘客的性别
Age:船上乘客的年龄(可能存在 NaN)
SibSp:乘客在船上的兄弟姐妹和配偶的数量
Parch:乘客在船上的父母以及小孩的数量
Ticket:乘客船票的编号
Fare:乘客为船票支付的费用
Cabin:乘客所在船舱的编号(可能存在 NaN)
Embarked:乘客上船的港口(C 代表从 Cherbourg 登船,Q 代表从 Queenstown 登船,S 代表从 Southampton 登船)
因为我们感兴趣的是每个乘客或船员是否在事故中活了下来。可以将 Survived 这一特征从这个数据集移除,并且用一个单独的变量 outcomes 来存储。它也做为我们要预测的目标。
运行该代码,从数据集中移除 Survived 这个特征,并将它存储在变量 outcomes 中。
End of explanation
"""
def accuracy_score(truth, pred):
    """Return a message describing the accuracy of `pred` against `truth`."""
    # Prediction and outcome counts must match for a meaningful score.
    if len(truth) != len(pred):
        return "Number of predictions does not match number of outcomes!"
    # Fraction of matching entries, as a percentage.
    accuracy = (truth == pred).mean() * 100
    return "Predictions have an accuracy of {:.2f}%.".format(accuracy)
# 测试 'accuracy_score' 函数
predictions = pd.Series(np.ones(5, dtype = int)) #五个预测全部为1,既存活
print accuracy_score(outcomes[:5], predictions)
"""
Explanation: 这个例子展示了如何将泰坦尼克号的 Survived 数据从 DataFrame 移除。注意到 data(乘客数据)和 outcomes (是否存活)现在已经匹配好。这意味着对于任何乘客的 data.loc[i] 都有对应的存活的结果 outcome[i]。
计算准确率
为了验证我们预测的结果,我们需要一个标准来给我们的预测打分。因为我们最感兴趣的是我们预测的准确率,既正确预测乘客存活的比例。运行下面的代码来创建我们的 accuracy_score 函数以对前五名乘客的预测来做测试。
思考题:在前五个乘客中,如果我们预测他们全部都存活,你觉得我们预测的准确率是多少?
End of explanation
"""
def predictions_0(data):
    """Baseline model: predict that no passenger survived, ignoring features."""
    predictions = [0 for _ in data.iterrows()]
    return pd.Series(predictions)
# 进行预测
predictions = predictions_0(data)
"""
Explanation: 提示:如果你保存 iPython Notebook,代码运行的输出也将被保存。但是,一旦你重新打开项目,你的工作区将会被重置。请确保每次都从上次离开的地方运行代码来重新生成变量和函数。
最简单的预测
如果我们要预测泰坦尼克号上的乘客是否存活,但是我们又对他们一无所知,那么最好的预测就是船上的人无一幸免。这是因为,我们可以假定当船沉没的时候大多数乘客都遇难了。下面的 predictions_0 函数就预测船上的乘客全部遇难。
End of explanation
"""
print accuracy_score(outcomes, predictions)
"""
Explanation: 问题1:对比真实的泰坦尼克号的数据,如果我们做一个所有乘客都没有存活的预测,这个预测的准确率能达到多少?
回答: 61.62%
提示:运行下面的代码来查看预测的准确率。
End of explanation
"""
survival_stats(data, outcomes, 'Sex')
"""
Explanation: 考虑一个特征进行预测
我们可以使用 survival_stats 函数来看看 Sex 这一特征对乘客的存活率有多大影响。这个函数定义在名为 titanic_visualizations.py 的 Python 脚本文件中,我们的项目提供了这个文件。传递给函数的前两个参数分别是泰坦尼克号的乘客数据和乘客的 生还结果。第三个参数表明我们会依据哪个特征来绘制图形。
运行下面的代码绘制出依据乘客性别计算存活率的柱形图。
End of explanation
"""
def predictions_1(data):
    """Predict survival from sex alone: females survive, males do not."""
    predictions = []
    for _, passenger in data.iterrows():
        survived = 0 if passenger['Sex'] == 'male' else 1
        predictions.append(survived)
    return pd.Series(predictions)
# 进行预测
predictions = predictions_1(data)
**问题2**:当我们预测船上女性乘客全部存活,而剩下的人全部遇难,那么我们预测的准确率会达到多少?
**回答**: *78.68%*
**提示**:你需要在下面添加一个代码区域,实现代码并运行来计算准确率。
print accuracy_score(outcomes, predictions)
"""
Explanation: 观察泰坦尼克号上乘客存活的数据统计,我们可以发现大部分男性乘客在船沉没的时候都遇难了。相反的,大部分女性乘客都在事故中生还。让我们以此改进先前的预测:如果乘客是男性,那么我们就预测他们遇难;如果乘客是女性,那么我们预测他们在事故中活了下来。
将下面的代码补充完整,让函数可以进行正确预测。
提示:您可以用访问 dictionary(字典)的方法来访问船上乘客的每个特征对应的值。例如, passenger['Sex'] 返回乘客的性别。
End of explanation
"""
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
"""
Explanation: 考虑两个特征进行预测
仅仅使用乘客性别(Sex)这一特征,我们预测的准确性就有了明显的提高。现在再看一下使用额外的特征能否更进一步提升我们的预测准确度。例如,综合考虑所有在泰坦尼克号上的男性乘客:我们是否找到这些乘客中的一个子集,他们的存活概率较高。让我们再次使用 survival_stats 函数来看看每位男性乘客的年龄(Age)。这一次,我们将使用第四个参数来限定柱形图中只有男性乘客。
运行下面这段代码,把男性基于年龄的生存结果绘制出来。
End of explanation
"""
def predictions_2(data):
    """Predict survival: all females survive, as do boys under 10."""
    predictions = []
    for _, passenger in data.iterrows():
        is_female = passenger['Sex'] == 'female'
        # NaN ages compare False here, so unknown-age males are predicted 0.
        is_young = passenger['Age'] < 10
        predictions.append(1 if (is_female or is_young) else 0)
    return pd.Series(predictions)
# 进行预测
predictions = predictions_2(data)
**问题3**:当预测所有女性以及小于10岁的男性都存活的时候,预测的准确率会达到多少?
**回答**: *79.35%*
**提示**:你需要在下面添加一个代码区域,实现代码并运行来计算准确率。
print accuracy_score(outcomes, predictions)
"""
Explanation: 仔细观察泰坦尼克号存活的数据统计,在船沉没的时候,大部分小于10岁的男孩都活着,而大多数10岁以上的男性都随着船的沉没而遇难。让我们继续在先前预测的基础上构建:如果乘客是女性,那么我们就预测她们全部存活;如果乘客是男性并且小于10岁,我们也会预测他们全部存活;所有其它我们就预测他们都没有幸存。
将下面缺失的代码补充完整,让我们的函数可以实现预测。
提示: 您可以用之前 predictions_1 的代码作为开始来修改代码,实现新的预测函数。
End of explanation
"""
survival_stats(data, outcomes, 'Age', ["Sex == 'female'","Pclass > 2"])
"""
Explanation: 你自己的预测模型
添加年龄(Age)特征与性别(Sex)的结合比单独使用性别(Sex)也提高了不少准确度。现在该你来做预测了:找到一系列的特征和条件来对数据进行划分,使得预测结果提高到80%以上。这可能需要多个特性和多个层次的条件语句才会成功。你可以在不同的条件下多次使用相同的特征。Pclass,Sex,Age,SibSp 和 Parch 是建议尝试使用的特征。
使用 survival_stats 函数来观测泰坦尼克号上乘客存活的数据统计。
提示: 要使用多个过滤条件,把每一个条件放在一个列表里作为最后一个参数传递进去。例如: ["Sex == 'male'", "Age < 18"]
End of explanation
"""
def predictions_3(data):
    """Hand-tuned rules over sex, class, age and parch (>= 80% accuracy)."""

    def survived(p):
        # One passenger row -> 0/1 prediction.
        if p['Sex'] == 'female':
            # Most women survived, except lower-class women in their forties.
            if p['Pclass'] > 2 and 40 < p['Age'] < 50:
                return 0
            return 1
        # Males: young boys survived ...
        if p['Age'] < 10:
            return 1
        # ... as did upper-class men in two specific age bands.
        if p['Pclass'] < 2:
            if 30 < p['Age'] < 40:
                return 1
            if p['Parch'] == 0 and 20 < p['Age'] < 30:
                return 1
        return 0

    return pd.Series([survived(passenger) for _, passenger in data.iterrows()])
# 进行预测
predictions = predictions_3(data)
"""
Explanation: 当查看和研究了图形化的泰坦尼克号上乘客的数据统计后,请补全下面这段代码中缺失的部分,使得函数可以返回你的预测。
在到达最终的预测模型前请确保记录你尝试过的各种特征和条件。
提示: 您可以用之前 predictions_2 的代码作为开始来修改代码,实现新的预测函数。
End of explanation
"""
print accuracy_score(outcomes, predictions)
"""
Explanation: 问题4:请描述你实现80%准确度的预测模型所经历的步骤。您观察过哪些特征?某些特性是否比其他特征更有帮助?你用了什么条件来预测生还结果?你最终的预测的准确率是多少?
回答:
1. 开始时,着重优化男性的部分数据,经过一系列尝试,发现阶层的影响很大,直接很多年龄段出现了100%的概率覆盖,所以在男女中都测试了对Pclass字段的优化。
2. 观察过题目中推荐的特征。
3. 发现Pclass阶层越高的人生存几率越高,孩子1~2个的人生存几率比较高,女性比男性高的多,幼儿比青少年高的多
4. 先使用性别区分,然后是阶层,再加上年龄段做最后的返回划定,以此来预测
5. 最终准确率80.70%
提示:你需要在下面添加一个代码区域,实现代码并运行来计算准确率。
End of explanation
"""
|
DhashS/Olin-Complexity-Final-Project | reports/01_exact_algorithms.ipynb | gpl-3.0 | # %load -s brute_force algs.py
def brute_force(p, perf=False):
    """Exact TSP solver: enumerate all node permutations, keep the cheapest.

    Cost is summed over consecutive node pairs -- an open path, not a closed
    cycle. O(V!) tours, O(V) work per tour.

    Args:
        p: graph supporting p.nodes() and p[n1][n2]['weight'] (networkx-style).
        perf: when True, return only the optimal tour tuple; when False,
            return (stats DataFrame with $N$/cost/opt_tour, empty DataFrame).
    """
    import itertools as it
    nodes = list(p.nodes())
    # Generate all possible tours (complete graph): O(V!).
    tours = list(it.permutations(nodes))
    costs = []
    for tour in tours:
        cost = sum(p[n1][n2]['weight'] for n1, n2 in zip(tour, tour[1:]))  # O(V)
        costs.append(cost)
    best = int(np.argmin(costs))
    if not perf:
        # DataFrame.append was removed in pandas 2.0; build the frame directly.
        cost_data = pd.DataFrame([{"$N$": len(nodes),
                                   "cost": min(costs),
                                   "opt_tour": tours[best]}])
        return (cost_data, pd.DataFrame())
    # Choose the tour with the lowest cost.
    return tours[best]
"""
Explanation: Exact algorithims
Description
Solving the travelling salesman problem to resolve the lowest weight tour exactly is computationally tractable for graphs with a low node count. Often, getting the actual perfect solution is not required, but in the case it is, two algorithms exist.
Brute-force
The brute-force search is the simplest algorithim. Consider all permutations of the nodes of the graph, which is the same as all possible tours of a graph (if it is complete). Compute the cost of all of them, and choose the one with the minimum cost. Clearly since we're evaluating all permutations of a graph's nodes, it's complexity is $O(n!)$.
A python implementation of a brute force search is below
End of explanation
"""
from algs import brute_force_N, brute_force
from parsers import TSP
from graphgen import EUC_2D
from parstats import get_stats, dist_across_cost, scatter_vis
from itertools import permutations
tsp_prob = TSP('../data/a280.tsp')
tsp_prob.graph = EUC_2D(6)
tsp_prob.spec = dict(comment="Random euclidean graph",
dimension=11,
edge_weight_type="EUC_2D",
name="Random cities")
%%bash
./cluster.sh 8
@get_stats(name="Brute force, monotonic reduction",
data=tsp_prob,
plots=[scatter_vis])
def vis_brute(*args, **kwargs):
return brute_force_N(*args, **kwargs)
vis_brute(range(2, len(list(permutations(tsp_prob.graph.nodes())))));
"""
Explanation: If we visualize how the algorithm progresses, we can pre-emptively stop execution of the tour evaluation. Since the order of the permutations is deterministic, we can observe that the cost monotonically decreases.
This monotonic decrease is a result of the min function we call on costs. In actuality, since we're evaluating all tours and only storing the smallest one (a reduce), we make no assumptions about the structure of the graph. One can see that all tour evaluations are separate from one another, so our first evaluation is equally likely to be the lowest-weight tour as the last.
Let's set up our visualization, creating a random euclidean 2D graph and seeing how it performs as we vary $N$, the tour at which it stops evaluating. If we choose the size of the graph to be 8, solving it exactly is feasible. Any larger, and this notebook becomes computationally intractable.
End of explanation
"""
# %load -s brute_force_N_no_reduce algs.py
def brute_force_N_no_reduce(p, n, perf=False):
    """Evaluate only the first n node permutations, without reducing.

    Like brute_force, but stops after the first n tours and, in stats mode
    (perf=False), records the cost of the LAST tour evaluated alongside the
    optimum seen so far -- exposing how the running minimum evolves.
    With perf=True, returns the cheapest of the first n tours instead.
    """
    import itertools as it
    #Generate all possible tours (complete graph)
    tours = list(it.permutations(p.nodes())) #O(V!)
    costs = []
    if not perf:
        cost_data = pd.DataFrame(columns=["$N$", "cost", "opt_cost"])
    #Evaluate all tours
    for tour in tours[:n]:
        cost = 0
        for n1, n2 in zip(tour, tour[1:]): #O(V)
            cost += p[n1][n2]['weight']
        costs.append(cost)
    if not perf:
        # NOTE(review): DataFrame.append was removed in pandas 2.0; on modern
        # pandas this needs pd.concat or direct DataFrame construction.
        cost_data = cost_data.append({"$N$" : n,
                                      "cost" : costs[-1],
                                      "opt_cost" : min(costs)},
                                     ignore_index = True)
        return (cost_data, pd.DataFrame())
    #Choose tour with lowest cost
    return tours[np.argmin(costs)]
@get_stats(name="Brute force, no reduce",
data=tsp_prob,
plots=[scatter_vis, dist_across_cost])
def vis_brute_no_reduce(*args, **kwargs):
return brute_force_N_no_reduce(*args, **kwargs)
cost_stats, _ = vis_brute_no_reduce(range(2, len(list(permutations(tsp_prob.graph.nodes())))))
"""
Explanation: If we tweak the code slightly, we can see what it's doing without a reduce step:
End of explanation
"""
from scipy.stats import pearsonr
pearsonr(cost_stats.cost, cost_stats.opt_cost)
pearsonr(cost_stats["$N$"], cost_stats.cost)
"""
Explanation: Given this is a randomly distributed dataset, it makes sense that the distribution across costs looks like a gaussian. Let's confirm by checking how correlated they are
End of explanation
"""
|
radhikapc/foundation-homework | homework_sql/Homework_6-Radhika.ipynb | mit | import requests
data = requests.get('http://localhost:5000/lakes').json()
print(len(data), "lakes")
for item in data[:10]:
print(item['name'], "- elevation:", item['elevation'], "m / area:", item['area'], "km^2 / type:", item['type'])
"""
Explanation: Homework 6: Web Applications
For this homework, you're going to write a web API for the lake data in the MONDIAL database. (Make sure you've imported the data as originally outlined in our week 1 tutorial.)
The API should perform the following tasks:
A request to /lakes should return a JSON list of dictionaries, with the information from the name, elevation, area and type fields from the lake table in MONDIAL.
The API should recognize the query string parameter sort. When left blank or set to name, the results should be sorted by the name of the lake (in alphabetical order). When set to area or elevation, the results should be sorted by the requested field, in descending order.
The API should recognize the query string parameter type. When specified, the results should only include rows that have the specified value in the type field.
You should be able to use both the sort and type parameters in any request.
This notebook contains only test requests to your API. Write the API as a standalone Python program, start the program and then run the code in the cells below to ensure that your API produces the expected output. When you're done, paste the source code in the final cell (so we can check your work, if needed).
Hints when writing your API code:
You'll need to construct the SQL query as a string, piece by piece. This will likely involve a somewhat messy tangle of if statements. Lean into the messy tangle.
Make sure to use parameter placeholders (%s) in the query.
If you're getting SQL errors, print out your SQL statement in the request handler function so you can debug it. (When you use print() in Flask, the results will display in your terminal window.)
When in doubt, return to the test code. Examine it carefully and make sure you know exactly what it's trying to do.
Problem set #1: A list of lakes
Your API should return a JSON list of dictionaries (objects). Use the code below to determine what the keys of the dictionaries should be. (For brevity, this example only prints out the first ten records, but of course your API should return all of them.)
Expected output:
143 lakes
Ammersee - elevation: 533 m / area: 46 km^2 / type: None
Arresoe - elevation: None m / area: 40 km^2 / type: None
Atlin Lake - elevation: 668 m / area: 798 km^2 / type: None
Balaton - elevation: 104 m / area: 594 km^2 / type: None
Barrage de Mbakaou - elevation: None m / area: None km^2 / type: dam
Bodensee - elevation: 395 m / area: 538 km^2 / type: None
Brienzersee - elevation: 564 m / area: 29 km^2 / type: None
Caspian Sea - elevation: -28 m / area: 386400 km^2 / type: salt
Chad Lake - elevation: 250 m / area: 23000 km^2 / type: salt
Chew Bahir - elevation: 520 m / area: 800 km^2 / type: salt
End of explanation
"""
import requests
data = requests.get('http://localhost:5000/lakes?type=salt').json()
avg_area = sum([x['area'] for x in data if x['area'] is not None]) / len(data)
avg_elev = sum([x['elevation'] for x in data if x['elevation'] is not None]) / len(data)
print("average area:", int(avg_area))
print("average elevation:", int(avg_elev))
"""
Explanation: Problem set #2: Lakes of a certain type
The following code fetches all lakes of type salt and finds their average area and elevation.
Expected output:
average area: 18880
average elevation: 970
End of explanation
"""
import requests
data = requests.get('http://localhost:5000/lakes?sort=elevation').json()
for item in [x['name'] for x in data if x['elevation'] is not None][:15]:
print("*", item)
"""
Explanation: Problem set #3: Lakes in order
The following code fetches lakes in reverse order by their elevation and prints out the name of the first fifteen, excluding lakes with an empty elevation field.
Expected output:
* Licancabur Crater Lake
* Nam Co
* Lago Junin
* Lake Titicaca
* Poopo
* Salar de Uyuni
* Koli Sarez
* Lake Irazu
* Qinghai Lake
* Segara Anak
* Lake Tahoe
* Crater Lake
* Lake Tana
* Lake Van
* Issyk-Kul
End of explanation
"""
import requests
data = requests.get('http://localhost:5000/lakes?sort=area&type=caldera').json()
for item in data:
print("*", item['name'])
"""
Explanation: Problem set #4: Order and type
The following code prints the names of the largest caldera lakes, ordered in reverse order by area.
Expected output:
* Lake Nyos
* Lake Toba
* Lago Trasimeno
* Lago di Bolsena
* Lago di Bracciano
* Crater Lake
* Segara Anak
* Laacher Maar
End of explanation
"""
import requests
data = requests.get('http://localhost:5000/lakes', params={'type': "' OR true; --"}).json()
data
"""
Explanation: Problem set #5: Error handling
Your API should work fine even when faced with potential error-causing inputs. For example, the expected output for this statement is an empty list ([]), not every row in the table.
End of explanation
"""
import requests
data = requests.get('http://localhost:5000/lakes', params={'sort': "florb"}).json()
[x['name'] for x in data[:5]]
"""
Explanation: Specifying a field other than name, area or elevation for the sort parameter should fail silently, defaulting to sorting alphabetically. Expected output: ['Ammersee', 'Arresoe', 'Atlin Lake', 'Balaton', 'Barrage de Mbakaou']
End of explanation
"""
conn.rollback()
from flask import Flask, request, jsonify
from decimal import Decimal
import pg8000
app = Flask(__name__)
conn = pg8000.connect(user="postgres",password="password", database="mondial")
@app.route("/lakes")
def get_lakes():
    """Return lakes from MONDIAL as JSON: name, area, elevation, type.

    Query parameters:
      sort: 'name' (default, alphabetical ascending) or 'area'/'elevation'
            (descending). Unknown values silently fall back to the name sort.
      type: when given, only rows whose `type` column matches exactly.
    """
    sorting = request.args.get('sort', 'name')
    get_type = request.args.get('type', 0)
    cursor = conn.cursor()
    # ORDER BY clause comes from a fixed whitelist -- user input is never
    # interpolated into the SQL text, so the sort parameter cannot inject.
    if sorting == 'area':
        sort_by = "ORDER BY area DESC"
    elif sorting == 'elevation':
        sort_by = "ORDER BY elevation DESC"
    else:
        # Default (and fallback for unrecognized sort values).
        sort_by = "ORDER BY name"
    if get_type:
        # Parameter placeholder keeps the type filter injection-safe.
        cursor.execute("SELECT name, area, elevation, type FROM lake WHERE type=%s " + sort_by, [get_type])
    else:
        cursor.execute("SELECT name, area, elevation, type FROM lake " + sort_by)

    def as_number(value):
        # MONDIAL numerics come back as Decimal; JSON needs int or null.
        return int(value) if value is not None else None

    output = []
    for name, area, elevation, lake_type in cursor.fetchall():
        # Keep rows with NULL fields: the API reports them as null instead
        # of dropping the lake (the old try/except silently discarded them).
        output.append({
            'name': name,
            'area': as_number(area),
            'elevation': as_number(elevation),
            'type': lake_type,
        })
    return jsonify(output)
app.run(port=5004)
"""
Explanation: Paste your code
Please paste the code for your entire Flask application in the cell below, in case we want to take a look when grading or debugging your assignment.
End of explanation
"""
conn.rollback()
"""
Explanation: The API should recognize the query string parameter type. When specified, the results should only include rows that have the specified value in the type field.
End of explanation
"""
|
turbomanage/training-data-analyst | quests/endtoendml/labs/5_train_keras.ipynb | apache-2.0 | # change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.0' # not used in this notebook
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
"""
Explanation: <h1>Training Keras model on Cloud AI Platform</h1>
<h2>Learning Objectives</h2>
<ol>
<li>Create model arguments for hyperparameter tuning</li>
<li>Create the model and specify checkpoints during training</li>
<li>Train the keras model using model.fit</li>
</ol>
Note: This notebook requires TensorFlow 2.0 as we are creating a model using Keras.
TODO: Complete the lab notebook #TODO sections. You can refer to the solutions/ notebook for reference.
This notebook illustrates distributed training and hyperparameter tuning on Cloud AI Platform (formerly known as Cloud ML Engine). This uses Keras and requires TensorFlow 2.0
End of explanation
"""
!mkdir -p babyweight_tf2/trainer
%%writefile babyweight_tf2/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
    '--train_examples',
    help = 'Number of examples (in thousands) to run the training job over. '
           'If this is more than actually available, the input cycles through '
           'the data, so 5000 here with 1M rows means 5 epochs.',
    type = int,
    default = 5000
)
parser.add_argument(
    '--pattern',
    help = 'Specify a pattern that has to be in input files. For example '
           '00001-of will process only one shard.',
    default = 'of'
)
parser.add_argument(
    '--eval_steps',
    help = 'Positive number of steps for which to evaluate model. Default '
           'is None, which means evaluate until the input is exhausted.',
    type = int,
    default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_EXAMPLES = arguments.pop('train_examples') * 1000
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train on {} examples using batch_size={}".format(model.TRAIN_EXAMPLES, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
"""
Explanation: Now that we have the Keras wide-and-deep code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform.
<p>
<h2> Train on Cloud AI Platform</h2>
<p>
Training on Cloud AI Platform requires:
<ol>
<li> Making the code a Python package
<li> Using gcloud to submit the training code to Cloud AI Platform
</ol>
Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com).
## Lab Task 1
The following code edits babyweight_tf2/trainer/task.py.
End of explanation
"""
%%writefile babyweight_tf2/trainer/model.py
import shutil, os, datetime
import numpy as np
import tensorflow as tf
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_EXAMPLES = 1000 * 1000
EVAL_STEPS = None
NUM_EVALS = 10
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
def features_and_labels(row_data):
for unwanted_col in ['key']:
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label # features, label
# load the training data
def load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
dataset = (tf.data.experimental.make_csv_dataset(pattern, batch_size, CSV_COLUMNS, DEFAULTS)
.map(features_and_labels) # features, label
)
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(1000).repeat()
dataset = dataset.prefetch(1) # take advantage of multi-threading; 1=AUTOTUNE
return dataset
## Build a Keras wide-and-deep model using its Functional API
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
# Helper function to handle categorical columns
def categorical_fc(name, values):
orig = tf.feature_column.categorical_column_with_vocabulary_list(name, values)
wrapped = tf.feature_column.indicator_column(orig)
return orig, wrapped
def build_wd_model(dnn_hidden_units = [64, 32], nembeds = 3):
# input layer
deep_inputs = {
colname : tf.keras.layers.Input(name=colname, shape=(), dtype='float32')
for colname in ['mother_age', 'gestation_weeks']
}
wide_inputs = {
colname : tf.keras.layers.Input(name=colname, shape=(), dtype='string')
for colname in ['is_male', 'plurality']
}
inputs = {**wide_inputs, **deep_inputs}
# feature columns from inputs
deep_fc = {
colname : tf.feature_column.numeric_column(colname)
for colname in ['mother_age', 'gestation_weeks']
}
wide_fc = {}
is_male, wide_fc['is_male'] = categorical_fc('is_male', ['True', 'False', 'Unknown'])
plurality, wide_fc['plurality'] = categorical_fc('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)'])
# bucketize the float fields. This makes them wide
age_buckets = tf.feature_column.bucketized_column(deep_fc['mother_age'],
boundaries=np.arange(15,45,1).tolist())
wide_fc['age_buckets'] = tf.feature_column.indicator_column(age_buckets)
gestation_buckets = tf.feature_column.bucketized_column(deep_fc['gestation_weeks'],
boundaries=np.arange(17,47,1).tolist())
wide_fc['gestation_buckets'] = tf.feature_column.indicator_column(gestation_buckets)
# cross all the wide columns. We have to do the crossing before we one-hot encode
crossed = tf.feature_column.crossed_column(
[is_male, plurality, age_buckets, gestation_buckets], hash_bucket_size=20000)
deep_fc['crossed_embeds'] = tf.feature_column.embedding_column(crossed, nembeds)
# the constructor for DenseFeatures takes a list of numeric columns
# The Functional API in Keras requires that you specify: LayerConstructor()(inputs)
wide_inputs = tf.keras.layers.DenseFeatures(wide_fc.values(), name='wide_inputs')(inputs)
deep_inputs = tf.keras.layers.DenseFeatures(deep_fc.values(), name='deep_inputs')(inputs)
# hidden layers for the deep side
layers = [int(x) for x in dnn_hidden_units]
deep = deep_inputs
for layerno, numnodes in enumerate(layers):
deep = tf.keras.layers.Dense(numnodes, activation='relu', name='dnn_{}'.format(layerno+1))(deep)
deep_out = deep
# linear model for the wide side
wide_out = tf.keras.layers.Dense(10, activation='relu', name='linear')(wide_inputs)
# concatenate the two sides
both = tf.keras.layers.concatenate([deep_out, wide_out], name='both')
# final output is a linear activation because this is regression
output = tf.keras.layers.Dense(1, activation='linear', name='weight')(both)
model = tf.keras.models.Model(inputs, output)
model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])
return model
# The main function
def train_and_evaluate(output_dir):
    """Build, train and export the wide-and-deep Keras model.

    Args:
      output_dir: GCS or local directory for checkpoints and the exported
        SavedModel. Reads module-level hyperparameters set by task.py.
    """
    model = build_wd_model(NNSIZE, NEMBEDS)
    print("Here is our Wide-and-Deep architecture so far:\n")
    print(model.summary())

    train_file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, 'train', PATTERN)
    eval_file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, 'eval', PATTERN)

    # Fixed: the original computed the GCS paths above but then loaded the
    # local patterns 'train*'/'eval*' instead — use the GCS paths.
    trainds = load_dataset(train_file_path, BATCH_SIZE, tf.estimator.ModeKeys.TRAIN)
    evalds = load_dataset(eval_file_path, 1000, tf.estimator.ModeKeys.EVAL)
    if EVAL_STEPS:
        evalds = evalds.take(EVAL_STEPS)

    # Split the training budget into NUM_EVALS "epochs" so that validation
    # (and checkpointing) happens NUM_EVALS times over TRAIN_EXAMPLES rows.
    steps_per_epoch = TRAIN_EXAMPLES // (BATCH_SIZE * NUM_EVALS)

    checkpoint_path = os.path.join(output_dir, 'checkpoints/babyweight')
    # Save a (weights-only) checkpoint after every epoch.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                     save_weights_only=True,
                                                     verbose=1)

    history = model.fit(trainds,
                        validation_data=evalds,
                        epochs=NUM_EVALS,
                        steps_per_epoch=steps_per_epoch,
                        verbose=2,  # one line per epoch
                        callbacks=[cp_callback])

    EXPORT_PATH = os.path.join(output_dir, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
    tf.saved_model.save(model, EXPORT_PATH)  # with default serving function
    print("Exported trained model to {}".format(EXPORT_PATH))
"""
Explanation: Lab Task 2
The following code edits babyweight_tf2/trainer/model.py.
End of explanation
"""
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight_tf2
python3 -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1 --batch_size=10
"""
Explanation: Lab Task 3
After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about <b>3 minutes</b> in which you won't see any output ...
End of explanation
"""
%%writefile babyweight_tf2/Dockerfile
FROM gcr.io/deeplearning-platform-release/tf2-cpu
COPY trainer /babyweight_tf2/trainer
RUN apt update && \
apt install --yes python3-pip && \
pip3 install --upgrade --quiet tf-nightly-2.0-preview
ENV PYTHONPATH ${PYTHONPATH}:/babyweight_tf2
CMD ["python3", "-m", "trainer.task"]
%%writefile babyweight_tf2/push_docker.sh
export PROJECT_ID=$(gcloud config list project --format "value(core.project)")
export IMAGE_REPO_NAME=babyweight_training_container
#export IMAGE_TAG=$(date +%Y%m%d_%H%M%S)
#export IMAGE_URI=gcr.io/$PROJECT_ID/$IMAGE_REPO_NAME:$IMAGE_TAG
export IMAGE_URI=gcr.io/$PROJECT_ID/$IMAGE_REPO_NAME
echo "Building $IMAGE_URI"
docker build -f Dockerfile -t $IMAGE_URI ./
echo "Pushing $IMAGE_URI"
docker push $IMAGE_URI
"""
Explanation: Lab Task 4
Since we are using TensorFlow 2.0 preview, we will use a container image to run the code on AI Platform.
Once TensorFlow 2.0 is released, you will be able to simply do (without having to build a container)
<pre>
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
</pre>
End of explanation
"""
%%bash
cd babyweight_tf2
bash push_docker.sh
"""
Explanation: Note: If you get a permissions/stat error when running push_docker.sh from Notebooks, do it from CloudShell:
Open CloudShell on the GCP Console
* git clone https://github.com/GoogleCloudPlatform/training-data-analyst
* cd training-data-analyst/courses/machine_learning/deepdive/06_structured/babyweight_tf2/containers
* bash push_docker.sh
This step takes 5-10 minutes to run
End of explanation
"""
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBID=babyweight_$(date -u +%y%m%d_%H%M%S)
# Fixed: the original echoed $JOBNAME, which is never defined in this cell.
echo $OUTDIR $REGION $JOBID
gsutil -m rm -rf $OUTDIR
#IMAGE=gcr.io/deeplearning-platform-release/tf2-cpu
# Fixed: $IMAGE_REPO_NAME is only defined inside push_docker.sh, so it was
# empty here and produced a malformed image URI; use the literal repo name.
IMAGE=gcr.io/$PROJECT/babyweight_training_container
gcloud beta ai-platform jobs submit training $JOBID \
  --staging-bucket=gs://$BUCKET --region=$REGION \
  --master-image-uri=$IMAGE \
  --master-machine-type=n1-standard-4 --scale-tier=CUSTOM \
  -- \
  --bucket=${BUCKET} \
  --output_dir=${OUTDIR} \
  --train_examples=200000
"""
Explanation: Lab Task 5
Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about <b> two hours </b> for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
End of explanation
"""
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
"""
Explanation: When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was:
<pre>
Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186
</pre>
The final RMSE was 1.03 pounds.
<h2> Hyperparameter tuning </h2>
<p>
All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config (as in the gcloud command below).
This step will take <b>up to 2 hours</b> -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
End of explanation
"""
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
"""
Explanation: <h2> Repeat training </h2>
<p>
This time with tuned parameters (note last line)
End of explanation
"""
|
g-weatherill/notebooks | hmtk/Geology.ipynb | agpl-3.0 | #Import tools
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from hmtk.plotting.faults.geology_mfd_plot import plot_recurrence_models
from openquake.hazardlib.scalerel.wc1994 import WC1994 # In all the following examples the Wells & Coppersmith (1994) Scaling Relation is Used
"""
Explanation: HMTK Geological Tools Demonstration
This notepad demonstrates the use of the HMTK geological tools for preparing fault source models for input into OpenQuake
Construction of the Geological Input File
An active fault model input file contains two sections:
1) A tectonic regionalisation - this can provide a container for a set of properties that may be assigned to multiple faults by virtue of a common tectonic region
2) A set of active faults
Tectonic Regionalisation Representation in the Fault Source File
In the tectonic regionalisation information each of the three properties can be represented according to a set of weighted values.
For example, in the case below faults in an arbitrarily named tectonic region (called here "GEM Region 1") will share the same set
of magnitude scaling relations and shear moduli, unless over-written by the specific fault. Those faults assigned to "GEM Region 2"
will have the magnitude scaling relation fixed as WC1994 and the shear modulus of 30 GPa
Active Fault Model
A set of active faults will be defined with a common ID and name.
An active fault set containing a single fault is shown below:
Fault Geometry Representations - Example 1: Simple Fault
Fault Geometry Representations - Example 2: Complex Fault
Rupture Properties
The rupture requires characterisation of the rake (using the Aki & Richards 2002 convention), the slip-type, the slip completeness factor
(an integer constraining the quality of the slip information with 1 being the highest quality), the range of slip values and their
corresponding weights, and the aseismic slip coefficient (the proportion of slip released aseismically, 1.0 - coupling coefficient)
The Magnitude Frequency Distributions
End of explanation
"""
# Set up fault parameters
slip = 10.0 # Slip rate in mm/yr
# Area = along-strike length (km) * down-dip with (km)
area = 100.0 * 20.0
# Rake = 0.
rake = 0.
# Magnitude Scaling Relation
msr = WC1994()
"""
Explanation: The following examples refer to a fault with the following properties:
Length (Along-strike) = 100 km,
Width (Down-Dip) = 20 km,
Slip = 10.0 mm/yr,
Rake = 0. (Strike Slip),
Magnitude Scaling Relation = Wells & Coppersmith (1994),
Shear Modulus = 30.0 GPa
End of explanation
"""
#Magnitude Frequency Distribution Example
anderson_luco_config1 = {'Model_Name': 'AndersonLucoArbitrary',
'Model_Type': 'First',
'Model_Weight': 1.0, # Weight is a required key - normally weights should sum to 1.0 - current example is simply illustrative!
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Minimum_Magnitude': 4.5,
'b_value': [0.8, 0.05]}
anderson_luco_config2 = {'Model_Name': 'AndersonLucoArbitrary',
'Model_Type': 'Second',
'Model_Weight': 1.0,
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Minimum_Magnitude': 4.5,
'b_value': [0.8, 0.05]}
anderson_luco_config3 = {'Model_Name': 'AndersonLucoArbitrary',
'Model_Type': 'Third',
'Model_Weight': 1.0,
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Minimum_Magnitude': 4.5,
'b_value': [0.8, 0.05]}
# Create a list of the configurations
anderson_luco_arb = [anderson_luco_config1, anderson_luco_config2, anderson_luco_config3]
# View the corresponding magnitude recurrence model
plot_recurrence_models(anderson_luco_arb, area, slip, msr, rake, msr_sigma=0.0)
"""
Explanation: Anderson & Luco (Arbitrary)
This describes a set of distributons where the maximum magnitude is assumed to rupture the whole fault surface
End of explanation
"""
anderson_luco_config1 = {'Model_Name': 'AndersonLucoAreaMmax',
'Model_Type': 'First',
'Model_Weight': 1.0, # Weight is a required key - normally weights should sum to 1.0 - current example is simply illustrative!
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Minimum_Magnitude': 4.5,
'b_value': [0.8, 0.05]}
anderson_luco_config2 = {'Model_Name': 'AndersonLucoAreaMmax',
'Model_Type': 'Second',
'Model_Weight': 1.0,
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Minimum_Magnitude': 4.5,
'b_value': [0.8, 0.05]}
anderson_luco_config3 = {'Model_Name': 'AndersonLucoAreaMmax',
'Model_Type': 'Third',
'Model_Weight': 1.0,
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Minimum_Magnitude': 4.5,
'b_value': [0.8, 0.05]}
# For these models a displacement to length ratio is needed
disp_length_ratio = 1.25E-5
# Create a list of the configurations
anderson_luco_area_mmax = [anderson_luco_config1, anderson_luco_config2, anderson_luco_config3]
# View the corresponding magnitude recurrence model
plot_recurrence_models(anderson_luco_area_mmax, area, slip, msr, rake, msr_sigma=0.0)
"""
Explanation: Anderson & Luco (Area - MMax)
This describes a set of distributons where the maximum rupture extent is limited to only part of the fault surface
End of explanation
"""
characteristic = [{'Model_Name': 'Characteristic',
'MFD_spacing': 0.05,
'Model_Weight': 1.0,
'Maximum_Magnitude': None,
'Sigma': 0.15, # Standard Deviation of Distribution (in Magnitude Units) - omit for fixed value
'Lower_Bound': -3.0, # Bounds of the distribution correspond to the number of sigma for truncation
'Upper_Bound': 3.0}]
# View the corresponding magnitude recurrence model
plot_recurrence_models(characteristic, area, slip, msr, rake, msr_sigma=0.0)
"""
Explanation: Characteristic Earthquake
The following example illustrates a "Characteristic" Model, represented by a Truncated Gaussian Distribution
End of explanation
"""
exponential = {'Model_Name': 'YoungsCoppersmithExponential',
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Maximum_Magnitude_Uncertainty': None,
'Minimum_Magnitude': 5.0,
'Model_Weight': 1.0,
'b_value': [0.8, 0.1]}
hybrid = {'Model_Name': 'YoungsCoppersmithCharacteristic',
'MFD_spacing': 0.1,
'Maximum_Magnitude': None,
'Maximum_Magnitude_Uncertainty': None,
'Minimum_Magnitude': 5.0,
'Model_Weight': 1.0,
'b_value': [0.8, 0.1],
'delta_m': None}
youngs_coppersmith = [exponential, hybrid]
# View the corresponding magnitude recurrence model
plot_recurrence_models(youngs_coppersmith, area, slip, msr, rake, msr_sigma=0.0)
"""
Explanation: Youngs & Coppersmith (1985) Models
The following describes the recurrence from two distributions presented by Youngs & Coppersmith (1985): 1) Exponential Distribution, 2) Hybrid Exponential-Characteristic Distribution
End of explanation
"""
def show_file_contents(filename):
    """
    Print the contents of *filename* to stdout.

    :param str filename: path of the text file to display
    """
    # Fixed: Python 2 `print row` statement, and the handle is now closed
    # automatically via a context manager.
    with open(filename, 'r') as fid:
        for row in fid:
            print(row, end='')
input_file = 'input_data/simple_fault_example_4branch.yml'
show_file_contents(input_file)
"""
Explanation: Epistemic Uncertainty Examples
This example considers the fault defined at the top of the page. This fault defines two values of slip rate and two different magnitude frequency distributions
End of explanation
"""
# Import the Parser
from hmtk.parsers.faults.fault_yaml_parser import FaultYmltoSource
# Fault mesh discretization step
mesh_spacing = 1.0 # (km)
# Read in the fault model
reader = FaultYmltoSource(input_file)
fault_model, tectonic_region = reader.read_file(mesh_spacing)
# Construct the fault source model (this is really running the MFD calculation code)
fault_model.build_fault_model()
# Write to an output NRML file
output_file_1 = 'output_data/fault_example_enumerated.xml'
fault_model.source_model.serialise_to_nrml(output_file_1)
show_file_contents(output_file_1)
"""
Explanation: Example 1 - Full Enumeration
In this example each individual MFD for each branch is determined. In the resulting file the fault is duplicated n_branches number of times, with the
corresponding MFD multiplied by the end-branch weight
End of explanation
"""
# Read in the fault model
reader = FaultYmltoSource(input_file)
fault_model, tectonic_region = reader.read_file(mesh_spacing)
# Scaling relation for export
output_msr = WC1994()
# Construct the fault source model - collapsing the branches
fault_model.build_fault_model(collapse=True, rendered_msr=output_msr)
# Write to an output NRML file
output_file_2 = 'output_data/fault_example_collapsed.xml'
fault_model.source_model.serialise_to_nrml(output_file_2)
show_file_contents(output_file_2)
"""
Explanation: Example 2: Collapsed Branches
In the following example we implement the same model, this time collapsing the branched. This means that the MFD is discretised and the incremental rate
in each magnitude bin is the weighted sum of the rates in that bin from all the end branches of the logic tree.
When collapsing the branches, however, it is necessary to define a single Magnitude Scaling Relation that will need to be assigned to the fault for
use in OpenQuake.
End of explanation
"""
|
saashimi/CPO-datascience | Normalized Dataset.ipynb | mit | #Import required packages
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
def format_date(df_date):
    """
    Splits Meeting Times and Dates into datetime objects where applicable using regex.

    Adds Days, Start_Date, Year, Quarter, Term_Date, End_Date, Start_Time,
    End_Time and Duration_Hr columns in place and returns the same frame.
    """
    # First whitespace-delimited token of Meeting_Times is the day pattern (e.g. 'MW').
    df_date['Days'] = df_date['Meeting_Times'].str.extract('([^\s]+)', expand=True)
    # First token of Meeting_Dates is the start date.
    df_date['Start_Date'] = df_date['Meeting_Dates'].str.extract('([^\s]+)', expand=True)
    # Term is encoded as YYYYQQ (e.g. 201604).
    df_date['Year'] = df_date['Term'].astype(str).str.slice(0,4)
    df_date['Quarter'] = df_date['Term'].astype(str).str.slice(4,6)
    # NOTE(review): the two quarter digits are parsed as a month number here.
    df_date['Term_Date'] = pd.to_datetime(df_date['Year'] + df_date['Quarter'], format='%Y%m')
    # Text between a '-' and the trailing space is taken as the end date.
    # NOTE(review): assumes Meeting_Dates looks like 'START-END ' — confirm format.
    df_date['End_Date'] = df_date['Meeting_Dates'].str.extract('(?<=-)(.*)(?= )', expand=True)
    # Meeting_Times looks like 'DAYS HHMM-HHMM'; pull out both times.
    df_date['Start_Time'] = df_date['Meeting_Times'].str.extract('(?<= )(.*)(?=-)', expand=True)
    df_date['Start_Time'] = pd.to_datetime(df_date['Start_Time'], format='%H%M')
    df_date['End_Time'] = df_date['Meeting_Times'].str.extract('((?<=-).*$)', expand=True)
    df_date['End_Time'] = pd.to_datetime(df_date['End_Time'], format='%H%M')
    # Class duration in hours (both times share the same dummy date).
    df_date['Duration_Hr'] = ((df_date['End_Time'] - df_date['Start_Time']).dt.seconds)/3600
    return df_date
def format_xlist(df_xl):
    """
    Recompute seat-capacity differences, using Max Enrollment for
    cross-listed sections, and drop rooms with placeholder capacities.
    """
    enrolled = df_xl['Actual_Enrl'].astype(int)
    # Cross-listed sections (non-empty Xlst) are measured against Max_Enrl;
    # everything else is measured against the physical room capacity.
    df_xl['Cap_Diff'] = np.where(df_xl['Xlst'] != '',
                                 df_xl['Max_Enrl'].astype(int) - enrolled,
                                 df_xl['Room_Capacity'].astype(int) - enrolled)
    # Capacities of 999 and above are placeholder values, not real rooms.
    return df_xl.loc[df_xl['Room_Capacity'].astype(int) < 999]
"""
Explanation: OLS Analysis Using Full PSU dataset
End of explanation
"""
# Load the full PSU classroom dataset and derive schedule features.
pd.set_option('display.max_rows', None)
df = pd.read_csv('data/PSU_master_classroom_91-17.csv', dtype={'Schedule': object, 'Schedule Desc': object})
df = df.fillna('')
df = format_date(df)
# Avoid classes that only occur on a single day
df = df.loc[df['Start_Date'] != df['End_Date']]
#terms = [199104, 199204, 199304, 199404, 199504, 199604, 199704, 199804, 199904, 200004, 200104, 200204, 200304, 200404, 200504, 200604, 200704, 200804, 200904, 201004, 201104, 201204, 201304, 201404, 201504, 201604]
terms = [200604, 200704, 200804, 200904, 201004, 201104, 201204, 201304, 201404, 201504, 201604]
df = df.loc[df['Term'].isin(terms)]
df = df.loc[df['Online Instruct Method'] != 'Fully Online']
# Calculate number of days per week and treat Sunday condition
df['Days_Per_Week'] = df['Days'].str.len()
df['Room_Capacity'] = df['Room_Capacity'].apply(lambda x: x if (x != 'No Data Available') else 0)
df['Building'] = df['ROOM'].str.extract('([^\s]+)', expand=True)
df_cl = format_xlist(df)
df_cl['%_Empty'] = df_cl['Cap_Diff'].astype(float) / df_cl['Room_Capacity'].astype(float)
# Normalize the results
# NOTE(review): this immediately overwrites the %_Empty computed above with a
# different formula — Actual/Capacity is an occupancy fraction, not a
# "% empty". Confirm which definition is intended.
df_cl['%_Empty'] = df_cl['Actual_Enrl'].astype(np.float32)/df_cl['Room_Capacity'].astype(np.float32)
df_cl = df_cl.replace([np.inf, -np.inf], np.nan).dropna()
# NOTE(review): LabelEncoder is imported but never used in this cell.
from sklearn.preprocessing import LabelEncoder
df_cl = df_cl.sample(n = 15000)
# Save as a 1D array. Otherwise will throw errors.
# NOTE(review): dtype="|S6" converts the float targets to 6-byte strings,
# turning regression targets into coarse class labels — confirm intended.
y = np.asarray(df_cl['%_Empty'], dtype="|S6")
df_cl = df_cl[['Dept', 'Class', 'Days', 'Start_Time', 'ROOM', 'Term', 'Room_Capacity', 'Building']]
cat_columns = ['Dept', 'Class', 'Days', 'Start_Time', 'ROOM', 'Building']
# Integer-encode each categorical column via a per-column value -> index map.
for column in cat_columns:
    room_mapping = {label: idx for idx, label in enumerate(np.unique(df_cl['{0}'.format(column)]))}
    df_cl['{0}'.format(column)] = df_cl['{0}'.format(column)].map(room_mapping)
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
    from sklearn.cross_validation import train_test_split
else:
    from sklearn.model_selection import train_test_split
# Features exclude the first column ('Dept'); 70/30 train/test split.
# NOTE(review): dropping 'Dept' via iloc[:, 1:] may be unintentional since
# 'Dept' was encoded above — confirm.
X = df_cl.iloc[:, 1:].values
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.3, random_state=0)
"""
Explanation: Partitioning a dataset in training and test sets
End of explanation
"""
from sklearn.ensemble import RandomForestClassifier
feat_labels = df_cl.columns[1:]
forest = RandomForestClassifier(n_estimators=100,
random_state=0,
n_jobs=-1) # -1 sets n_jobs=n_CPU cores
forest.fit(X_train, y_train)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
print("%2d) %-*s %f" % (f + 1, 30,
feat_labels[indices[f]],
importances[indices[f]]))
plt.title('Feature Importances')
plt.bar(range(X_train.shape[1]),
importances[indices],
color='lightblue',
align='center')
plt.xticks(range(X_train.shape[1]),
feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.show()
"""
Explanation: Determine Feature Importances
End of explanation
"""
# Random Forest Classification, scored with 10-fold cross-validation.
from sklearn import model_selection

# Fixed: random_state has no effect (and recent scikit-learn raises a
# ValueError) unless shuffle=True is also passed to KFold.
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=0)
model = RandomForestClassifier(n_estimators=20, random_state=0, n_jobs=-1)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
"""
Explanation: Test Prediction Results
Class, Term, and Start Times are the three most important factors in determining the percentage of empty seats expected.
End of explanation
"""
|
ssunkara1/bqplot | examples/Marks/Pyplot/GridHeatMap.ipynb | apache-2.0 | np.random.seed(0)
data = np.random.randn(10, 10)
"""
Explanation: Get Data
End of explanation
"""
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(data)
fig
"""
Explanation: Basic Heat map
End of explanation
"""
axes_options = {'column': {'visible': False}, 'row': {'visible': False}, 'color': {'visible': False}}
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(data, axes_options=axes_options)
fig
"""
Explanation: Hide tick_labels and color axis using 'axes_options'
End of explanation
"""
fig = plt.figure(padding_y=0.0)
plt.scales(scales={'x': LinearScale(), 'y': LinearScale(reverse=True)})
## The data along the rows is not uniform. Hence the 5th row(from top) of the map
## is twice the height of the remaining rows.
row_data = np.arange(10)
row_data[5:] = np.arange(6, 11)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data)
fig
print(row_data.shape)
print(column_data.shape)
print(data.shape)
"""
Explanation: Non Uniform Heat map
End of explanation
"""
fig = plt.figure(padding_y=0.0)
plt.scales(scales={'x': LinearScale(), 'y': LinearScale(reverse=True)})
row_data = np.arange(11)
column_data = np.arange(10, 21)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data)
fig
"""
Explanation: Alignment of the data with respect to the grid
For a N-by-N matrix, N+1 points along the row or the column are assumed to be end points.
End of explanation
"""
fig = plt.figure(padding_y=0.0)
plt.scales(scales={'x': LinearScale(),
'y': LinearScale(reverse=True, max=15)})
row_data = np.arange(10)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data)
fig
"""
Explanation: By default, for N points along any dimension, data aligns to the start of the rectangles in the grid.
The grid extends infinitely in the other direction. By default, the grid extends infinitely
towards the bottom and the right.
End of explanation
"""
fig = plt.figure(padding_y=0.0)
plt.scales(scales={'x': LinearScale(),
'y': LinearScale(reverse=True, min=-5, max=15)})
row_data = np.arange(10)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data, row_align='end')
fig
"""
Explanation: By changing the row_align and column_align properties, the grid can extend in the opposite direction
End of explanation
"""
fig = plt.figure(padding_y=0.0)
plt.scales(scales={'x': LinearScale(),
'y': LinearScale(reverse=True, min=-5, max=15)})
row_data = np.arange(9)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data, row_align='end')
fig
"""
Explanation: For N+1 points on any direction, the grid extends infintely in both directions
End of explanation
"""
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(data, opacity=0.3, stroke='white', axes_options=axes_options)
fig
"""
Explanation: Changing opacity and stroke
End of explanation
"""
data = np.random.randn(10, 10)
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(data, interactions={'click':'select'},
selected_style={'opacity': '1.0'},
unselected_style={'opacity': 0.4},
axes_options=axes_options)
fig
"""
Explanation: Selections on the grid map
Selection on the GridHeatMap works similar to excel. Clicking on a cell selects the cell, and deselects the previous selection. Using the Ctrl key allows multiple cells to be selected, while the Shift key selects the range from the last cell in the selection to the current cell.
End of explanation
"""
grid_map.selected
"""
Explanation: The selected trait of a GridHeatMap contains a list of lists, with each sub-list containing the row and column index of a selected cell.
End of explanation
"""
|
shngli/Data-Mining-Python | Mining massive datasets/Data stream mining.ipynb | gpl-3.0 | import numpy as np
A = np.array([#A B C D E F G H
[0,0,1,0,0,1,0,0],
[0,0,0,0,1,0,0,1],
[1,0,0,1,0,1,0,0],
[0,0,1,0,1,0,1,0],
[0,1,0,1,0,0,0,1],
[1,0,1,0,0,0,1,0],
[0,0,0,1,0,1,0,1],
[0,1,0,0,1,0,1,0]])
print A
D = np.sum(A,0)
D = np.diag(D)
print D
L = D - A
print L
for matrix, tag in zip((A, D, L), ("Adjacency A", "Degree D", "Laplacian L")):
print "For {}, Entry sum:".format(tag), np.sum(matrix), "\t", "Non-zero entries: ", np.sum(np.where(matrix != 0, 1, 0))
"""
Explanation: Data Stream Mining
Question 1
For the following graph:
C -- D -- E
/ | | | \
A | | | B
\ | | | /
F -- G -- H
Write the adjacency matrix A, the degree matrix D, and the Laplacian matrix L. For each, find the sum of all entries and the number of nonzero entries.
End of explanation
"""
import json
import pprint
def surprise(seq):
    """Return the surprise number (second moment) of *seq*.

    The surprise number is the sum of the squared occurrence counts of
    each distinct element.  The element -> count table is pretty-printed
    for inspection, as in the original.
    """
    counts = {}
    for element in seq:
        counts[element] = counts.get(element, 0) + 1
    pprint.pprint(counts)
    total = 0
    for count in counts.values():
        total += count * count
    return total
# Candidate sets of three "random" AMS timestamps to compare (Question 2).
triples = [[20, 49, 53],
[17, 43, 51],
[25, 34, 47],
[37, 46, 55]]
def median(triple):
    """Return the middle value of *triple* (element at index len//2 after sorting)."""
    ordered = sorted(triple)
    return ordered[len(ordered) // 2]
def estimate(r, s):
    """AMS second-moment estimates of stream *s* at 1-based timestamps *r*.

    For each timestamp t in r, m counts the occurrences of the element
    read at t (i.e. s[t-1]) from that position to the end of the stream;
    the corresponding estimate is len(s) * (2*m - 1).
    """
    length = len(s)
    estimates = []
    for t in r:
        element = s[t - 1]
        m = 0
        for pos in range(t - 1, length):
            if s[pos] == element:
                m += 1
        estimates.append(length * (2 * m - 1))
    return estimates
def main():
# Stream: the values 1..10 cycling, 75 elements in total (at timestamp 75 a 5
# has just been read).  Python 2 print statements below.
s = [(i % 10) + 1 for i in range(0, 75)]
# Exact surprise number of the stream.
print surprise(s)
# For each candidate triple of timestamps, report the median of its AMS estimates.
for a in triples:
print '%s => %d' % (json.dumps(a), median(estimate(a, s)))
return 0
if __name__ == '__main__':
main()
"""
Explanation: Question 2
We wish to estimate the surprise number (2nd moment) of a data stream, using the method of AMS. It happens that our stream consists of ten different values, which we'll call 1, 2,..., 10, that cycle repeatedly. That is, at timestamps 1 through 10, the element of the stream equals the timestamp, at timestamps 11 through 20, the element is the timestamp minus 10, and so on. It is now timestamp 75, and a 5 has just been read from the stream. As a start, you should calculate the surprise number for this time.
For our estimate of the surprise number, we shall choose three timestamps at random, and estimate the surprise number from each, using the AMS approach (length of the stream times 2m-1, where m is the number of occurrences of the element of the stream at that timestamp, considering all times from that timestamp on, to the current time). Then, our estimate will be the median of the three resulting values.
You should discover the simple rules that determine the estimate derived from any given timestamp and from any set of three timestamps. Then, identify the set of three "random" timestamps that give the closest estimate from {20, 49, 53}, {17, 43, 51}, {25, 34, 47}, {37, 46, 55}.
End of explanation
"""
import json
# The four candidate 4-element streams for the Flajolet-Martin estimate (Question 3).
streams = [[3, 4, 8, 10],
[1, 2, 3, 9],
[4, 5, 6, 7],
[3, 7, 8, 10]]
def mod(x):
    """Hash function h(x) = (3x + 7) mod 11 used in Question 3."""
    hashed = 3 * x + 7
    return hashed % 11
def bit(x):
    """Return the number of trailing zero bits in the low four bits of x.

    The result is capped at 4 (x == 0 yields 4).  This is the tail
    length R used for the Flajolet-Martin estimate.
    """
    for tail_len, mask in ((4, 0xF), (3, 0x7), (2, 0x3), (1, 0x1)):
        if x & mask == 0:
            return tail_len
    return 0
def main():
# For each stream, R is the maximum tail length over the hashed elements.
# NOTE(review): the classic Flajolet-Martin estimate is 2**R; this prints r*r
# (which only coincides with 2**R when R == 2) -- confirm intent.
# Python 2 print statement below.
for stream in streams:
r = max([bit(mod(x)) for x in stream])
print '%s = %d' % (json.dumps(stream), r*r)
if __name__ == '__main__':
main()
"""
Explanation: Question 3
We wish to use the Flajolet-Martin algorithm to count the number of distinct elements in a stream. Suppose that there are ten possible elements, 1, 2,..., 10, that could appear in the stream, but only four of them have actually appeared. To make our estimate of the count of distinct elements, we hash each element to a 4-bit binary number. The element x is hashed to 3x + 7 (modulo 11). For example, element 8 hashes to 3×8+7 = 31, which is 9 modulo 11 (i.e., the remainder of 31/11 is 9). Thus, the 4-bit string for element 8 is 1001.
A set of four of the elements 1 through 10 could give an estimate that is exact (if the estimate is 4), or too high, or too low. You should figure out under what circumstances a set of four elements falls into each of those categories. Then, identify the set of four elements that gives the exactly correct estimate from {3, 4, 8, 10}, {1, 2, 3, 9}, {4, 5, 6, 7}, { 3, 7, 8, 10}.
End of explanation
"""
def email(n):
    """Return the sampling threshold t for a stream of n emails.

    Records (100 bytes each) are kept for users whose ID hashes to a
    bucket <= t (buckets 0..999,999), i.e. a fraction (t+1)/10^6 of the
    n emails.  The storage constraint n * (t+1)/10^6 * 100 <= 10^10
    bytes gives t <= 10^14/n - 1.

    Fix: use floor division so t is an exact integer; the original true
    division returns a float under Python 3 (and a fractional threshold
    is meaningless for an integer bucket cutoff).
    """
    return 10**14 // n - 1
print "t-value when n=10^9: ", email(10**9)
print "t-value when n=10^10: ", email(10**10)
print "t-value when n=10^11: ", email(10**11)
print "t-value when n=10^12: ", email(10**12)
print "t-value when n=10^13: ", email(10**13)
print "t-value when n=10^14: ", email(10**14)
"""
Explanation: Correct answer: { 3, 7, 8, 10} ie. estimate = 4.
Question 4
A certain Web mail service has 10^8 users and wishes to create a sample of data about these users, occupying 10^10 bytes. Activity at the service can be viewed as a stream of elements, each of which is an email. The element contains the ID of the sender, which must be one of the 10^8 users of the service, and other information, e.g., the recipient(s), and contents of the message. The plan is to pick a subset of the users and collect in the 10^10 bytes records of length 100 bytes about every email sent by the users in the selected set (and nothing about other users).
User ID's will be hashed to a bucket number, from 0 to 999,999. At all times, there will be a threshold t such that the 100-byte records for all the users whose ID's hash to t or less will be retained, and other users' records will not be retained. You may assume that each user generates emails at exactly the same rate as other users. As a function of n, the number of emails in the stream so far, what should the threshold t be in order that the selected records will not exceed the 10^10 bytes available to store records? Identify the value of n and its corresponding value of t.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.20/_downloads/7b1b17f7cd0e886e3d0da4385e8a1630/plot_psf_ctf_vertices.ipynb | bsd-3-clause | # Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_resolution_matrix, get_cross_talk,
get_point_spread)
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_evo = data_path + '/MEG/sample/sample_audvis-ave.fif'
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# forward operator with fixed source orientations
forward = mne.convert_forward_solution(forward, surf_ori=True,
force_fixed=True)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution
# free source orientation
inverse_operator = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'MNE' # can be 'MNE' or 'sLORETA'
# compute resolution matrix for sLORETA
rm_lor = make_inverse_resolution_matrix(forward, inverse_operator,
method='sLORETA', lambda2=lambda2)
# get PSF and CTF for sLORETA at one vertex
sources = [1000]
stc_psf = get_point_spread(rm_lor, forward['src'], sources, norm=True)
stc_ctf = get_cross_talk(rm_lor, forward['src'], sources, norm=True)
"""
Explanation: Plot point-spread functions (PSFs) and cross-talk functions (CTFs)
Visualise PSF and CTF at one vertex for sLORETA.
End of explanation
"""
# Which vertex corresponds to selected source
vertno_lh = forward['src'][0]['vertno']
verttrue = [vertno_lh[sources[0]]] # just one vertex
# find vertices with maxima in PSF and CTF
vert_max_psf = vertno_lh[stc_psf.data.argmax()]
vert_max_ctf = vertno_lh[stc_ctf.data.argmax()]
# Plot the point-spread function on the inflated left hemisphere.
brain_psf = stc_psf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir,
figure=1)
brain_psf.show_view('ventral')
brain_psf.add_text(0.1, 0.9, 'sLORETA PSF', 'title', font_size=16)
# True source location for PSF
brain_psf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of PSF
brain_psf.add_foci(vert_max_psf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
# Same visualisation for the cross-talk function, in a second figure.
brain_ctf = stc_ctf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir,
figure=2)
brain_ctf.add_text(0.1, 0.9, 'sLORETA CTF', 'title', font_size=16)
brain_ctf.show_view('ventral')
brain_ctf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of CTF
brain_ctf.add_foci(vert_max_ctf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
print('The green spheres indicate the true source location, and the black '
'spheres the maximum of the distribution.')
"""
Explanation: Visualise
End of explanation
"""
|
Chipe1/aima-python | notebooks/chapter24/Image Edge Detection.ipynb | mit | import os, sys
sys.path = [os.path.abspath("../../")] + sys.path
from perception4e import *
from notebook4e import *
"""
Explanation: Edge Detection
Edge detection is one of the earliest and popular image processing tasks. Edges are straight lines or curves in the image plane across which there is a “significant” change in image brightness. The goal of edge detection is to abstract away from the messy, multi-megabyte image and towards a more compact, abstract representation.
There are multiple ways to detect an edge in an image, but most of them may be grouped into two categories: gradient and Laplacian. Here we will introduce some algorithms among them and their intuitions. First, let's import the necessary packages.
End of explanation
"""
psource(gradient_edge_detector)
"""
Explanation: Gradient Edge Detection
Because edges correspond to locations in images where the brightness undergoes a sharp change, a naive idea would be to differentiate the image and look for places where the magnitude of the derivative is large. For many simple cases with regular geometry topologies, this simple method could work.
Here we introduce a 2D function $f(x,y)$ to represent the pixel values on a 2D image plane. Thus this method follows the math intuition below:
$$\frac{\partial f(x,y)}{\partial x} = \lim_{\epsilon \rightarrow 0} \frac{f(x+\epsilon,y)-\partial f(x,y)}{\epsilon}$$
Above is exactly the definition of the edges in an image. In real cases, $\epsilon$ cannot be 0. We can only investigate the pixels in the neighborhood of the current one to get the derivation of a pixel. Thus the previous formula becomes
$$\frac{\partial f(x,y)}{\partial x} = \lim_{\epsilon \rightarrow 0} \frac{f(x+1,y)-\partial f(x,y)}{1}$$
To implement the above formula, we can simply apply a filter $[1,-1]$ to extract the differentiated image. For the case of derivation in the y-direction, we can transpose the above filter and apply it to the original image. The relation of partial deviation of the direction of edges are summarized in the following picture:
<img src="images/gradients.png" width="700"/>
Implementation
We implemented an edge detector using a gradient method as gradient_edge_detector in perceptron.py. There are two filters defined as $[[1, -1]], [[1], [-1]]$ to extract edges in x and y directions respectively. The filters are applied to an image using convolve2d method in scipy.single package. The image passed into the function needs to be in the form of numpy.ndarray or an iterable object that can be transformed into a ndarray.
To view the detailed implementation, please execute the following block
End of explanation
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
# Load the sample image as an ndarray and report its dimensions
# (len(im) = rows/height, len(im[0]) = columns/width).
im =mpimg.imread('images/stapler.png')
print("image height:", len(im))
print("image width:", len(im[0]))
"""
Explanation: Example
Now let's try the detector for real case pictures. First, we will show the original picture before edge detection:
<img src="images/stapler.png" width="500"/>
We will use matplotlib to read the image as a numpy ndarray:
End of explanation
"""
# Run the gradient edge detector; the output keeps the input image's dimensions.
edges = gradient_edge_detector(im)
print("image height:", len(edges))
print("image width:", len(edges[0]))
"""
Explanation: The code shows we get an image with a size of $787*590$. gaussian_derivative_edge_detector can extract images in both x and y direction and then put them together in a ndarray:
End of explanation
"""
show_edges(edges)
"""
Explanation: The edges are in the same shape of the original image. Now we will try print out the image, we implemented a show_edges function to do this:
End of explanation
"""
x_filter = scipy.signal.convolve2d(gaussian_filter, np.asarray([[1, -1]]), 'same')
y_filter = scipy.signal.convolve2d(gaussian_filter, np.asarray([[1], [-1]]), 'same')
"""
Explanation: We can see that the edges are extracted well. We can use the result of this simple algorithm as a baseline and compare the results of other algorithms to it.
Derivative of Gaussian
When considering the situation where there is strong noise in an image, the ups and downs of the noise will induce strong peaks in the gradient profile. In order to be more noise-robust, an algorithm introduces a Gaussian filter before applying the gradient filter. Put another way, convolving with a gradient filter after a Gaussian filter is equivalent to convolving a derivative-of-Gaussian filter directly with the image.
Here is how this intuition is represented in math:
$$(I\bigotimes g)\bigotimes h = I\bigotimes (g\bigotimes h) $$
Where $I$ is the image, $g$ is the gradient filter and $h$ is the Gaussian filter. A two dimensional derivative of Gaussian kernel is dipicted in the following figure:
<img src="images/derivative_of_gaussian.png" width="400"/>
Implementation
In our implementation, we initialize Gaussian filters by applying the 2D Gaussian function on a given size of the grid which is the same as the kernel size. Then the x and y direction image filters are calculated as the convolution of the Gaussian filter and the gradient filter:
End of explanation
"""
psource(gaussian_derivative_edge_detector)
"""
Explanation: Then both of the filters are applied to the input image to extract the x and y direction edges. For detailed implementation, please view by:
End of explanation
"""
e = gaussian_derivative_edge_detector(im)
show_edges(e)
"""
Explanation: Example
Now let's try again on the stapler image and plot the extracted edges:
End of explanation
"""
e = laplacian_edge_detector(im)
show_edges(e)
"""
Explanation: We can see that the extracted edges are more similar to the original one. The resulting edges are depending on the initial Gaussian kernel size and how it is initialized.
Laplacian Edge Detector
Laplacian is somewhat different from the methods we have discussed so far. Unlike the above kernels which are only using the first-order derivatives of the original image, the Laplacian edge detector uses the second-order derivatives of the image. Using the second derivatives also makes the detector very sensitive to noise. Thus the image is often Gaussian smoothed before applying the Laplacian filter.
Here are how the Laplacian detector looks like:
<img src="images/laplacian.png" width="200"/>
Implementation
There are two commonly used small Laplacian kernels:
<img src="images/laplacian_kernels.png" width="300"/>
In our implementation, we used the first one as the default kernel and convolve it with the original image using packages provided by scipy.
Example
Now let's use the Laplacian edge detector to extract edges of the staple example:
End of explanation
"""
|
jepegit/cellpy | dev_utils/batch_notebooks/creating_journals_by_different_methods.ipynb | mit | %load_ext autoreload
%autoreload 2
import os
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cellpy
from cellpy import prms
from cellpy import prmreader
from cellpy.utils import batch
import holoviews as hv
%matplotlib inline
hv.extension("bokeh")
name = "first"
project = "ocv_tests"
print(" INITIALISATION OF BATCH ".center(80, "="))
b = batch.init(name, project, default_log_level="INFO")
# Start from an empty journal pages DataFrame and fill in one cell by hand.
p = b.experiment.journal.create_empty_pages()
# NOTE(review): the journal index name ends in _cc_03 but the raw/cellpy file
# names below reference _cc_01 -- confirm this mismatch is intended.
filename = "20190204_FC_snx012_01_cc_03"
mass = 0.5
total_mass = 1.0
loading = 0.1
fixed = False
label = "fc_snx012_01"
cell_type = "full_cell"
raw_file_name = [Path(".") / "20190204_FC_snx012_01_cc_01.res"]
cellpy_file_name = Path(".") / "20190204_FC_snx012_01_cc_01.h5"
group = 1
sub_group = 1
# Insert the row; the value order must match the columns produced by
# create_empty_pages().
p.loc[filename] = [
mass,
total_mass,
loading,
fixed,
label,
cell_type,
raw_file_name,
cellpy_file_name,
group,
sub_group,
]
p
# Attach the hand-built pages to the batch object and display them.
b.pages = p
b.pages
"""
Explanation: Create journals
We need some more ways to create the journals for the batch runs. Currently, these methods are supported:
- b.from_db()
- b.from_file(filename)
Within the Journal class
```python
def from_file(self, file_name=None):
"""Loads a DataFrame with all the needed info about the experiment"""
file_name = self._check_file_name(file_name)
with open(file_name, "r") as infile:
top_level_dict = json.load(infile)
pages_dict = top_level_dict["info_df"]
pages = pd.DataFrame(pages_dict)
pages.cellpy_file_names = pages.cellpy_file_names.apply(self._fix_cellpy_paths)
self.pages = pages
self.file_name = file_name
self._prm_packer(top_level_dict["metadata"])
self.generate_folder_names()
self.paginate()
```
Within the Batch class
```python
def create_journal(self, description=None, from_db=True):
logging.debug("Creating a journal")
logging.debug(f"description: {description}")
logging.debug(f"from_db: {from_db}")
# rename to: create_journal (combine this with function above)
logging.info(f"name: {self.experiment.journal.name}")
logging.info(f"project: {self.experiment.journal.project}")
if description is not None:
from_db = False
if from_db:
self.experiment.journal.from_db()
self.experiment.journal.to_file()
else:
# TODO: move this into the bacth journal class
if description is not None:
print(f"Creating from {type(description)} is not implemented yet")
logging.info("Creating an empty journal")
logging.info(f"name: {self.experiment.journal.name}")
logging.info(f"project: {self.experiment.journal.project}")
self.experiment.journal.pages = pd.DataFrame(
columns=[
"filenames",
"masses",
"total_masses",
"loadings",
"fixed",
"labels",
"cell_type",
"raw_file_names",
"cellpy_file_names",
"groups",
"sub_groups",
]
)
self.experiment.journal.pages.set_index("filenames", inplace=True)
self.experiment.journal.generate_folder_names()
self.experiment.journal.paginate()
```
End of explanation
"""
b2 = batch.init(default_log_level="DEBUG")
b2.experiment.journal.name = "first"
b2.experiment.journal.project = "ocv_tests"
# see if it finds files (str)
b2.create_journal("creating_journals_by_different_methods.ipynb")
# see if it finds files (pathlib.Path)
b2.create_journal(Path("creating_journals_by_different_methods.ipynb"))
"""
Explanation: Checking the create_journal function in Batch
End of explanation
"""
filename = "20190204_FC_snx012_01_cc_03"
mass = 0.5
total_mass = 1.0
loading = 0.1
fixed = False
label = "fc_snx012_01"
cell_type = "full_cell"
raw_file_name = [Path(".") / "20190204_FC_snx012_01_cc_01.res"]
cellpy_file_name = Path(".") / "20190204_FC_snx012_01_cc_01.h5"
group = 1
sub_group = 1
d = {
"filenames": filename,
"masses": mass,
"total_masses": total_mass,
"loadings": loading,
"fixed": fixed,
"labels": label,
"cell_type": cell_type,
"raw_file_names": raw_file_name,
"cellpy_file_names": cellpy_file_name,
"groups": group,
"sub_groups": sub_group,
}
d2 = {
"filenames": [filename],
"masses": [mass],
"total_masses": [total_mass],
"loadings": [loading],
"fixed": [fixed],
"labels": [label],
"cell_type": [cell_type],
"raw_file_names": [raw_file_name],
"cellpy_file_names": [cellpy_file_name],
"groups": [group],
"sub_groups": [sub_group],
}
d3 = {
"filenames": [filename, filename + "b"],
"masses": [mass, 0.4],
"total_masses": [total_mass, 1.0],
"loadings": [loading, 0.2],
"fixed": [fixed, 1],
"labels": [label, "JPM"],
"cell_type": [cell_type, "anode"],
"raw_file_names": [raw_file_name, raw_file_name],
"cellpy_file_names": [cellpy_file_name, cellpy_file_name],
"groups": [group, 2],
"sub_groups": [sub_group, 1],
}
d4 = {
"filenames": [filename, filename + "b"],
"masses": [mass], # Different length
"total_masses": [total_mass, 1.0],
"loadings": [loading, 0.2],
"fixed": [fixed, 1],
"labels": [label, "JPM"],
"cell_type": [cell_type, "anode"],
"raw_file_names": [raw_file_name, raw_file_name],
"cellpy_file_names": [cellpy_file_name, cellpy_file_name],
"groups": [group, 2],
"sub_groups": [sub_group, 1],
} # this should fail
d5 = {
"filenames": [filename, filename + "b"],
"masses": [mass, 0.2], # Different length
"total_masses": [total_mass, 1.0],
"loadings": [loading, 0.2],
# "fixed": [fixed, 1],
"labels": [label, "JPM"],
"cell_type": [cell_type, "anode"],
"raw_file_names": [raw_file_name, raw_file_name],
"cellpy_file_names": [cellpy_file_name, cellpy_file_name],
"groups": [group, 2],
"sub_groups": [sub_group, 1],
}
p = pd.DataFrame(d)
p2 = pd.DataFrame(d2)
p3 = pd.DataFrame(d3)
p5 = pd.DataFrame(d5)
b3 = batch.init(name, project, default_log_level="DEBUG")
b3.create_journal(d)
b3.pages
b3.create_journal(d2)
b3.pages
b3.create_journal(d3)
b3.pages
b3.create_journal(d5)
b3.pages
b3.create_journal(p)
b3.pages
b3.create_journal(p2)
b3.pages
b3.create_journal(p3)
b3.pages
b3.create_journal(p5)
b3.pages
"""
Explanation: different methods
dataframe
End of explanation
"""
|
karlnapf/shogun | doc/ipython-notebooks/pca/pca_notebook.ipynb | bsd-3-clause | %pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all shogun classes
from shogun import *
import shogun as sg
"""
Explanation: Principal Component Analysis in Shogun
By Abhijeet Kislay (GitHub ID: <a href='https://github.com/kislayabhi'>kislayabhi</a>)
This notebook is about finding Principal Components (<a href="http://en.wikipedia.org/wiki/Principal_component_analysis">PCA</a>) of data (<a href="http://en.wikipedia.org/wiki/Unsupervised_learning">unsupervised</a>) in Shogun. Its <a href="http://en.wikipedia.org/wiki/Dimensionality_reduction">dimensional reduction</a> capabilities are further utilised to show its application in <a href="http://en.wikipedia.org/wiki/Data_compression">data compression</a>, image processing and <a href="http://en.wikipedia.org/wiki/Facial_recognition_system">face recognition</a>.
End of explanation
"""
#number of data points.
n=100
#generate a random 2d line(y1 = mx1 + c)
# NOTE(review): numpy.random.random_integers is deprecated in favour of randint.
m = random.randint(1,10)
c = random.randint(1,10)
x1 = random.random_integers(-20,20,n)
y1=m*x1+c
#generate the noise.
noise=random.random_sample([n]) * random.random_integers(-35,35,n)
#make the noise orthogonal to the line y=mx+c and add it.
# NOTE(review): the displacement direction (m, 1)/sqrt(1+m^2) is NOT orthogonal
# to the line direction (1, m); an orthogonal direction would be (m, -1) up to
# sign -- confirm whether the sign of the y-component was intended.
x=x1 + noise*m/sqrt(1+square(m))
y=y1 + noise/sqrt(1+square(m))
twoD_obsmatrix=array([x,y])
#to visualise the data we must plot it.
rcParams['figure.figsize'] = 7, 7
figure,axis=subplots(1,1)
xlim(-50,50)
ylim(-50,50)
axis.plot(twoD_obsmatrix[0,:],twoD_obsmatrix[1,:],'o',color='green',markersize=6)
#the line from which we generated the data is plotted in red
axis.plot(x1[:],y1[:],linewidth=0.3,color='red')
title('One-Dimensional sub-space with noise')
xlabel("x axis")
_=ylabel("y axis")
"""
Explanation: Some Formal Background (Skip if you just want code examples)
PCA is a useful statistical technique that has found application in fields such as face recognition and image compression, and is a common technique for finding patterns in data of high dimension.
In machine learning problems data is often high dimensional - images, bag-of-word descriptions etc. In such cases we cannot expect the training data to densely populate the space, meaning that there will be large parts in which little is known about the data. Hence it is expected that only a small number of directions are relevant for describing the data to a reasonable accuracy.
The data vectors may be very high dimensional, they will therefore typically lie closer to a much lower dimensional 'manifold'.
Here we concentrate on linear dimensional reduction techniques. In this approach a high dimensional datapoint $\mathbf{x}$ is 'projected down' to a lower dimensional vector $\mathbf{y}$ by:
$$\mathbf{y}=\mathbf{F}\mathbf{x}+\text{const}.$$
where the matrix $\mathbf{F}\in\mathbb{R}^{\text{M}\times \text{D}}$, with $\text{M}<\text{D}$. Here $\text{M}=\dim(\mathbf{y})$ and $\text{D}=\dim(\mathbf{x})$.
From the above scenario, we assume that
The number of principal components to use is $\text{M}$.
The dimension of each data point is $\text{D}$.
The number of data points is $\text{N}$.
We express the approximation for datapoint $\mathbf{x}^n$ as:$$\mathbf{x}^n \approx \mathbf{c} + \sum\limits_{i=1}^{\text{M}}y_i^n \mathbf{b}^i \equiv \tilde{\mathbf{x}}^n.$$
* Here the vector $\mathbf{c}$ is a constant and defines a point in the lower dimensional space.
* The $\mathbf{b}^i$ define vectors in the lower dimensional space (also known as 'principal component coefficients' or 'loadings').
* The $y_i^n$ are the low dimensional co-ordinates of the data.
Our motive is to find the reconstruction $\tilde{\mathbf{x}}^n$ given the lower dimensional representation $\mathbf{y}^n$(which has components $y_i^n,i = 1,...,\text{M})$. For a data space of dimension $\dim(\mathbf{x})=\text{D}$, we hope to accurately describe the data using only a small number $(\text{M}\ll \text{D})$ of coordinates of $\mathbf{y}$.
To determine the best lower dimensional representation it is convenient to use the square distance error between $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:$$\text{E}(\mathbf{B},\mathbf{Y},\mathbf{c})=\sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}[x_i^n - \tilde{x}i^n]^2.$$
* Here the basis vectors are defined as $\mathbf{B} = [\mathbf{b}^1,...,\mathbf{b}^\text{M}]$ (defining $[\text{B}]{i,j} = b_i^j$).
* Corresponding low dimensional coordinates are defined as $\mathbf{Y} = [\mathbf{y}^1,...,\mathbf{y}^\text{N}].$
* Also, $x_i^n$ and $\tilde{x}_i^n$ represents the coordinates of the data points for the original and the reconstructed data respectively.
* The bias $\mathbf{c}$ is given by the mean of the data $\sum_n\mathbf{x}^n/\text{N}$.
Therefore, for simplification purposes we centre our data, so as to set $\mathbf{c}$ to zero. Now we concentrate on finding the optimal basis $\mathbf{B}$( which has the components $\mathbf{b}^i, i=1,...,\text{M} $).
Deriving the optimal linear reconstruction
To find the best basis vectors $\mathbf{B}$ and corresponding low dimensional coordinates $\mathbf{Y}$, we may minimize the sum of squared differences between each vector $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:
$\text{E}(\mathbf{B},\mathbf{Y}) = \sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}\left[x_i^n - \sum\limits_{j=1}^{\text{M}}y_j^nb_i^j\right]^2 = \text{trace} \left( (\mathbf{X}-\mathbf{B}\mathbf{Y})^T(\mathbf{X}-\mathbf{B}\mathbf{Y}) \right)$
where $\mathbf{X} = [\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Considering the above equation under the orthonormality constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$ (i.e the basis vectors are mutually orthogonal and of unit length), we differentiate it w.r.t $y_k^n$. The squared error $\text{E}(\mathbf{B},\mathbf{Y})$ therefore has zero derivative when:
$y_k^n = \sum_i b_i^kx_i^n$
By substituting this solution in the above equation, the objective becomes
$\text{E}(\mathbf{B}) = (\text{N}-1)\left[\text{trace}(\mathbf{S}) - \text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)\right],$
where $\mathbf{S}$ is the sample covariance matrix of the data.
To minimise equation under the constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$, we use a set of Lagrange Multipliers $\mathbf{L}$, so that the objective is to minimize:
$-\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)+\text{trace}\left(\mathbf{L}\left(\mathbf{B}^T\mathbf{B} - \mathbf{I}\right)\right).$
Since the constraint is symmetric, we can assume that $\mathbf{L}$ is also symmetric. Differentiating with respect to $\mathbf{B}$ and equating to zero we obtain that at the optimum
$\mathbf{S}\mathbf{B} = \mathbf{B}\mathbf{L}$.
This is a form of eigen-equation so that a solution is given by taking $\mathbf{L}$ to be diagonal and $\mathbf{B}$ as the matrix whose columns are the corresponding eigenvectors of $\mathbf{S}$. In this case,
$\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right) =\text{trace}(\mathbf{L}),$
which is the sum of the eigenvalues corresponding to the eigenvectors forming $\mathbf{B}$. Since we wish to minimise $\text{E}(\mathbf{B})$, we take the eigenvectors with the largest corresponding eigenvalues.
Whilst the solution to this eigen-problem is unique, this only serves to define the solution subspace since one may rotate and scale $\mathbf{B}$ and $\mathbf{Y}$ such that the value of the squared loss is exactly the same. The justification for choosing the non-rotated eigen solution is given by the additional requirement that the principal components corresponds to directions of maximal variance.
Maximum variance criterion
We aim to find that single direction $\mathbf{b}$ such that, when the data is projected onto this direction, the variance of this projection is maximal amongst all possible such projections.
The projection of a datapoint onto a direction $\mathbf{b}$ is $\mathbf{b}^T\mathbf{x}^n$ for a unit length vector $\mathbf{b}$. Hence the sum of squared projections is: $$\sum\limits_{n}\left(\mathbf{b}^T\mathbf{x}^n\right)^2 = \mathbf{b}^T\left[\sum\limits_{n}\mathbf{x}^n(\mathbf{x}^n)^T\right]\mathbf{b} = (\text{N}-1)\mathbf{b}^T\mathbf{S}\mathbf{b} = \lambda(\text{N} - 1)$$
which ignoring constants, is simply the negative of the equation for a single retained eigenvector $\mathbf{b}$(with $\mathbf{S}\mathbf{b} = \lambda\mathbf{b}$). Hence the optimal single $\text{b}$ which maximises the projection variance is given by the eigenvector corresponding to the largest eigenvalues of $\mathbf{S}.$ The second largest eigenvector corresponds to the next orthogonal optimal direction and so on. This explains why, despite the squared loss equation being invariant with respect to arbitrary rotation of the basis vectors, the ones given by the eigen-decomposition have the additional property that they correspond to directions of maximal variance. These maximal variance directions found by PCA are called the $\text{principal} $ $\text{directions}.$
There are two eigenvalue methods through which shogun can perform PCA namely
* Eigenvalue Decomposition Method.
* Singular Value Decomposition.
EVD vs SVD
The EVD viewpoint requires that one compute the eigenvalues and eigenvectors of the covariance matrix, which is the product of $\mathbf{X}\mathbf{X}^\text{T}$, where $\mathbf{X}$ is the data matrix. Since the covariance matrix is symmetric, the matrix is diagonalizable, and the eigenvectors can be normalized such that they are orthonormal:
$\mathbf{S}=\frac{1}{\text{N}-1}\mathbf{X}\mathbf{X}^\text{T},$
where the $\text{D}\times\text{N}$ matrix $\mathbf{X}$ contains all the data vectors: $\mathbf{X}=[\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Writing the $\text{D}\times\text{N}$ matrix of eigenvectors as $\mathbf{E}$ and the eigenvalues as an $\text{N}\times\text{N}$ diagonal matrix $\mathbf{\Lambda}$, the eigen-decomposition of the covariance $\mathbf{S}$ is
$\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{X}^\text{T}\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\tilde{\mathbf{E}}=\tilde{\mathbf{E}}\mathbf{\Lambda},$
where we defined $\tilde{\mathbf{E}}=\mathbf{X}^\text{T}\mathbf{E}$. The final expression above represents the eigenvector equation for $\mathbf{X}^\text{T}\mathbf{X}.$ This is a matrix of dimensions $\text{N}\times\text{N}$ so that calculating the eigen-decomposition takes $\mathcal{O}(\text{N}^3)$ operations, compared with $\mathcal{O}(\text{D}^3)$ operations in the original high-dimensional space. We then can therefore calculate the eigenvectors $\tilde{\mathbf{E}}$ and eigenvalues $\mathbf{\Lambda}$ of this matrix more easily. Once found, we use the fact that the eigenvalues of $\mathbf{S}$ are given by the diagonal entries of $\mathbf{\Lambda}$ and the eigenvectors by
$\mathbf{E}=\mathbf{X}\tilde{\mathbf{E}}\mathbf{\Lambda}^{-1}$
On the other hand, applying SVD to the data matrix $\mathbf{X}$ follows like:
$\mathbf{X}=\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}$
where $\mathbf{U}^\text{T}\mathbf{U}=\mathbf{I}\text{D}$ and $\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}\text{N}$ and $\mathbf{\Sigma}$ is a diagonal matrix of the (positive) singular values. We assume that the decomposition has ordered the singular values so that the upper left diagonal element of $\mathbf{\Sigma}$ contains the largest singular value.
Attempting to construct the covariance matrix $(\mathbf{X}\mathbf{X}^\text{T})$from this decomposition gives:
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)^\text{T}$
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{V}\mathbf{\Sigma}\mathbf{U}^\text{T}\right)$
and since $\mathbf{V}$ is an orthogonal matrix $\left(\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}\right),$
$\mathbf{X}\mathbf{X}^\text{T}=\left(\mathbf{U}\mathbf{\Sigma}^\mathbf{2}\mathbf{U}^\text{T}\right)$
Since it is in the form of an eigen-decomposition, the PCA solution given by performing the SVD decomposition of $\mathbf{X}$, for which the eigenvectors are then given by $\mathbf{U}$, and corresponding eigenvalues by the square of the singular values.
CPCA Class Reference (Shogun)
CPCA class of Shogun inherits from the CPreprocessor class. Preprocessors are transformation functions that doesn't change the domain of the input features. Specifically, CPCA performs principal component analysis on the input vectors and keeps only the specified number of eigenvectors. On preprocessing, the stored covariance matrix is used to project vectors into eigenspace.
Performance of PCA depends on the algorithm used according to the situation in hand.
Our PCA preprocessor class provides 3 method options to compute the transformation matrix:
$\text{PCA(EVD)}$ sets $\text{PCAmethod == EVD}$ : Eigen Value Decomposition of Covariance Matrix $(\mathbf{XX^T}).$
The covariance matrix $\mathbf{XX^T}$ is first formed internally and then
its eigenvectors and eigenvalues are computed using QR decomposition of the matrix.
The time complexity of this method is $\mathcal{O}(D^3)$ and should be used when $\text{N > D.}$
$\text{PCA(SVD)}$ sets $\text{PCAmethod == SVD}$ : Singular Value Decomposition of feature matrix $\mathbf{X}$.
The transpose of feature matrix, $\mathbf{X^T}$, is decomposed using SVD. $\mathbf{X^T = UDV^T}.$
The matrix V in this decomposition contains the required eigenvectors and
the diagonal entries of the diagonal matrix D correspond to the non-negative
eigenvalues.The time complexity of this method is $\mathcal{O}(DN^2)$ and should be used when $\text{N < D.}$
$\text{PCA(AUTO)}$ sets $\text{PCAmethod == AUTO}$ : This mode automagically chooses one of the above modes for the user based on whether $\text{N>D}$ (chooses $\text{EVD}$) or $\text{N<D}$ (chooses $\text{SVD}$)
PCA on 2D data
Step 1: Get some data
We will generate the toy data by adding orthogonal noise to a set of points lying on an arbitrary 2d line. We expect PCA to recover this line, which is a one-dimensional linear sub-space.
End of explanation
"""
# --- Fit Shogun's PCA preprocessor to the 2D toy data ---
#convert the observation matrix into dense feature matrix.
train_features = features(twoD_obsmatrix)
#PCA(EVD) is chosen since N=100 and D=2 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = sg.transformer('PCA', method='EVD')
#since we are projecting down the 2d data, the target dim is 1. But here the exhaustive method is detailed by
#setting the target dimension to 2 to visualize both the eigen vectors.
#However, in future examples we will get rid of this step by implementing it directly.
preprocessor.put('target_dim', 2)
#fit() computes the transformation matrix; it also centralises the data
#by subtracting its mean from it.
preprocessor.fit(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get('mean_vector')
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
"""
Explanation: Step 2: Subtract the mean.
For PCA to work properly, we must subtract the mean from each of the data dimensions. The mean subtracted is the average across each dimension. So, all the $x$ values have $\bar{x}$ subtracted, and all the $y$ values have $\bar{y}$ subtracted from them, where:$$\bar{\mathbf{x}} = \frac{\sum\limits_{i=1}^{n}x_i}{n}$$ $\bar{\mathbf{x}}$ denotes the mean of the $x_i^{'s}$
Shogun's way of doing things :
Preprocessor PCA performs principial component analysis on input feature vectors/matrices. It provides an interface to set the target dimension by $\text{put('target_dim', target_dim) method}.$ When the $\text{init()}$ method in $\text{PCA}$ is called with proper
feature matrix $\text{X}$ (with say $\text{N}$ number of vectors and $\text{D}$ feature dimension), a transformation matrix is computed and stored internally. It inherently also centralizes the data by subtracting the mean from it.
End of explanation
"""
# --- Retrieve the PCA solution: eigenvectors (columns of E) and eigenvalues ---
#Get the eigenvectors(We will get two of these since we set the target to 2).
E = preprocessor.get('transformation_matrix')
#Get all the eigenvalues returned by PCA.
eig_value=preprocessor.get('eigenvalues_vector')
# split the columns of E into the two individual eigenvectors,
# and pair each with its corresponding eigenvalue.
e1 = E[:,0]
e2 = E[:,1]
eig_value1 = eig_value[0]
eig_value2 = eig_value[1]
"""
Explanation: Step 3: Calculate the covariance matrix
To understand the relationship between 2 dimension we define $\text{covariance}$. It is a measure to find out how much the dimensions vary from the mean $with$ $respect$ $to$ $each$ $other.$$$cov(X,Y)=\frac{\sum\limits_{i=1}^{n}(X_i-\bar{X})(Y_i-\bar{Y})}{n-1}$$
A useful way to get all the possible covariance values between all the different dimensions is to calculate them all and put them in a matrix.
Example: For a 3d dataset with usual dimensions of $x,y$ and $z$, the covariance matrix has 3 rows and 3 columns, and the values are this:
$$\mathbf{S} = \begin{pmatrix}cov(x,x)&cov(x,y)&cov(x,z)\\cov(y,x)&cov(y,y)&cov(y,z)\\cov(z,x)&cov(z,y)&cov(z,z)\end{pmatrix}$$
Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix
Find the eigenvectors $e^1,....e^M$ of the covariance matrix $\mathbf{S}$.
Shogun's way of doing things :
Step 3 and Step 4 are directly implemented by the PCA preprocessor of the Shogun toolbox. The transformation matrix is essentially a $\text{D}$$\times$$\text{M}$ matrix, the columns of which correspond to the eigenvectors of the covariance matrix $(\text{X}\text{X}^\text{T})$ having top $\text{M}$ eigenvalues.
End of explanation
"""
#find out the M eigenvectors corresponding to top M number of eigenvalues and store it in E
#Here M=1
#slope of e1 & e2 (each eigenvector defines a line through the mean;
#slope = y-component / x-component)
m1=e1[1]/e1[0]
m2=e2[1]/e2[0]
#generate the two lines spanning the plotting window
x1=range(-50,50)
x2=x1
y1=multiply(m1,x1)
y2=multiply(m2,x2)
#plot the data along with those two eigenvectors
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x1[:], y1[:], linewidth=0.7, color='black')
axis.plot(x2[:], y2[:], linewidth=0.7, color='blue')
# invisible rectangles act as colour proxies for the legend entries
p1 = Rectangle((0, 0), 1, 1, fc="black")
p2 = Rectangle((0, 0), 1, 1, fc="blue")
legend([p1,p2],["1st eigenvector","2nd eigenvector"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Eigenvectors selection')
xlabel("x axis")
_=ylabel("y axis")
"""
Explanation: Step 5: Choosing components and forming a feature vector.
Lets visualize the eigenvectors and decide upon which to choose as the $principal$ $component$ of the data set.
End of explanation
"""
#The eigenvector corresponding to the higher eigenvalue (i.e. eig_value2) is chosen (i.e. e2).
#E is overwritten here: it now holds the single principal component (the
#feature vector), no longer the full 2-column transformation matrix.
E=e2
"""
Explanation: In the above figure, the blue line is a good fit of the data. It shows the most significant relationship between the data dimensions.
It turns out that the eigenvector with the $highest$ eigenvalue is the $principal$ $component$ of the data set.
Form the matrix $\mathbf{E}=[\mathbf{e}^1,...,\mathbf{e}^M].$
Here $\text{M}$ represents the target dimension of our final projection
End of explanation
"""
#transform all 2-dimensional feature matrices to target-dimensional approximations.
#yn holds the projected coordinates, one row per eigenvector.
yn=preprocessor.transform(train_features).get('feature_matrix')
#Since, here we are manually trying to find the eigenvector corresponding to the top eigenvalue.
#The 2nd row of yn is chosen as it corresponds to the required eigenvector e2.
yn1=yn[1,:]
"""
Explanation: Step 6: Projecting the data to its Principal Components.
This is the final step in PCA. Once we have chosen the components(eigenvectors) that we wish to keep in our data and formed a feature vector, we simply take the vector and multiply it on the left of the original dataset.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by
$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$
Here the $\mathbf{E}^T$ is the matrix with the eigenvectors in rows, with the most significant eigenvector at the top. The mean adjusted data, with data items in each column, with each row holding a seperate dimension is multiplied to it.
Shogun's way of doing things :
Step 6 can be performed by shogun's PCA preprocessor as follows:
The transformation matrix that we got after $\text{init()}$ is used to transform all $\text{D-dim}$ feature matrices (with $\text{D}$ feature dimensions) supplied, via $\text{apply_to_feature_matrix methods}$. This transformation outputs the $\text{M-Dim}$ approximation of all these input vectors and matrices (where $\text{M}$ $\leq$ $\text{min(D,N)}$).
End of explanation
"""
# Reconstruct each point from its 1D projection: x~ = m + E*y
# (add the mean back after scaling the chosen eigenvector by the projection).
x_new=(yn1 * E[0]) + tile(mean_x,[n,1]).T[0]
y_new=(yn1 * E[1]) + tile(mean_y,[n,1]).T[0]
"""
Explanation: Step 5 and Step 6 can be applied directly with Shogun's PCA preprocessor (from next example). It has been done manually here to show the exhaustive nature of Principal Component Analysis.
Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
"""
# Plot original 2D points, their 1D reconstructions, and the projection lines.
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x_new, y_new, 'o', color='blue', markersize=5, label="red")
title('PCA Projection of 2D data into 1D subspace')
xlabel("x axis")
ylabel("y axis")
#add some legend for information (rectangles are colour proxies)
p1 = Rectangle((0, 0), 1, 1, fc="r")
p2 = Rectangle((0, 0), 1, 1, fc="g")
p3 = Rectangle((0, 0), 1, 1, fc="b")
legend([p1,p2,p3],["normal projection","2d data","1d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
#plot the projections in red: one segment joins each original point to its reconstruction
for i in range(n):
    axis.plot([x[i],x_new[i]],[y[i],y_new[i]] , color='red')
"""
Explanation: The new data is plotted below
End of explanation
"""
rcParams['figure.figsize'] = 8,8
#number of points
n=100
#generate the data: random plane coefficients a*x + b*y + c*z + d = 0
a=random.randint(1,20)
b=random.randint(1,20)
c=random.randint(1,20)
d=random.randint(1,20)
# NOTE(review): numpy.random.random_integers is deprecated in modern NumPy
# (use randint / Generator.integers) -- kept as-is to preserve behavior.
x1=random.random_integers(-20,20,n)
y1=random.random_integers(-20,20,n)
z1=-(a*x1+b*y1+d)/c
#generate the noise
noise=random.random_sample([n])*random.random_integers(-30,30,n)
#the normal unit vector is [a,b,c]/magnitude
magnitude=sqrt(square(a)+square(b)+square(c))
normal_vec=array([a,b,c]/magnitude)
#add the noise orthogonally (along the plane's unit normal)
x=x1+noise*normal_vec[0]
y=y1+noise*normal_vec[1]
z=z1+noise*normal_vec[2]
# observation matrix: 3 rows (x, y, z), one column per point
threeD_obsmatrix=array([x,y,z])
#to visualize the data, we must plot it.
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
#plot the noisy data generated by distorting a plane
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p2],["3d data"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Two dimensional subspace with noise')
# grid of the underlying (noise-free) plane, used later for reference
xx, yy = meshgrid(range(-30,30), range(-30,30))
zz=-(a * xx + b * yy + d) / c
"""
Explanation: PCA on a 3d data.
Step1: Get some data
We generate points from a plane and then add random noise orthogonal to it. The general equation of a plane is: $$\text{a}\mathbf{x}+\text{b}\mathbf{y}+\text{c}\mathbf{z}+\text{d}=0$$
End of explanation
"""
# --- Fit PCA to the noisy 3D data, projecting down to 2 dimensions ---
#convert the observation matrix into dense feature matrix.
train_features = features(threeD_obsmatrix)
#PCA(EVD) is chosen since N=100 and D=3 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = sg.transformer('PCA', method='EVD')
#If we set the target dimension to 2, Shogun would automagically preserve the required 2 eigenvectors(out of 3) according to their
#eigenvalues.
preprocessor.put('target_dim', 2)
preprocessor.fit(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get('mean_vector')
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
mean_z=mean_datapoints[2]
"""
Explanation: Step 2: Subtract the mean.
End of explanation
"""
#get the required eigenvectors corresponding to top 2 eigenvalues.
#E is 3x2: one column per retained eigenvector.
E = preprocessor.get('transformation_matrix')
"""
Explanation: Step 3 & Step 4: Calculate the eigenvectors of the covariance matrix
End of explanation
"""
#This can be performed by shogun's PCA preprocessor as follows:
#yn is the 2xN matrix of projected (mean-centred) coordinates.
yn=preprocessor.transform(train_features).get('feature_matrix')
"""
Explanation: Steps 5: Choosing components and forming a feature vector.
Since we performed PCA for a target $\dim = 2$ for the $3 \dim$ data, we are directly given
the two required eigenvectors in $\mathbf{E}$
E is automagically filled by setting target dimension = M. This is different from the 2d data example where we implemented this step manually.
Step 6: Projecting the data to its Principal Components.
End of explanation
"""
# Reconstruct each 3D point from its 2D projection: x~ = m + E.y,
# then plot the originals, their reconstructions and the joining segments.
new_data=dot(E,yn)
x_new=new_data[0,:]+tile(mean_x,[n,1]).T[0]
y_new=new_data[1,:]+tile(mean_y,[n,1]).T[0]
z_new=new_data[2,:]+tile(mean_z,[n,1]).T[0]
#all the above points lie on the same plane. To make it more clear we will plot the projection also.
fig=pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p1,p2,p3],["normal projection","3d data","2d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
title('PCA Projection of 3D data into 2D subspace')
# iterate over n (was a hard-coded 100, duplicating the sample-size
# constant defined earlier) so the plot stays correct if n changes.
for i in range(n):
    ax.scatter(x_new[i], y_new[i], z_new[i],marker='o', color='b')
    ax.plot([x[i],x_new[i]],[y[i],y_new[i]],[z[i],z_new[i]],color='r')
"""
Explanation: Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
"""
rcParams['figure.figsize'] = 10, 10
import os
def get_imlist(path, ext='.pgm'):
    """Return filenames of all images in *path* whose name ends with *ext*.

    The previous docstring claimed "jpg images" while the code filtered on
    '.pgm'; the extension is now an explicit parameter (defaulting to
    '.pgm', the AT&T face-dataset format) so behavior is unchanged for
    existing callers.

    path: directory to scan (non-recursive).
    ext:  filename suffix to keep, including the leading dot.
    Returns a list of full paths (in os.listdir order).
    """
    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith(ext)]
#set path of the training images
path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/training/')
#set no. of rows that the images will be resized to.
k1=100
#set no. of columns that the images will be resized to.
k2=100
# collect all training image paths into a numpy array
filenames = get_imlist(path_train)
filenames = array(filenames)
#n is total number of images that has to be analysed.
n=len(filenames)
"""
Explanation: PCA Performance
Up till now, we were using the EigenValue Decomposition method to compute the transformation matrix$\text{(N>D)}$ but for the next example $\text{(N<D)}$ we will be using Singular Value Decomposition.
Practical Example : Eigenfaces
The problem with the image representation we are given is its high dimensionality. Two-dimensional $\text{p} \times \text{q}$ grayscale images span a $\text{m=pq}$ dimensional vector space, so an image with $\text{100}\times\text{100}$ pixels lies in a $\text{10,000}$ dimensional image space already.
The question is, are all dimensions really useful for us?
$\text{Eigenfaces}$ are based on the dimensional reduction approach of $\text{Principal Component Analysis(PCA)}$. The basic idea is to treat each image as a vector in a high dimensional space. Then, $\text{PCA}$ is applied to the set of images to produce a new reduced subspace that captures most of the variability between the input images. The $\text{Principal Component Vectors}$(eigenvectors of the sample covariance matrix) are called the $\text{Eigenfaces}$. Every input image can be represented as a linear combination of these eigenfaces by projecting the image onto the new eigenfaces space. Thus, we can perform the identification process by matching in this reduced space. An input image is transformed into the $\text{eigenspace,}$ and the nearest face is identified using a $\text{Nearest Neighbour approach.}$
Step 1: Get some data.
Here data means those Images which will be used for training purposes.
End of explanation
"""
# small helper used throughout to render an image without axis clutter.
def showfig(image):
    """Display *image* in grayscale with both plot axes hidden."""
    rendered = imshow(image, cmap='gray')
    for axis_obj in (rendered.axes.get_xaxis(), rendered.axes.get_yaxis()):
        axis_obj.set_visible(False)
from PIL import Image
from scipy import misc
# to get a hang of the data, lets see some part of the dataset images.
# Show the first 49 training faces on a 7x7 grid.
fig = pyplot.figure()
title('The Training Dataset')
for i in range(49):
    fig.add_subplot(7,7,i+1)
    # load as 8-bit grayscale ('L') and resize to the common k1 x k2 shape
    train_img=array(Image.open(filenames[i]).convert('L'))
    train_img=misc.imresize(train_img, [k1,k2])
    showfig(train_img)
"""
Explanation: Lets have a look on the data:
End of explanation
"""
# Build the observation matrix obs_matrix: each training image becomes one
# flattened row vector, and the matrix is transposed so every *column* is
# one image (the layout Shogun's features() expects).
def _load_face_vector(fname):
    # read the image as 8-bit grayscale ('L'), resize to k1 x k2, convert
    # to float64 (features accepts only double data) and flatten to a row.
    img = array(Image.open(fname).convert('L'))
    img = misc.imresize(img, [k1, k2])
    return array(img, dtype='double').flatten()

# Stack all image vectors with a single vstack. The previous version called
# vstack inside the loop, re-copying the growing matrix on every iteration
# (quadratic in the number of images); one batched vstack is linear.
train_img = vstack([_load_face_vector(filenames[i]) for i in range(n)])
#form the observation matrix
obs_matrix = train_img.T
"""
Explanation: Represent every image $I_i$ as a vector $\Gamma_i$
End of explanation
"""
# Fit PCA on the image vectors; AUTO picks SVD here because D (=k1*k2) >> N.
train_features = features(obs_matrix)
preprocessor= sg.transformer('PCA', method='AUTO')
# keep the top 100 eigenfaces
preprocessor.put('target_dim', 100)
preprocessor.fit(train_features)
# mean image vector, needed later for centering and reconstruction
mean=preprocessor.get('mean_vector')
"""
Explanation: Step 2: Subtract the mean
It is very important that the face images $I_1,I_2,...,I_M$ are $centered$ and of the $same$ size
We observe here that the no. of $\dim$ for each image is far greater than no. of training images. This calls for the use of $\text{SVD}$.
Setting the $\text{PCA}$ in the $\text{AUTO}$ mode does this automagically according to the situation.
End of explanation
"""
#get the required eigenvectors corresponding to top 100 eigenvalues
E = preprocessor.get('transformation_matrix')
#lets see how these eigenfaces/eigenvectors look like:
fig1 = pyplot.figure()
title('Top 20 Eigenfaces')
for i in range(20):
    a = fig1.add_subplot(5,4,i+1)
    # each eigenface is a column of E, reshaped back to image dimensions
    eigen_faces=E[:,i].reshape([k1,k2])
    showfig(eigen_faces)
"""
Explanation: Step 3 & Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix.
End of explanation
"""
#we perform the required dot product: project every training image into the
#100-dimensional eigenface space (yn is 100 x n, one column per image).
yn=preprocessor.transform(train_features).get('feature_matrix')
"""
Explanation: These 20 eigenfaces are not sufficient for a good image reconstruction. Having more eigenvectors gives us the most flexibility in the number of faces we can reconstruct. Though we are adding vectors with low variance, they are in directions of change nonetheless, and an external image that is not in our database could in fact need these eigenvectors to get even relatively close to it. But at the same time we must also keep in mind that adding excessive eigenvectors results in addition of little or no variance, slowing down the process.
Clearly a tradeoff is required.
We here set for M=100.
Step 5: Choosing components and forming a feature vector.
Since we set target $\dim = 100$ for this $n \dim$ data, we are directly given the $100$ required eigenvectors in $\mathbf{E}$
E is automagically filled. This is different from the 2d data example where we implemented this step manually.
Step 6: Projecting the data to its Principal Components.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by $$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$$
End of explanation
"""
# Reconstruct every training image from its 100 eigenface coefficients:
# x~ = m + E.y  (note: the name `re` shadows Python's regex module here).
re=tile(mean,[n,1]).T[0] + dot(E,yn)
#lets plot the reconstructed images.
fig2 = pyplot.figure()
title('Reconstructed Images from 100 eigenfaces')
for i in range(1,50):
    re1 = re[:,i].reshape([k1,k2])
    fig2.add_subplot(7,7,i)
    showfig(re1)
"""
Explanation: Step 7: Form the approximate reconstruction of the original image $I_n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\mathbf{x}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
"""
#set path of the testing images (variable name kept from the training cell)
path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/testing/')
test_files=get_imlist(path_train)
test_img=array(Image.open(test_files[0]).convert('L'))
rcParams.update({'figure.figsize': (3, 3)})
#we plot the test image , for which we have to identify a good match from the training images we already have
fig = pyplot.figure()
title('The Test Image')
showfig(test_img)
#We flatten out our test image just the way we have done for the other images
test_img=misc.imresize(test_img, [k1,k2])
test_img=array(test_img, dtype='double')
test_img=test_img.flatten()
#We centralise the test image by subtracting the mean from it.
test_f=test_img-mean
"""
Explanation: Recognition part.
In our face recognition process using the Eigenfaces approach, in order to recognize an unseen image, we proceed with the same preprocessing steps as applied to the training images.
Test images are represented in terms of eigenface coefficients by projecting them into face space$\text{(eigenspace)}$ calculated during training. Test sample is recognized by measuring the similarity distance between the test sample and all samples in the training. The similarity measure is a metric of distance calculated between two vectors. Traditional Eigenface approach utilizes $\text{Euclidean distance}$.
End of explanation
"""
#We have already projected our training images into pca subspace as yn.
train_proj = yn
#Projecting our (already mean-centred) test image into pca subspace
test_proj = dot(E.T, test_f)
"""
Explanation: Here we have to project our training image as well as the test image on the PCA subspace.
The Eigenfaces method then performs face recognition by:
1. Projecting all training samples into the PCA subspace.
2. Projecting the query image into the PCA subspace.
3. Finding the nearest neighbour between the projected training images and the projected query image.
End of explanation
"""
#To get Euclidean Distance as the distance measure use EuclideanDistance.
workfeat = features(mat(train_proj))
testfeat = features(mat(test_proj).T)
RaRb = sg.distance('EuclideanDistance')
RaRb.init(testfeat, workfeat)
#The distance between one test image w.r.t all the training is stacked in matrix d.
d=empty([n,1])
for i in range(n):
    d[i]= RaRb.distance(0,i)
#The training image having the minimum distance is the nearest neighbour
min_distance_index = d.argmin()
iden=array(Image.open(filenames[min_distance_index]))
title('Identified Image')
showfig(iden)
"""
Explanation: Shogun's way of doing things:
Shogun uses CEuclideanDistance class to compute the familiar Euclidean distance for real valued features. It computes the square root of the sum of squared disparity between the corresponding feature dimensions of two data points.
$\mathbf{d(x,x')=}$$\sqrt{\mathbf{\sum\limits_{i=0}^{n}}|\mathbf{x_i}-\mathbf{x'_i}|^2}$
End of explanation
"""
|
daniel-acuna/python_data_science_intro | notebooks/lab-sentiment_analysis.ipynb | mit | import findspark
findspark.init()
import pyspark
import numpy as np
conf = pyspark.SparkConf().\
setAppName('sentiment-analysis').\
setMaster('local[*]')
from pyspark.sql import SQLContext, HiveContext
sc = pyspark.SparkContext(conf=conf)
sqlContext = HiveContext(sc)
# dataframe functions
from pyspark.sql import functions as fn
"""
Explanation: Download this notebook from https://raw.githubusercontent.com/daniel-acuna/python_data_science_intro/master/notebooks/lab-sentiment_analysis.ipynb
Introduction to Spark ML: An application to Sentiment Analysis
Spark ML
In previous versions of Spark, most Machine Learning functionality was provided through RDD (Resilient Distributed Datasets). However, to improve performance and communicability of results, Spark developers ported the ML functionality to work almost exclusively with DataFrames. Future releases of Spark will not update the support of ML with RDDs.
In this modern Spark ML approach, there are Estimators and Transformers. Estimators have some parameters that need to be fit into the data. After fitting, Estimators return Transformers. Transformers can be applied to dataframes, taking one (or several) columns as input and creating one (or several) columns as output.
A Pipeline combines several Transformers with a final Estimator. The Pipeline, therefore, can be fit to the data because the final step of the process (the Estimator) is fit to the data. The result of the fitting is a pipelined Transformer that takes an input dataframe through all the stages of the Pipeline.
There is a third type of functionality that allows to select features.
For example, for analyzing text, a typical pipelined estimator is as follows:
<img src="http://spark.apache.org/docs/latest/img/ml-Pipeline.png" alt="ML Pipeline" style="width: 100%;"/>
After fitting, the Pipeline becomes a transformer:
<img src="http://spark.apache.org/docs/latest/img/ml-PipelineModel.png" alt="ML Model" style="width: 100%;"/>
(Images from http://spark.apache.org/docs/latest/ml-pipeline.html)
Importantly, transformers can be saved and exchanged with other data scientists, improving reproducibility.
Loading packages and connecting to Spark cluster
End of explanation
"""
# Create the demo RDDs: documents are [doc_id, text, user_id],
# users are [user_id, name, age].
documents_rdd = sc.parallelize([
    [1, 'cats are cute', 0],
    [2, 'dogs are playfull', 0],
    [3, 'lions are big', 1],
    [4, 'cars are fast', 1]])
users_rdd = sc.parallelize([
    [0, 'Alice', 20],
    [1, 'Bob', 23],
    [2, 'Charles', 32]])
"""
Explanation: Introduction to dataframes
A DataFrame is a relatively new addition to Spark that stores a distributed dataset of structured columns. It is very similar to an R dataframe or a RDBS table. All columns are of the same type. A DataFrame can be constructed out of a variety of sources, such as a database, CSV files, JSON files, or a Parquet file (columnar storage). The preferred method for storing dataframes is Parquet due to its speed and compression ratio.
Manipulating a DataFrame
We can create a dataframe from a RDD using the sqlContext.
End of explanation
"""
# convert the RDDs to DataFrames, naming the columns explicitly
documents_df = documents_rdd.toDF(['doc_id', 'text', 'user_id'])
users_df = users_rdd.toDF(['user_id', 'name', 'age'])
"""
Explanation: From the previous RDDs, we can call the toDF method and specify the name of columns:
End of explanation
"""
# show the column types Spark inferred for each dataframe
documents_df.printSchema()
users_df.printSchema()
"""
Explanation: Spark will automatically try to guess the column types. We can take a look at those types:
End of explanation
"""
from pyspark.sql import functions as fn
# compute the average age of users (lazy -- nothing runs until an action)
user_age_df = users_df.select(fn.avg('age'))
user_age_df
"""
Explanation: Similar to SQL, we can apply a function to a column or several columns.
End of explanation
"""
user_age_df.show()
"""
Explanation: As you can see, the function is not evaluated until an action (e.g., take, show, collect) is taken
End of explanation
"""
users_df.join(documents_df, on='user_id').show()
"""
Explanation: We can cross (e.g., join) two dataframes ala SQL
End of explanation
"""
users_df.join(documents_df, on='user_id', how='left').show()
"""
Explanation: We can also do outer joins
End of explanation
"""
# count documents per user (count('text') skips the nulls from the left join)
users_df.join(documents_df, 'user_id', how='left').\
    groupby('user_id', 'name').\
    agg(fn.count('text')).\
    show()
"""
Explanation: We can apply group functions
End of explanation
"""
# same aggregation, but alias() renames the computed column to 'n_pets'
users_df.join(documents_df, 'user_id', how='left').\
    groupby('user_id', 'name').\
    agg(fn.count('text').alias('n_pets')).\
    show()
"""
Explanation: We can change the name of computed columns:
End of explanation
"""
users_df.withColumn('name_length', fn.length('name')).show()
"""
Explanation: Add columns:
End of explanation
"""
from pyspark.ml.feature import Tokenizer
"""
Explanation: There are many, many types of functions. E.g., see here
Transformers and Estimators
There are several ways of transforming the data from raw input to something that can be analyzed with a statistical model.
Some examples of such transformers are displayed below:
Tokenizer
Suppose that we want to split the words or tokens of a document. This is what Tokenizer does.
End of explanation
"""
# the tokenizer object: splits the 'text' column into a 'words' array column
tokenizer = Tokenizer().setInputCol('text').setOutputCol('words')
"""
Explanation: Almost all transfomers and estimator require you to specificy the input column of the dataframe and the output column that will be added to the dataframe.
End of explanation
"""
tokenizer.transform(documents_df).show()
"""
Explanation: We can now transform the dataframe
End of explanation
"""
from pyspark.ml.feature import CountVectorizer
"""
Explanation: CountVectorizer
This transformer counts how many times a word appears in a list and produces a vector with such counts. This is very useful for text analysis.
End of explanation
"""
count_vectorizer_estimator = CountVectorizer().setInputCol('words').setOutputCol('features')
"""
Explanation: A CountVectorizer is different from a Tokenizer because it needs to learn how many different tokens there are in the input column. With that number, it will output vectors with consistent dimensions. Therefore, CountVectorizer is an Estimator that, when fitted, returns a Transformer.
End of explanation
"""
count_vectorizer_transformer = count_vectorizer_estimator.fit(tokenizer.transform(documents_df))
"""
Explanation: Now we need to user the words column that generated by the tokenizer transformer
End of explanation
"""
count_vectorizer_transformer.transform(tokenizer.transform(documents_df)).show(truncate=False)
"""
Explanation: which results in:
End of explanation
"""
# list of words in the vocabulary (position = feature index)
count_vectorizer_transformer.vocabulary
# map sparse-vector indices 0, 3, 5 back to the words they represent
np.array(count_vectorizer_transformer.vocabulary)[[0, 3, 5]]
"""
Explanation: The column features is a sparse vector representation. For example, for the first document, we have three features present: 0, 3, and 5. By looking at the vocabulary learned by count_vectorizer_transformer, we can know which words those feature indices refer to:
End of explanation
"""
from pyspark.ml import Pipeline
# chain tokenizer -> count vectorizer into one estimator
pipeline_cv_estimator = Pipeline(stages=[tokenizer, count_vectorizer_estimator])
# fitting the pipeline fits its final estimator stage, yielding a transformer
pipeline_cv_transformer = pipeline_cv_estimator.fit(documents_df)
pipeline_cv_transformer.transform(documents_df).show()
"""
Explanation: Pipelines
Sometimes, we have long preprocessing steps that take raw data and transform it through several stages. As explained before, these complex transformations can be captured by Pipelines.
Pipelines are always estimators, even when they contain several transformers. After a pipeline is fit to the data, the pipeline becomes a transformer.
We will now define a pipeline that takes the raw text column and produces the features column previously explained
End of explanation
"""
# Notebook shell magics: download and unzip the three parquet datasets
# (reviews, sentiment lexicon, tweets) used by the rest of the tutorial.
!wget https://github.com/daniel-acuna/python_data_science_intro/blob/master/data/imdb_reviews_preprocessed.parquet.zip?raw=true -O imdb_reviews_preprocessed.parquet.zip && unzip imdb_reviews_preprocessed.parquet.zip && rm imdb_reviews_preprocessed.parquet.zip
!wget https://github.com/daniel-acuna/python_data_science_intro/blob/master/data/sentiments.parquet.zip?raw=true -O sentiments.parquet.zip && unzip sentiments.parquet.zip && rm sentiments.parquet.zip
!wget https://github.com/daniel-acuna/python_data_science_intro/blob/master/data/tweets.parquet.zip?raw=true -O tweets.parquet.zip && unzip tweets.parquet.zip && rm tweets.parquet.zip
"""
Explanation: In more complex scenarios, you can even chain Pipeline transformers. We will see this case in the actual use case below.
For a more detail explanation of Pipelines, Estimators, and Transformers, see here
Download the review, sentiment, and tweet datasets
End of explanation
"""
# load the word-sentiment lexicon: one row per word, sentiment is +1 or -1
sentiments_df = sqlContext.read.parquet('sentiments.parquet')
sentiments_df.printSchema()
"""
Explanation: Load sentiment data
End of explanation
"""
# a sample of positive words (sentiment == +1)
sentiments_df.where(fn.col('sentiment') == 1).show(5)
# a sample of negative words (sentiment == -1)
sentiments_df.where(fn.col('sentiment') == -1).show(5)
"""
Explanation: The schema is very simple: for each word, we have whether it is positive (+1) or negative (-1)
End of explanation
"""
sentiments_df.groupBy('sentiment').agg(fn.count('*')).show()
"""
Explanation: Lets see how many of each category we have
End of explanation
"""
imdb_reviews_df = sqlContext.read.parquet('imdb_reviews_preprocessed.parquet')
"""
Explanation: We have almost two times the number of negative words!
A simple approach to sentiment analysis
One simple approach for sentiment analysis is to simply count the number of positive and negative words in a text and then compute the average sentiment. Assuming that positive words are +1 and negative words are -1, we can classify a text as positive if the average sentiment is greater than zero and negative otherwise
To test our approach, we will use a sample of IMDB reviews that were tagged as positive and negative.
Let's load them:
End of explanation
"""
imdb_reviews_df.where(fn.col('score') == 1).first()
"""
Explanation: Let's take a look at a positive review
End of explanation
"""
imdb_reviews_df.where(fn.col('score') == 0).first()
"""
Explanation: And a negative one
End of explanation
"""
from pyspark.ml.feature import RegexTokenizer
"""
Explanation: The first problem that we encounter is that the reviews are in plain text. We need to split the words and then match them to sentiment_df.
To do, we will use a transformation that takes raw text and outputs a list of words
End of explanation
"""
tokenizer = RegexTokenizer().setGaps(False)\
.setPattern("\\p{L}+")\
.setInputCol("review")\
.setOutputCol("words")
"""
Explanation: RegexTokenizer extracts a sequence of matches from the input text. Regular expressions are a powerful tool to extract strings with certain characteristics.
End of explanation
"""
review_words_df = tokenizer.transform(imdb_reviews_df)
print(review_words_df)
"""
Explanation: The pattern \p{L}+ means that it will extract letters without accents (e.g., it will extract "Acuna" from "Acuña"). setGaps means that it will keep applying the rule until it can't extract new words. You have to set the input column from the incoming dataframe (in our case the review column) and the new column that will be added (e.g., words).
We are ready to transform the input dataframe imdb_reviews_df with the tokenizer:
End of explanation
"""
review_words_df.show(5)
"""
Explanation: Applying the transformation doesn't actually do anything until you apply an action. But as you can see, a new column words of type array of string was added by the transformation. We can see how it looks:
End of explanation
"""
review_words_df.select('id', fn.explode('words').alias('word')).show(5)
"""
Explanation: Now, we want to match every word from sentiment_df in the array words shown before. One way of doing this is to explode the column words to create a row for each element in that list. Then, we would join that result with the dataframe sentiment to continue further.
End of explanation
"""
review_word_sentiment_df = review_words_df.\
select('id', fn.explode('words').alias('word')).\
join(sentiments_df, 'word')
review_word_sentiment_df.show(5)
"""
Explanation: Now if we join that with sentiment, we can see if there are positive and negative words in each review:
End of explanation
"""
simple_sentiment_prediction_df = review_word_sentiment_df.\
groupBy('id').\
agg(fn.avg('sentiment').alias('avg_sentiment')).\
withColumn('predicted', fn.when(fn.col('avg_sentiment') > 0, 1.0).otherwise(0.))
simple_sentiment_prediction_df.show(5)
"""
Explanation: Now we can simply average the sentiment per review id and, say, pick positive when the average is above 0, and negative otherwise.
End of explanation
"""
imdb_reviews_df.\
join(simple_sentiment_prediction_df, 'id').\
select(fn.expr('float(score = predicted)').alias('correct')).\
select(fn.avg('correct')).\
show()
"""
Explanation: Now, lets compute the accuracy of our prediction
End of explanation
"""
# we obtain the stop words from a website
import requests
stop_words = requests.get('http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words').text.split()
stop_words[0:10]
from pyspark.ml.feature import StopWordsRemover
sw_filter = StopWordsRemover()\
.setStopWords(stop_words)\
.setCaseSensitive(False)\
.setInputCol("words")\
.setOutputCol("filtered")
"""
Explanation: Not bad with such a simple approach! But can we do better than this?
A data-driven sentiment prediction
There are couple of problems with the previous approach:
1. Positive and negative words had the same weight (e.g., good == amazing)
1. Maybe a couple of negative words make the entire review negative, whereas positive words do not
1. While our dataset is artificially balanced (12500 positive and 12500 negative), there are usually more positive than negative reviews, and therefore we should bias our predictions towards positive ones.
We could use data to estimate the sentiment that each word is contributing to the final sentiment of a review. Given that we are trying to predict negative and positve reviews, then we can use logistic regression for such binary prediction.
From text to numerical features
One typical approach is to count how many times a word appears in the text and then perform a reweighting so that words that are very common are "counted" less.
In Spark, we can achieve this by using several transformers:
Raw text => Tokens => Remove stop words => Term Frequency => Reweighting by Inverse Document frequency
To perform this sequence we will create a Pipeline to consistently represent the steps from raw text to TF-IDF.
First, we need to create a sequence to take from raw text to term frequency. This is necessary because we don't know the number of tokens in the text and therefore we need to estimate such quantity from the data.
End of explanation
"""
from pyspark.ml.feature import CountVectorizer
# we will remove words that appear in 5 docs or less
cv = CountVectorizer(minTF=1., minDF=5., vocabSize=2**17)\
.setInputCol("filtered")\
.setOutputCol("tf")
# we now create a pipelined transformer
cv_pipeline = Pipeline(stages=[tokenizer, sw_filter, cv]).fit(imdb_reviews_df)
# now we can make the transformation between the raw text and the counts
cv_pipeline.transform(imdb_reviews_df).show(5)
"""
Explanation: Finally, for this initial Pipeline, we define a counter vectorizer estimator
End of explanation
"""
from pyspark.ml.feature import IDF
idf = IDF().\
setInputCol('tf').\
setOutputCol('tfidf')
idf_pipeline = Pipeline(stages=[cv_pipeline, idf]).fit(imdb_reviews_df)
idf_pipeline.transform(imdb_reviews_df).show(5)
"""
Explanation: The term frequency vector is represented with a sparse vector. We have 26,384 terms.
Finally, we build another pipeline that takes the output of the previous pipeline and lowers the terms of documents that are very common.
End of explanation
"""
tfidf_df = idf_pipeline.transform(imdb_reviews_df)
"""
Explanation: Therefore, the idf_pipeline takes the raw text from the dataframe imdb_reviews_df and creates a feature vector called tfidf!
End of explanation
"""
training_df, validation_df, testing_df = imdb_reviews_df.randomSplit([0.6, 0.3, 0.1], seed=0)
[training_df.count(), validation_df.count(), testing_df.count()]
"""
Explanation: Data science pipeline for estimating sentiments
First, let's split the data into training, validation, and testing.
End of explanation
"""
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression().\
setLabelCol('score').\
setFeaturesCol('tfidf').\
setRegParam(0.0).\
setMaxIter(100).\
setElasticNetParam(0.)
"""
Explanation: One immediately apparent problem is that the number of features in the dataset is far larger than the number of training examples. This can lead to serious overfitting.
Let's look at this more closely. Let's apply a simple prediction model known as logistic regression.
Logistic regression will take the tfidf features and predict whether the review is positive (score == 1) or negative (score == 0).
End of explanation
"""
lr_pipeline = Pipeline(stages=[idf_pipeline, lr]).fit(training_df)
"""
Explanation: Lets create a pipeline transformation by chaining the idf_pipeline with the logistic regression step (lr)
End of explanation
"""
lr_pipeline.transform(validation_df).\
select(fn.expr('float(prediction = score)').alias('correct')).\
select(fn.avg('correct')).show()
"""
Explanation: Lets estimate the accuracy:
End of explanation
"""
import pandas as pd
vocabulary = idf_pipeline.stages[0].stages[-1].vocabulary
weights = lr_pipeline.stages[-1].coefficients.toArray()
coeffs_df = pd.DataFrame({'word': vocabulary, 'weight': weights})
"""
Explanation: The performance is much better than before.
The problem however is that we are overfitting because we have many features compared to the training examples:
For example, if we look at the weights of the features, there is a lot of noise:
End of explanation
"""
coeffs_df.sort_values('weight').head(5)
"""
Explanation: The most negative words are:
End of explanation
"""
coeffs_df.sort_values('weight', ascending=False).head(5)
"""
Explanation: And the most positive:
End of explanation
"""
idf_pipeline.transform(training_df).\
select('id', fn.explode('words').alias('word')).\
where(fn.col('word') == 'helming').\
join(training_df, 'id').\
first()
"""
Explanation: But none of them make sense. What is happening? We are overfitting the data. Those words that don't make sense are capturing just noise in the reviews.
For example, the word helming appears in only one review:
End of explanation
"""
lambda_par = 0.02
alpha_par = 0.3
en_lr = LogisticRegression().\
setLabelCol('score').\
setFeaturesCol('tfidf').\
setRegParam(lambda_par).\
setMaxIter(100).\
setElasticNetParam(alpha_par)
"""
Explanation: Regularization
One way to prevent overfitting during training is to modify the loss function and penalize weight values that are too large.
There are two major regularization techniques, one based on penalizing the squared value of the weight (called L2 or ridge regularization) and another based on penalizing the absolute value of the weight (called L1 or lasso regularization).
The unregularized logistic regression loss function is:
\begin{equation}
L_\theta(p(X),Y) = - \left( \sum_i Y_i p_\theta(X_i) + (1-Y_i)(1-p_\theta(X_i)) \right)
\end{equation}
where $p_\theta(\cdot)$ is the sigmoid function:
\begin{equation}
p_\theta(X) = \frac{1}{1+\exp(-(\theta_0 + \sum_{j>0} x_j \theta_j))}
\end{equation}
If we modify the loss function $L_\theta$ slightly
\begin{equation}
L_\theta^{\lambda}(p(X),Y) = -\left( \sum_i Y_i p_\theta(X_i) + (1-Y_i)(1-p_\theta(X_i)) \right) + \lambda \sum_{j>0} \theta_j^2
\end{equation}
we obtain what is known as L2 regularization.
Notice how we increase the loss function by $\lambda$ times the square of the weights. In practice, this means that we will think twice about increasing the importance of a feature. This loss function will prevent the algorithm for fitting certain data points, such as outliers or noise, unless the decrease in loss for the data grants it. Also, notice that the penalization doesn't apply to the bias parameter $\theta_0$.
You can see more clearly the effect of such cost function when $\lambda$ goes to infinity: the features will not be used for predicting and only the bias term will matter! This prevents the algorithm from learning altogether, forcing it to underfit!
One problem with L2 regularization is that all weights go to zero uniformly. In a sense, all features will matter but less than with the unregularized loss function. This is a really strange because we do not want all features to matter. In sentiment analysis, we want to select certain features because we want to understand that only some words have effects on the sentiment.
A different modification of the original loss function can achieve this. This regularization is known as L1 or lasso regularization and penalizes the absolute value of the weight
\begin{equation}
L_\theta^{\lambda}(p(X),Y) = -\left( \sum_i Y_i p_\theta(X_i) + (1-Y_i)(1-p_\theta(X_i)) \right) + \lambda \sum_{j>0} \left| \theta_j \right|
\end{equation}
The practical effect of L1 regularization is that the difference between a feature having no importance vs some small importance is massively bigger than with L2 regularization. Therefore, optimizing the L1 loss function usually brings some features to have exactly zero weight.
One problem with L1 regularization is that it will never select more features than the number of examples. This is because it can always fit the training data perfectly when the number of features equals the number of examples. In our sentiment analysis, this is the case (there are more words than examples).
One way of remedying this is to have a combination of both L1 and L2. This is known as elastic net regularization. For this type of regularization, we have to pick a parameter ($\alpha$) deciding to consider L1 vs L2 regularization. If $\alpha=0$, then we choose L2, and if $\alpha=1$ we choose L1. For example, $\alpha=0.5$ means half L1 and half L2.
\begin{equation}
L_\theta^{\lambda,\alpha}(p(X),Y) = -\left( \sum_i Y_i p_\theta(X_i) + (1-Y_i)(1-p_\theta(X_i)) \right) + \lambda \left[(1-\alpha) \sum_{j>0} \theta_j^2 + \alpha \sum_{j>0} \left| \theta_j \right| \right]
\end{equation}
Unfortunately, elastic net regularization comes with two additional parameters, $\lambda$ and $\alpha$, and we must either select them a priori or used the validation set to choose the best one.
Fortunately, Spark allows us to fit elastic net regularization easily
End of explanation
"""
en_lr_pipeline = Pipeline(stages=[idf_pipeline, en_lr]).fit(training_df)
"""
Explanation: And we define a new Pipeline
End of explanation
"""
en_lr_pipeline.transform(validation_df).select(fn.avg(fn.expr('float(prediction = score)'))).show()
"""
Explanation: Let's look at the performance
End of explanation
"""
en_weights = en_lr_pipeline.stages[-1].coefficients.toArray()
en_coeffs_df = pd.DataFrame({'word': vocabulary, 'weight': en_weights})
"""
Explanation: We improve performance slightly, but whats more important is that we improve the understanding of the word sentiments. Lets take at the weights:
End of explanation
"""
en_coeffs_df.sort_values('weight').head(15)
"""
Explanation: The most negative words all make sense ("worst" is actually more negative than "worse")!
End of explanation
"""
en_coeffs_df.sort_values('weight', ascending=False).head(15)
"""
Explanation: Same thing with positive words
End of explanation
"""
en_coeffs_df.query('weight == 0.0').shape
"""
Explanation: Are there words with literally zero importance for predicting sentiment? Yes, and most of them!
End of explanation
"""
en_coeffs_df.query('weight == 0.0').shape[0]/en_coeffs_df.shape[0]
"""
Explanation: In fact, more than 95% of features are not needed to achieve a better performance than all previous models!
End of explanation
"""
en_coeffs_df.query('weight == 0.0').head(15)
"""
Explanation: Let's look at these neutral words
End of explanation
"""
from pyspark.ml.tuning import ParamGridBuilder
"""
Explanation: But, did we choose the right $\lambda$ and $\alpha$ parameters? We should run an experiment where we try different combinations of them. Fortunately, Spark let us do this by using a grid - a method that generates combination of parameters.
End of explanation
"""
en_lr_estimator = Pipeline(stages=[idf_pipeline, en_lr])
grid = ParamGridBuilder().\
addGrid(en_lr.regParam, [0., 0.01, 0.02]).\
addGrid(en_lr.elasticNetParam, [0., 0.2, 0.4]).\
build()
"""
Explanation: We need to build a new estimator pipeline
End of explanation
"""
grid
all_models = []
for j in range(len(grid)):
print("Fitting model {}".format(j+1))
model = en_lr_estimator.fit(training_df, grid[j])
all_models.append(model)
# estimate the accuracy of each of them:
accuracies = [m.\
transform(validation_df).\
select(fn.avg(fn.expr('float(score = prediction)')).alias('accuracy')).\
first().\
accuracy for m in all_models]
import numpy as np
best_model_idx = np.argmax(accuracies)
"""
Explanation: This is the list of parameters that we will try:
End of explanation
"""
grid[best_model_idx]
best_model = all_models[best_model_idx]
accuracies[best_model_idx]
"""
Explanation: So the best model we found has the following parameters
End of explanation
"""
tweets_df = sqlContext.read.parquet('tweets.parquet')
tweets_df.show(5, truncate=False)
"""
Explanation: Finally, predicting tweet sentiments
Now we can use this model to predict sentiments on Twitter
End of explanation
"""
tweets_df.groupby('handle').agg(fn.count('*')).show()
"""
Explanation: We have 1K tweets from each candidate
End of explanation
"""
best_model.transform(tweets_df.withColumnRenamed('text', 'review')).select('review', 'prediction').show()
"""
Explanation: We can now predict the sentiment of the Tweet using our best model, we need to rename the column so that it matches our previous pipeline (review => ...)
End of explanation
"""
%matplotlib inline
import seaborn
sentiment_pd = best_model.\
transform(tweets_df.withColumnRenamed('text', 'review')).\
groupby('handle').\
agg(fn.avg('prediction').alias('prediction'),
(2*fn.stddev('prediction')/fn.sqrt(fn.count('*'))).alias('err')).\
toPandas()
sentiment_pd.head()
sentiment_pd.plot(x='handle', y='prediction', xerr='err', kind='barh');
"""
Explanation: Now, lets summarize our results in a graph!
End of explanation
"""
best_model.\
transform(tweets_df.withColumnRenamed('text', 'review')).\
where(fn.col('handle') == '@realDonaldTrump').\
where(fn.col('prediction') == 0).\
select('review').\
take(5)
"""
Explanation: But let's examine some "negative" tweets by Trump
End of explanation
"""
best_model.\
transform(tweets_df.withColumnRenamed('text', 'review')).\
where(fn.col('handle') == '@HillaryClinton').\
where(fn.col('prediction') == 0).\
select('review').\
take(5)
"""
Explanation: And Clinton
End of explanation
"""
from pyspark.sql import types
def probability_positive(probability_column):
    """Extract P(class == 1) from a two-element probability vector.

    Spark ML's LogisticRegression emits a ``probability`` vector per
    row; index 1 holds the probability of the positive class. Cast to
    a plain Python float so it is a valid DoubleType UDF return value.
    """
    return float(probability_column[1])
func_probability_positive = fn.udf(probability_positive, types.DoubleType())
prediction_probability_df = best_model.transform(validation_df).\
withColumn('probability_positive', func_probability_positive('probability')).\
select('id', 'review', 'score', 'probability_positive')
prediction_probability_df.show()
"""
Explanation: As you can see, there is plenty of room for improvement.
Part 2: Test yourself
From the IMDB dataframe (imdb_reviews_df), compute the average review length between positive and negative reviews. Hint: use the spark sql function length. In particular, as we imported the funcions with the name fn (using from pyspark.sql import function as fn), use fn.length with the name of the column.
In the IMDB review database, are positive reviews longer than negative reviews?
Using the sentiment dataframe sentiments_df, find the imdb reviews with the most number of negative words. Hint: You need to tokenize the review field in imdb_review_df and then join with sentiments_df. Finally, perform selection and summary query
Similar to 3, find the imdb review with the most number of positive words.
Part 3: On our own
1) Using the best model fitted (best_model), estimate the generalization error in the testing set (testing_df)
2) One way of analyzing what is wrong with a model is to examine when they fail the hardest. In our case, we could do this by looking at cases in which logistic regression is predicting with high probability a positive sentiment when in fact the actual sentiment is negative.
To extract the probability of positive sentiment, however, we must extract it from the prediction with a custom function.
End of explanation
"""
|
kdmurray91/kwip-experiments | writeups/coalescent/50reps_2016-05-18/50reps.ipynb | mit | expts = list(map(lambda fp: path.basename(fp.rstrip('/')), glob('data/*/')))
print("Number of replicate experiments:", len(expts))
def process_expt(expt):
    """Compute kWIP performance metrics for one replicate experiment.

    Scans ``data/<expt>/kwip/*.dist`` for kWIP distance matrices,
    compares each against the true genome-distance matrix for the
    matching scale, and records Spearman's rho for every
    (coverage, scale, metric) combination.

    Parameters
    ----------
    expt : str
        Replicate seed, i.e. the directory name under ``data/``.

    Returns
    -------
    list of dict
        One record per distance file, with keys ``coverage``, ``scale``,
        ``metric``, ``rho`` and ``seed``.
    """
    expt_results = []

    def extract_info(filename):
        # Filenames look like: kwip/<coverage>x-<scale>-<wip|ip>.dist
        return re.search(r'kwip/(\d\.?\d*)x-(0\.\d+)-(wip|ip).dist', filename).groups()

    # Cache of scale -> truth distance matrix, populated lazily so each
    # genome-distance file is loaded at most once per experiment.
    truths = {}
    for distfile in glob("data/{}/kwip/*.dist".format(expt)):
        cov, scale, metric = extract_info(distfile)
        if scale not in truths:
            genome_dist_path = 'data/{ex}/all_genomes-{sc}.dist'.format(ex=expt, sc=scale)
            truths[scale] = load_sample_matrix_to_runs(genome_dist_path)
        exptmat = DistanceMatrix.read(distfile)
        # Correlate off-diagonal distances of the experimental matrix
        # against the truth using Spearman's rank correlation.
        rho = distmat_corr(truths[scale], exptmat, stats.spearmanr).correlation
        expt_results.append({
            "coverage": cov,
            "scale": scale,
            "metric": metric,
            "rho": rho,
            "seed": expt,
        })
    return expt_results
#process_expt('3662')
results = []
for res in map(process_expt, expts):
results.extend(res)
results = pd.DataFrame(results)
"""
Explanation: Calculate performance of kWIP
The next bit of python code calculates the performance of kWIP against the distance between samples calulcated from the alignments of their genomes.
This code caluclates spearman's $\rho$ between the off-diagonal elements of the triagnular distance matrices.
End of explanation
"""
%%R -i results
results$coverage = as.numeric(as.character(results$coverage))
results$scale = as.numeric(as.character(results$scale))
print(summary(results))
str(results)
"""
Explanation: Statistical analysis
Is done is R, as that's easier.
Below we see a summary and structure of the data
End of explanation
"""
%%R
ggplot(results, aes(x=coverage, y=scale)) +
geom_point() +
scale_x_log10() +
scale_y_log10() +
theme_bw()
"""
Explanation: Experiment design
Below we see the design of the experiment in terms of the two major variables.
We have a series (vertically) that, at 30x coverage, looks at the effect of genetic variation on performance. There is a second series that examines the effect of coverage at an average pairwise genetic distance of 0.001.
There are 100 replicates for each data point, performed as a separate bootstrap across the random creation of the tree and sampling of reads etc.
End of explanation
"""
%%R
dat = results %>%
filter(scale==0.001, coverage<=30) %>%
select(rho, metric, coverage)
dat$coverage = as.factor(dat$coverage)
ggplot(dat, aes(x=coverage, y=rho, fill=metric)) +
geom_boxplot(aes(fill=metric))
%%R
# AND AGAIN WITHOUT SUBSETTING
dat = results %>%
filter(scale==0.001) %>%
select(rho, metric, coverage)
dat$coverage = as.factor(dat$coverage)
ggplot(dat, aes(x=coverage, y=rho, fill=metric)) +
geom_boxplot(aes(fill=metric)) +
theme_bw()
%%R
dat = subset(results, scale==0.001, select=-scale)
ggplot(dat, aes(x=coverage, y=rho, colour=seed, linetype=metric)) +
geom_line() +
scale_x_log10()
%%R
summ = results %>%
filter(scale==0.001) %>%
select(-scale) %>%
group_by(coverage, metric) %>%
summarise(rho_av=mean(rho), rho_err=sd(rho))
p = ggplot(summ, aes(x=coverage, y=rho_av, ymin=rho_av-rho_err, ymax=rho_av+rho_err, group=metric)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric), alpha=0.2) +
xlab('Genome Coverage') +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
#scale_x_log10()+
#ggtitle("Performance of WIP & IP") +
theme_bw()
pdf("coverage-vs-rho_full.pdf",width=7, height=4)
print(p)
dev.off()
p
%%R
summ = results %>%
filter(scale==0.001, coverage <= 50) %>%
select(-scale) %>%
group_by(coverage, metric) %>%
summarise(rho_av=mean(rho), rho_err=sd(rho))
p = ggplot(summ, aes(x=coverage, y=rho_av, ymin=rho_av-rho_err, ymax=rho_av+rho_err, group=metric)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric), alpha=0.2) +
xlab('Genome Coverage') +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
#scale_x_log10()+
#ggtitle("Performance of WIP & IP") +
theme_bw()
pdf("coverage-vs-rho_50x.pdf",width=5, height=4)
print(p)
dev.off()
p
%%R
sem <- function(x) sqrt(var(x,na.rm=TRUE)/length(na.omit(x)))
summ = results %>%
filter(scale==0.001) %>%
select(-scale) %>%
group_by(coverage, metric) %>%
summarise(rho_av=mean(rho), rho_err=sem(rho))
ggplot(summ, aes(x=coverage, y=rho_av, ymin=rho_av-rho_err, ymax=rho_av+rho_err, group=metric)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric), alpha=0.2) +
xlab('Genome Coverage') +
ylab(expression(paste("Spearman's ", rho))) +
scale_x_log10()+
theme_bw()
%%R
cov_diff = results %>%
filter(scale==0.001) %>%
select(rho, metric, coverage, seed) %>%
spread(metric, rho) %>%
mutate(diff=wip-ip) %>%
select(coverage, seed, diff)
print(summary(cov_diff))
p = ggplot(cov_diff, aes(x=coverage, y=diff, colour=seed)) +
geom_line() +
scale_x_log10() +
ggtitle("Per expt difference in performance (wip - ip)")
print(p)
summ = cov_diff %>%
group_by(coverage) %>%
summarise(diff_av=mean(diff), diff_sd=sd(diff))
ggplot(summ, aes(x=coverage, y=diff_av, ymin=diff_av-diff_sd, ymax=diff_av+diff_sd)) +
geom_line() +
geom_ribbon(alpha=0.2) +
xlab('Genome Coverage') +
ylab(expression(paste("Improvment in Spearman's ", rho, " (wip - IP)"))) +
scale_x_log10() +
theme_bw()
%%R
var = results %>%
filter(coverage == 10, scale <= 0.05) %>%
select(metric, rho, scale)
var$scale = as.factor(as.character(var$scale))
str(var)
ggplot(var, aes(x=scale, y=rho, fill=metric)) +
geom_boxplot(aes(fill=metric)) +
xlab('Mean pairwise variation') +
ylab(expression(paste("Spearman's ", rho))) +
theme_bw()
%%R
summ = results %>%
filter(coverage == 10, scale <= 0.04) %>%
select(-coverage) %>%
group_by(scale, metric) %>%
summarise(rho_av=mean(rho), rho_sd=sd(rho))
str(summ)
p = ggplot(summ, aes(x=scale, y=rho_av, ymin=rho_av-rho_sd, ymax=rho_av+rho_sd, group=metric)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric), alpha=0.2) +
xlab(expression(paste('Mean pairwise variation (', pi, ')'))) +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
scale_x_log10()+
theme_bw()
pdf("pi-vs-performance.pdf",width=5, height=4)
print(p)
dev.off()
p
%%R
var_diff = results %>%
filter(coverage==10) %>%
select(rho, metric, scale, seed) %>%
spread(metric, rho) %>%
mutate(diff=wip-ip) %>%
select(scale, seed, diff)
summ_var_diff = var_diff %>%
group_by(scale) %>%
summarise(diff_av=mean(diff), diff_sd=sd(diff))
%%R
p = ggplot(var_diff, aes(x=scale, y=diff, colour=seed)) +
geom_line() +
scale_x_log10() +
ggtitle("Per expt difference in performance (wip - ip)")
print(p)
%%R
ggplot(summ_var_diff, aes(x=scale, y=diff_av, ymin=diff_av-diff_sd, ymax=diff_av+diff_sd)) +
geom_line() +
geom_ribbon(alpha=0.2) +
xlab('Average variants/site') +
ylab(expression(paste("Improvment in Spearman's ", rho, " (wip - IP)"))) +
scale_x_log10() +
theme_bw()
"""
Explanation: Effect of Coverage
Here we show the spread of data across the 100 reps as boxplots per metric and coverage level.
I note that the weighted product seems slightly more variable, particularly at higher coverage, though the median is nearly always higher.
End of explanation
"""
|
ueapy/enveast_python_course_materials | Day_3/22-Final-Project.ipynb | mit | # import pandas as pd
# df = pd.read_csv('../data/earthquakes_2015_2016_gt45.csv', parse_dates = ['time',], index_col='time')
# df.head()
"""
Explanation: Final Micro Project
The time has come to apply what you have learned throughout the course by doing a micro project.
You have two options now.
Choose from our list of projects
Significant Earthquakes
World Ocean Atlas
Arctic Sea Ice
Other
Use your own data
If you find our ideas terrible or you are eager to start using Python in your work, you can use your own data.
The only requirement is that data should be in a format that we have used in this course, preferably CSV/ASCII or netCDF file.
We might not be very helpful, but at least we can help you get started and/or point you to a relevant resource.
Significant Earthquakes
US Geological Survey (USGS) provides various earthquakes data on a global scale. Its Earthquake Catalog contains earthquake source parameters (e.g. hypocenters, magnitudes, phase picks and amplitudes) and other products (e.g. moment tensor solutions, macroseismic information, tectonic summaries, maps) produced by contributing seismic networks.
If you follow this link, you can search through the catalog and filter data by the magnitude, time and geographic region. In the data/ folder, we provide an example dataset of earthquakes with magnitude >4.5 that occurred around the world throughout the last year.
So if you want to build your project on these data, some possible ideas are:
* pandas package will be most useful to read in the data, as well as analyse them
* Use cartopy or basemap to plot the data using longitude and latitude columns
* Explore pandas' groupby() method, which you can use to aggregate data by time or other parameter
* Create a histogram of earthquakes magnitude
To get you started, we provided the minimal code to load the data.
End of explanation
"""
# import cartopy.crs as ccrs
# import matplotlib.pyplot as plt
# import xarray as xr
# %matplotlib inline
# ds = xr.open_mfdataset('../data/seaice_conc_monthly_*.nc')
## or
# ds1 = xr.open_dataset('../data/seaice_conc_monthly_nh_f08_199109_v02r00.nc')
# ds2 = xr.open_dataset('../data/seaice_conc_monthly_nh_f17_201209_v02r00.nc')
## Extract longitude and latitude values, then the sea ice concentration itself
## Code for creating a map
# fig = plt.figure()
# ax = fig.add_subplot(111, projection=ccrs.???(central_longitude=0))
# ax.coastlines(resolution='110m', linewidth=0.5)
# ax.gridlines()
# ax.set_extent([-180, 180, 40, 90], crs=ccrs.PlateCarree())
"""
Explanation: World Ocean Atlas
Inspired by this blog post: https://ocefpaf.github.io/python4oceanographers/blog/2015/05/04/woa13/
NOAA's World Ocean Atlas provides open-access gridded data of temperature, salinity and other ocean parameters in netCDF format.
It is a set of objectively analyzed (1$^\circ$ grid) climatological fields at standard depth levels for annual, seasonal, and monthly compositing periods. It also includes associated statistical fields of observed oceanographic profile data.
If you choose to analyse these data, we recommend that you start by:
downloading 5-degree data of temperature and oxygen
plotting the data on the global map (do not use jet/rainbow colormap!)
calculating an average depth profile and plotting it beside the map
Arctic Sea Ice
Data
In this project you are offered to use NOAA/NSIDC Climate Data Record of Passive Microwave Sea Ice Concentration.
In the ../data/ directory, there are 2 netCDF files seaice_conc_monthly* that correspond to September 1991 (original FTP link) and September 2012 (original FTP link).
If you want to download data for other months, visit the NSIDC's data portal.
Ideas for the project
Plot one of the time slices on a map with North Polar Stereographic projection
Create a figure with 3 subplots
Plot the 1991 sea ice concentration in the 1st subplot, 2012 sea ice in the 2nd, and the difference in the 3rd.
Getting started
For this project, we recommend that you:
* use xarray for opening and reading the netCDF files
* may use xarray.open_mf_dataset() to load both files at once
* use cartopy for creating a plot with a correct map projection
* use appropriate colormaps for the sea ice concentration and difference
To get started, copy the following cell into your Project notebook.
End of explanation
"""
|
pyReef-model/wavesed | wavesed2.ipynb | gpl-3.0 | file1='../data/gbr_south.csv'
file2='../data/topoGBR1000.csv'
# Bathymetric filename
bfile = file1
# Resolution factor
rfac = 4
"""
Explanation: Definition of model variables
Model domain / grid parameters
End of explanation
"""
# Wave heights (m)
H0 = [2,3,2]
# Define wave source direction at boundary
# (angle in degrees counterclock wise from horizontal axis)
dir = [300,0,90]
# Percentage of each wave scenario activity (in %)
perc = [3,3,4]
# Maximum depth for wave influence (m)
wbase = 20
# Sea level position (m)
slvl = 0.
"""
Explanation: Definition of wave parameters
End of explanation
"""
# Mean grain size diameter in m
d50 = 0.0001
# Steps used to perform sediment transport
tsteps = 1000
# Steps used to perform sediment diffusion
dsteps = 1000
"""
Explanation: Definition of sediment parameters
End of explanation
"""
#help(ocean.runWaveSed)
avewH,avewS,aveDZ,sim = ocean.runWaveSed(bfile,rfac,H0,dir,perc,
wbase,slvl,d50,tsteps,
dsteps,size = (10,40))
"""
Explanation: Running wavesed
Here we use a unique function that computes wave and sediment evolution for a set of multiple input forcing conditions
End of explanation
"""
size = (20,40)
# i1 = 0
# i2 = -1
# j1 = 0
# j2 = -1
# Zooming to a specific region
i1 = 600
i2 = 1200
j1 = 0
j2 = 500
fig = plt.figure(figsize=size)
ax = plt.gca()
ax.set_title('Erosion/deposition (m)', fontsize=10)
im = ax.imshow(np.flipud(aveDZ[i1:i2,j1:j2].T),interpolation='nearest',
cmap=cmo.cm.balance,vmin=-0.25, vmax=0.25)
ax.contour(np.flipud(sim.regZ[i1:i2,j1:j2].T-sim.sealvl), 0,
colors='k', linewidths=2)
divider1 = make_axes_locatable(ax)
cax1 = divider1.append_axes("right", size="5%", pad=0.05)
cbar1 = plt.colorbar(im,cax=cax1)
plt.tight_layout()
plt.show()
"""
Explanation: Plotting combined evolution
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.