repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
paris-saclay-cds/python-workshop | Day_2_Software_engineering_best_practices/solutions/03_code_style.ipynb | bsd-3-clause | def read_spectra(path_csv):
    """Read and parse data in pandas DataFrames.

    Parameters
    ----------
    path_csv : str
        Path to the CSV file to read.

    Returns
    -------
    spectra : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all Raman spectra.
    concentration : pandas Series, shape (n_spectra,)
        Series containing the concentration of the molecule.
    molecule : pandas Series, shape (n_spectra,)
        Series containing the type of chemotherapeutic agent.
    """
    # Validate the input before any I/O: a clear TypeError/ValueError here
    # is easier to diagnose than a failure deep inside pandas.
    if not isinstance(path_csv, six.string_types):
        raise TypeError("'path_csv' needs to be string. Got {}"
                        " instead.".format(type(path_csv)))
    else:
        if not path_csv.endswith('.csv'):
            raise ValueError('Wrong file format. Expecting csv file')
    data = pd.read_csv(path_csv)
    concentration = data['concentration']
    molecule = data['molecule']
    # Each spectrum is stored as a bracketed comma-separated string,
    # e.g. "[1.0,2.0,...]"; strip the brackets and parse the numbers.
    spectra_string = data['spectra']
    spectra = []
    for spec in spectra_string:
        # remove the first and last bracket and convert to a numpy array
        spectra.append(np.fromstring(spec[1:-1], sep=','))
    spectra = pd.DataFrame(spectra)
    return spectra, concentration, molecule
# Read the frequency axis as a pandas Series.
frequency = pd.read_csv('data/freq.csv')['freqs']
# Load every training file and keep the three parsed pieces together.
training_results = [read_spectra('data/spectra_{}.csv'.format(idx))
                    for idx in range(4)]
all_spectra, all_concentrations, all_molecules = zip(*training_results)
# Concatenate into a single DataFrame and two Series.
spectra = pd.concat(all_spectra)
concentration = pd.concat(all_concentrations)
molecule = pd.concat(all_molecules)
"""
Explanation: IO: Reading and preprocess the data
We can define a function which will read the data and process them.
End of explanation
"""
def _apply_axis_layout(ax, title):
    """Despine the axes, push the spines outward and label both axes."""
    ax.set_xlabel('Frequency')
    ax.set_ylabel('Intensity')
    ax.set_title(title)
    # Hide the top/right spines and keep ticks on the remaining sides.
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # Offset the visible spines for a cleaner "despined" look.
    for side in ('left', 'bottom'):
        ax.spines[side].set_position(('outward', 10))
def plot_spectra(frequency, spectra, title=''):
    """Plot a bunch of Raman spectra.

    Parameters
    ----------
    frequency : pandas Series, shape (n_freq_points,)
        Frequencies for which the Raman spectra were acquired.
    spectra : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing all Raman spectra.
    title : str
        Title added to the plot.

    Returns
    -------
    fig : matplotlib Figure
        Figure containing the plot.
    ax : matplotlib Axes
        Axes of the plot.
    """
    fig, ax = plt.subplots()
    # Transpose so each column (one spectrum) is drawn as one line.
    ax.plot(frequency, spectra.T)
    _apply_axis_layout(ax, title)
    return fig, ax
def plot_spectra_by_type(frequency, spectra, classes, title=''):
    """Plot mean spectrum with its variance for a given class.

    Parameters
    ----------
    frequency : pandas Series, shape (n_freq_points,)
        Frequencies for which the Raman spectra were acquired.
    spectra : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing all Raman spectra.
    classes : array-like, shape (n_spectra,)
        Label of each spectrum; one mean +/- std curve is drawn per
        unique label.
    title : str
        Title added to the plot.

    Returns
    -------
    fig : matplotlib Figure
        Figure containing the plot.
    ax : matplotlib Axes
        Axes of the plot.
    """
    fig, ax = plt.subplots()
    for label in np.unique(classes):
        # Rows of `spectra` belonging to the current class.
        label_index = np.flatnonzero(classes == label)
        spectra_mean = np.mean(spectra.iloc[label_index], axis=0)
        spectra_std = np.std(spectra.iloc[label_index], axis=0)
        ax.plot(frequency, spectra_mean, label=label)
        # Shade one standard deviation around the mean.
        ax.fill_between(frequency,
                        spectra_mean + spectra_std,
                        spectra_mean - spectra_std,
                        alpha=0.2)
    _apply_axis_layout(ax, title)
    # Place the legend outside the axes on the right.
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    return fig, ax
# Plot every training spectrum, then the per-class mean +/- std curves.
fig, ax = plot_spectra(frequency, spectra, 'All training spectra')
fig, ax = plot_spectra_by_type(frequency, spectra, molecule)
# The title can also be set after the fact on the returned Axes.
ax.set_title('Mean spectra in function of the molecules')
fig, ax = plot_spectra_by_type(frequency, spectra, concentration,
                               'Mean spectra in function of the concentrations')
"""
Explanation: Plot helper functions
We can create two functions: (i) to plot all spectra and (ii) to plot the mean spectra with their std intervals.
We will make a "private" function which will be used by both plot types.
End of explanation
"""
# Reuse the same reader and plot helpers on the held-out test file.
spectra_test, concentration_test, molecule_test = read_spectra('data/spectra_4.csv')
# BUGFIX: this cell plots the *testing* spectra, but the title previously
# said 'All training spectra'.
plot_spectra(frequency, spectra_test,
             'All testing spectra')
plot_spectra_by_type(frequency, spectra_test, molecule_test,
                     'Mean spectra in function of the molecules')
plot_spectra_by_type(frequency, spectra_test, concentration_test,
                     'Mean spectra in function of the concentrations');
"""
Explanation: Reusability for new data:
End of explanation
"""
def plot_cm(cm, classes, title):
    """Plot a confusion matrix.

    Parameters
    ----------
    cm : ndarray, shape (n_classes, n_classes)
        Confusion matrix.
    classes : array-like, shape (n_classes,)
        Array containing the different spectra classes used in the
        classification problem.
    title : str
        Title added to the plot.

    Returns
    -------
    fig : matplotlib Figure
        Figure containing the plot.
    ax : matplotlib Axes
        Axes of the plot.
    """
    fig, ax = plt.subplots()
    plt.imshow(cm, interpolation='nearest', cmap='bwr')
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Overlay the integer counts; white text on dark cells for contrast.
    fmt = 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return fig, ax
# Compare two classifiers inside the same scale -> PCA -> classify pipeline.
for clf in [RandomForestClassifier(random_state=0),
            LinearSVC(random_state=0)]:
    pipeline = make_pipeline(StandardScaler(),
                             PCA(n_components=100, random_state=0),
                             clf)
    # Fit on the training spectra, predict the held-out test spectra.
    y_pred = pipeline.fit(spectra, molecule).predict(spectra_test)
    plot_cm(confusion_matrix(molecule_test, y_pred),
            pipeline.classes_,
            'Confusion matrix using {}'.format(clf.__class__.__name__))
    print('Accuracy score: {0:.2f}'.format(pipeline.score(spectra_test,
                                                          molecule_test)))
"""
Explanation: Training and testing a machine learning model for classification
End of explanation
"""
def plot_regression(y_true, y_pred, title):
    """Plot actual vs. predicted scatter plot.

    Parameters
    ----------
    y_true : array-like, shape (n_samples,)
        Ground truth (correct) target values.
    y_pred : array-like, shape (n_samples,)
        Estimated targets as returned by a regressor.
    title : str
        Title added to the plot.

    Returns
    -------
    fig : matplotlib Figure
        Figure containing the plot.
    ax : matplotlib Axes
        Axes of the plot.
    """
    fig, ax = plt.subplots()
    ax.scatter(y_true, y_pred)
    # Identity line: perfect predictions fall on it.
    # NOTE(review): the fixed [0, 25000] limits assume targets stay in
    # that range for this dataset -- confirm before reusing elsewhere.
    ax.plot([0, 25000], [0, 25000], '--k')
    ax.set_ylabel('Target predicted')
    ax.set_xlabel('True Target')
    ax.set_title(title)
    # NOTE: the 'MAE' shown is the *median* absolute error, not the mean.
    ax.text(1000, 20000, r'$R^2$=%.2f, MAE=%.2f' % (
        r2_score(y_true, y_pred), median_absolute_error(y_true, y_pred)))
    ax.set_xlim([0, 25000])
    ax.set_ylim([0, 25000])
    return fig, ax
def regression_experiment(X_train, X_test, y_train, y_test):
    """Perform regression experiment.

    Build a pipeline using PCA and either a RidgeCV
    or a RandomForestRegressor model, then plot the predictions.

    Parameters
    ----------
    X_train : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing training Raman spectra.
    X_test : pandas DataFrame, shape (n_spectra, n_freq_points)
        DataFrame containing testing Raman spectra.
    y_train : pandas Series, shape (n_spectra,)
        Series containing the training concentrations acting as targets.
    y_test : pandas Series, shape (n_spectra,)
        Series containing the testing concentrations acting as targets.

    Returns
    -------
    None
    """
    for reg in [RidgeCV(), RandomForestRegressor(random_state=0)]:
        pipeline = make_pipeline(PCA(n_components=100), reg)
        y_pred = pipeline.fit(X_train, y_train).predict(X_test)
        plot_regression(y_test, y_pred,
                        'Regression using {}'.format(reg.__class__.__name__))

# Run the experiment on the raw (unscaled) spectra.
regression_experiment(spectra, spectra_test,
                      concentration, concentration_test)
def fit_params(data):
    """Compute statistics for robustly scaling data.

    The location estimate is the per-wavelength median and the scale
    estimate is the inter-quartile range (75th minus 25th percentile).
    These statistics are used later to scale data.

    Parameters
    ----------
    data : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all Raman spectra.

    Returns
    -------
    median : ndarray, shape (n_freq_point,)
        Median for each wavelength.
    variance : ndarray, shape (n_freq_point,)
        Difference between the 75th and 25th percentiles for each
        wavelength.
    """
    median = np.median(data, axis=0)
    # Both quartiles in one call: row 0 is the 25th, row 1 the 75th.
    quartiles = np.percentile(data, [25, 75], axis=0)
    return median, quartiles[1] - quartiles[0]
def transform(data, median, var_25_75):
    """Scale data using robust estimators.

    Center every spectrum on the median and divide by the
    inter-quartile range (difference between the 75th and 25th
    percentiles) computed per wavelength.

    Parameters
    ----------
    data : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all Raman spectra.
    median : ndarray, shape (n_freq_point,)
        Median for each wavelength.
    var_25_75 : ndarray, shape (n_freq_point,)
        Inter-quartile range for each wavelength.

    Returns
    -------
    data_scaled : pandas DataFrame, shape (n_spectra, n_freq_point)
        DataFrame containing all scaled Raman spectra.
    """
    centered = data - median
    return centered / var_25_75
# Fit the robust-scaling statistics on the training data only, then apply
# the very same statistics to both training and testing sets.
median_train, iqr_train = fit_params(spectra)
spectra_scaled = transform(spectra, median_train, iqr_train)
spectra_test_scaled = transform(spectra_test, median_train, iqr_train)
# Re-run the regression experiment on the scaled spectra.
regression_experiment(spectra_scaled, spectra_test_scaled,
                      concentration, concentration_test)
"""
Explanation: Training and testing a machine learning model for regression
End of explanation
"""
|
vsingla2/Self-Driving-Car-NanoDegree-Udacity | Term1-Computer-Vision-and-Deep-Learning/Project1-Finding-Lane-Lines/P1.ipynb | mit | #importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
"""
Explanation: Self-Driving Car Engineer Nanodegree
Project: Finding Lane Lines on the Road
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a write up template that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the rubric points for this project.
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".
The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
Run the cell below to import some packages. If you get an import error for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, see this forum post for more troubleshooting tips.
Import Packages
End of explanation
"""
# Read in a sample image for developing the pipeline.
image = mpimg.imread('test_images/solidWhiteRight.jpg')
# Print some basic stats (type and shape) and display the image.
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)  # for a single-channel image 'gray', call plt.imshow(gray, cmap='gray')
"""
Explanation: Read in an Image
End of explanation
"""
import math
def grayscale(img):
    """Applies the Grayscale transform.

    This will return an image with only one color channel,
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')."""
    # Assumes the image is in RGB channel order (e.g. read via mpimg.imread).
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def rgbtohsv(img):
    """Applies the RGB to HSV color-space transform."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform.

    Gradients below `low_threshold` are rejected, above `high_threshold`
    are kept; in-between pixels are kept only if connected to strong edges.
    """
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel of size `kernel_size` x `kernel_size`."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    # Fill value must match the channel count of the input image.
    if len(img.shape) > 2:
        ignore_mask_color = (255,) * img.shape[2]
    else:
        ignore_mask_color = 255
    # Paint the polygon onto a black mask, then keep only the pixels of
    # `img` where the mask is non-zero.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[200, 0, 0], thickness = 10):
    """Draw a single extrapolated left and right lane line on `img`.

    Hough segments are split by slope into left-lane and right-lane
    candidates; a first-order polynomial is fitted through the points of
    each group and the resulting lines are drawn from the bottom of the
    image up to 60% of its height.

    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below.
    """
    x_left = []
    y_left = []
    x_right = []
    y_right = []
    # BUGFIX: use the frame passed in (`img`), not the module-level `image`;
    # the global may have a different size than the frame being annotated.
    imshape = img.shape
    ysize = imshape[0]
    ytop = int(0.6*ysize)  # need y coordinates of the top and bottom of left and right lane
    ybtm = int(ysize)      # to calculate x values once a line is found
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                # Vertical segment: slope is undefined, skip it.
                continue
            slope = float((y2 - y1) / (x2 - x1))
            if slope > 0.5:  # slope greater than tan(26.52 deg): left line
                x_left.extend([x1, x2])
                y_left.extend([y1, y2])
            if slope < -0.5:  # slope less than tan(153.48 deg): right line
                x_right.extend([x1, x2])
                y_right.extend([y1, y2])
    # only execute if there are points found that meet criteria,
    # this eliminates borderline cases i.e. rogue frames
    if (x_left != []) & (x_right != []) & (y_left != []) & (y_right != []):
        left_line_coeffs = np.polyfit(x_left, y_left, 1)
        left_xtop = int((ytop - left_line_coeffs[1]) / left_line_coeffs[0])
        left_xbtm = int((ybtm - left_line_coeffs[1]) / left_line_coeffs[0])
        right_line_coeffs = np.polyfit(x_right, y_right, 1)
        right_xtop = int((ytop - right_line_coeffs[1]) / right_line_coeffs[0])
        right_xbtm = int((ybtm - right_line_coeffs[1]) / right_line_coeffs[0])
        cv2.line(img, (left_xtop, ytop), (left_xbtm, ybtm), color, thickness)
        cv2.line(img, (right_xtop, ytop), (right_xbtm, ybtm), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    # Draw the extrapolated lane lines on a black 3-channel canvas.
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """
    `img` is the output of hough_lines(): an image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.
    `initial_img` should be the image before any processing.

    The result image is computed as follows:
        initial_img * α + img * β + λ

    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, λ)
"""
Explanation: Ideas for Lane Detection Pipeline
Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:
cv2.inRange() for color selection
cv2.fillPoly() for regions selection
cv2.line() to draw lines on an image given endpoints
cv2.addWeighted() to coadd / overlay two images
cv2.cvtColor() to grayscale or change color
cv2.imwrite() to output images to file
cv2.bitwise_and() to apply a mask to an image
Check out the OpenCV documentation to learn about these and discover even more awesome functionality!
Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
End of explanation
"""
import os
test_images_list = os.listdir("test_images/")  # filenames of all images in the test folder
"""
Explanation: Test Images
Build your pipeline to work on the images in the directory "test_images"
You should make sure your pipeline works well on these images before you try the videos.
End of explanation
"""
# define parameters needed for helper functions (given inline)
kernel_size = 5 # gaussian blur
low_threshold = 60 # canny edge detection
high_threshold = 180 # canny edge detection
# Define the Hough transform parameters
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 20 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 40 # minimum number of pixels making up a line
max_line_gap = 25 # maximum gap in pixels between connectable line segments
for test_image in test_images_list: # iterating through the images in test_images folder
    image = mpimg.imread('test_images/' + test_image) # reading in an image
    gray = grayscale(image) # convert to grayscale
    blur_gray = gaussian_blur(gray, kernel_size) # add gaussian blur to remove noise
    edges = canny(blur_gray, low_threshold, high_threshold) # perform canny edge detection
    # extract image size and define vertices of the four sided polygon for masking
    imshape = image.shape
    xsize = imshape[1]
    ysize = imshape[0]
    vertices = np.array([[(0.05*xsize, ysize ),(0.44*xsize, 0.6*ysize),\
                          (0.55*xsize, 0.6*ysize), (0.95*xsize, ysize)]], dtype=np.int32) # trapezoid covering the lane area
    masked_edges = region_of_interest(edges, vertices) # retain information only in the region of interest
    line_image = hough_lines(masked_edges, rho, theta, threshold,\
                             min_line_length, max_line_gap) # perform hough transform and retain lines with specific properties
    lines_edges = weighted_img(line_image, image, α=0.8, β=1., λ=0.) # Draw the lines on the edge image
    plt.imshow(lines_edges) # Display the image
    plt.show()
    mpimg.imsave('test_images_output/' + test_image, lines_edges) # save the resulting image
"""
Explanation: Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the test_images_output directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
End of explanation
"""
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """Run the full lane-finding pipeline on a single RGB frame.

    NOTE(review): this relies on the module-level tuning parameters
    (kernel_size, low/high_threshold, rho, theta, threshold,
    min_line_length, max_line_gap) defined in the earlier cell -- they
    must be set before this function is used on a video.
    """
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    gray = grayscale(image) # convert to grayscale
    blur_gray = gaussian_blur(gray, kernel_size) # add gaussian blur to remove noise
    edges = canny(blur_gray, low_threshold, high_threshold) # perform canny edge detection
    # extract image size and define vertices of the four sided polygon for masking
    imshape = image.shape
    xsize = imshape[1]
    ysize = imshape[0]
    vertices = np.array([[(0.05*xsize, ysize ),(0.44*xsize, 0.6*ysize),\
                          (0.55*xsize, 0.6*ysize), (0.95*xsize, ysize)]], dtype=np.int32) # trapezoid covering the lane area
    masked_edges = region_of_interest(edges, vertices) # retain information only in the region of interest
    line_image = hough_lines(masked_edges, rho, theta, threshold,\
                             min_line_length, max_line_gap) # perform hough transform and retain lines with specific properties
    lines_edges = weighted_img(line_image, image, α=0.8, β=1., λ=0.) # Draw the lines on the edge image
    return lines_edges
"""
Explanation: Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
solidWhiteRight.mp4
solidYellowLeft.mp4
Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, check out this forum post for more troubleshooting tips.
If you get an error that looks like this:
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
Follow the instructions in the error message and check out this forum post for more troubleshooting tips across operating systems.
End of explanation
"""
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
# fl_image applies process_image to every frame of the clip.
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
"""
Explanation: Let's try the one with the solid white lane on the right first ...
End of explanation
"""
# Embed the rendered video inline in the notebook.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
"""
Explanation: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
End of explanation
"""
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
# %time reports how long the per-frame processing of the clip takes.
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
"""
Explanation: Improve the draw_lines() function
At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".
Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.
Now for the one with the solid yellow lane on the left. This one's more tricky!
End of explanation
"""
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
# Time the rendering of the annotated challenge clip.
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
"""
Explanation: Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a link to the writeup template file.
Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
End of explanation
"""
|
drivendata/data-science-is-software | notebooks/lectures/3.0-refactoring.ipynb | mit | %matplotlib inline
from __future__ import print_function
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
PROJ_ROOT = os.path.join(os.pardir, os.pardir)
"""
Explanation: <table style="width:100%; border: 0px solid black;">
<tr style="width: 100%; border: 0px solid black;">
<td style="width:75%; border: 0px solid black;">
<a href="http://www.drivendata.org">
<img src="https://s3.amazonaws.com/drivendata.org/kif-example/img/dd.png" />
</a>
</td>
</tr>
</table>
Data Science is Software
Developer #lifehacks for the Jupyter Data Scientist
Section 3: Writing code for reusability
End of explanation
"""
## Try adding parameter index=0
pump_data_path = os.path.join(PROJ_ROOT,
                              "data",
                              "raw",
                              "pumps_train_values.csv")
# NOTE(teaching): `index` is not a read_csv keyword (the correct name is
# `index_col`) -- this call is meant to fail so you practice looking up
# the docs below instead of edit-run-repeating.
df = pd.read_csv(pump_data_path, index=0)
df.head(1)
# `?` opens the docstring pane for read_csv.
pd.read_csv?
# Tab completion for parsing dates in the date_recorded column
# Shift tab for documentation
df = pd.read_csv("../data/water-pumps.csv", index_col=0)
df.head(1)
"""
Explanation: 3.1 No more docs-guessing
Don't edit-run-repeat to try to remember the name of a function or argument. Jupyter provides great docs integration and easy ways to remember the arguments to a function.
End of explanation
"""
# Summary statistics for all numeric columns.
df.describe()
## Paste for 'construction_year' and plot
## Paste for 'gps_height' and plot
# The copy-paste candidate: these three lines would be repeated verbatim
# for every variable we want to inspect -- motivating kde_plot below.
plot_data = df['amount_tsh']
sns.kdeplot(plot_data, bw=1000)
plt.show()
def kde_plot(dataframe, variable, upper=None, lower=None, bw=0.1):
    """ Plots a kernel-density estimate for one column of `dataframe`,
    with optional upper and lower bounds (inclusive) applied to the
    data before the density is estimated.
    """
    values = dataframe[variable]
    if lower is not None:
        values = values[values >= lower]
    if upper is not None:
        values = values[values <= upper]
    sns.kdeplot(values, bw=bw)
    plt.show()
# Reuse the one helper for three different variables and bandwidths.
kde_plot(df, 'amount_tsh', bw=1000, lower=0)
kde_plot(df, 'construction_year', bw=1, lower=1000, upper=2016)
kde_plot(df, 'gps_height', bw=100)
"""
Explanation: 3.2 No more copy-pasta
Don't repeat yourself.
End of explanation
"""
# add local python functions
import sys
# add the 'src' directory as one where we can import modules
src_dir = os.path.join(PROJ_ROOT, "src")
sys.path.append(src_dir)
# import my method from the source code
from features.build_features import remove_invalid_data
df = remove_invalid_data(pump_data_path)
df.shape
# TRY ADDING print("lalalala") to the method -- without autoreload the
# change is NOT picked up until the kernel restarts (see next section)
df = remove_invalid_data(pump_data_path)
"""
Explanation: 3.3 No more copy-pasta between notebooks
Have a method that gets used in multiple notebooks? Refactor it into a separate .py file so it can live a happy life!
Note: In order to import your local modules, you must do three things:
put the .py file in a separate folder
add an empty __init__.py file to the folder
add that folder to the Python path with sys.path.append
End of explanation
"""
# Load the "autoreload" extension
%load_ext autoreload
# always reload modules marked with "%aimport"
%autoreload 1
import os
import sys
# add the 'src' directory as one where we can import modules
src_dir = os.path.join(os.getcwd(), os.pardir, 'src')
sys.path.append(src_dir)
# import my method from the source code; %aimport marks the module for
# automatic reloading whenever its source changes on disk
%aimport features.build_features
from features.build_features import remove_invalid_data
df = remove_invalid_data(pump_data_path)
df.head()
"""
Explanation: Restart the kernel, let's try this again....
End of explanation
"""
# NOTE(review): this call is expected to raise ('date_recorded' holds
# datetimes, which the kde cannot handle) so that %debug below has a
# traceback to inspect -- confirm against your pandas/seaborn versions.
kde_plot(df,
         'date_recorded',
         upper=pd.to_datetime('2017-01-01'),
         lower=pd.to_datetime('1900-01-01'))
# Drop into pdb at the most recent traceback.
%debug
# "1" turns pdb on, "0" turns pdb off
%pdb 1
kde_plot(df, 'date_recorded')
# turn off debugger
%pdb 0
"""
Explanation: 3.4 I'm too good! Now this code is useful to other projects!
Importing local code is great if you want to use it in multiple notebooks, but once you want to use the code in multiple projects or repositories, it gets complicated. This is when we get serious about isolation!
We can build a python package to solve that! In fact, there is a cookiecutter to create Python packages.
Once we create this package, we can install it in "editable" mode, which means that as we change the code the changes will get picked up if the package is used. The process looks like
cookiecutter https://github.com/wdm0006/cookiecutter-pipproject
cd package_name
pip install -e .
Now we can have a separate repository for this code and it can be used across projects without having to maintain code in multiple places.
3.5 Sometimes things go wrong
Interrupt execution with:
- %debug magic: drops you out into the most recent error stacktrace in pdb
- import q;q.d(): drops you into pdb, even outside of IPython
Interrupt execution on an Exception with %pdb magic. Use pdb the Python debugger to debug inside a notebook. Key commands for pdb are:
p: Evaluate and print Python code
w: Where in the stack trace am I?
u: Go up a frame in the stack trace.
d: Go down a frame in the stack trace.
c: Continue execution
q: Stop execution
End of explanation
"""
import numpy as np
from mcmc.hamiltonian import hamiltonian, run_diagnostics
# potential and kinetic energies
def f(X):
    """Unnormalised target density evaluated at the rows of X (shape (n, 2))."""
    return np.exp(-100*(np.sqrt(X[:,1]**2 + X[:,0]**2) - 1)**2 + (X[:,0]-1)**3 - X[:,1] - 5)

def U(q):
    """Potential energy: negative log of the target density f."""
    return -np.log(f(q))

def K(p):
    """Kinetic energy: quadratic (Gaussian-momentum) form p.p^T / 2."""
    return p.dot(p.T) / 2
# gradient of the potential energy
def grad_U(X):
    """Gradient of the potential energy U at a single 2-D point.

    X is a (1, 2) numpy row vector holding the (x, y) coordinates;
    a (1, 2) row vector of gradient components is returned.
    """
    px, py = X[0, :]
    radius = np.sqrt(py**2 + px**2)
    ring_term = 100 * 2 * (radius - 1)
    dx = 3 * ((px - 1)**2) - ring_term * (px / radius)
    dy = -1 - ring_term * (py / radius)
    return -1 * np.array([dx, dy]).reshape(-1, 2)
ham_samples, H = hamiltonian(2500, U, K, grad_U)
run_diagnostics(ham_samples)
%prun ham_samples, H = hamiltonian(2500, U, K, grad_U)
run_diagnostics(ham_samples)
"""
Explanation: #lifehack: %debug and %pdb are great, but pdb can be clunky. Try the 'q' module. Adding the line import q;q.d() anywhere in a project gives you a normal python console at that point. This is great if you're running outside of IPython.
3.6 Code profiling
Sometimes your code is slow. See which functions are called, how many times, and how long they take!
The %prun magic reports these to you right in the Jupyter notebook!
End of explanation
"""
|
ituethoslab/navcom-2017 | exercises/Week 11-Tooltrack 3/Social media scraping.ipynb | gpl-3.0 | biposts = pd.read_csv('page_20446254070_2017_11_14_15_20_00.tab',
sep='\t',
parse_dates=['post_published'])
"""
Explanation: Social media scraping 3/3
What have we achieved in the past 2 weeks?
1. Sanity checks
Do them
Srsly
E.g. a student email 💬
Message from Netvizz
Getting posts between 2017-09-11T00:00:00+0000 and 2017-09-18T23:59:59+0000.
pid: 20446254070 / until:2017-06-19T01:15:00+0000 (100,1835008)
No posts were retrieved.
hmm... 🤔
Let's investigate
Read a Netvizz output file for scraping the page since beginning of June until mid-November 2017.
End of explanation
"""
biweeks = biposts.set_index('post_published')
ax = biweeks.resample('W')['post_id'].count().plot(title="Posts per week")
"""
Explanation: Re-index by dates, resample weekly, and plot counts
End of explanation
"""
biweeks = biposts.set_index('post_published')
ax = biweeks.resample('W')['post_id'].count().plot(title="Posts per week")
ax.annotate('"until"', xy=('2017-06-19T01:15:00+0000', 100))
ax.annotate('Requested interval', xy=('2017-09-11T00:00:00+0000', 100))
ax.axvline('2017-06-19T01:15:00+0000', linestyle='dotted')
ax.axvspan('2017-09-11T00:00:00+0000', '2017-09-18T23:59:59+0000', alpha=0.3);
"""
Explanation: "Between 2017-09-11 and 2017-09-18", and
"until 2017-06-19"
End of explanation
"""
|
samoturk/HUB-ipython | notebooks/Intro to Python and Jupyter.ipynb | mit | print('This is cell with code')
"""
Explanation: Python
Python is a widely used, general-purpose, high-level programming language. Its design philosophy emphasizes code readability. It is very popular in science.
Jupyter
The Jupyter Notebook is a web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text.
* Evolved from IPython notebook
* In addition to Python it supports many other programming languages (Julia, R, Haskell, etc.)
* http://jupyter.org/
Getting started
Anaconda/Conda (need to install)
https://www.continuum.io/downloads
I recommend PYTHON 2.7
Web hosted (only need a web browser)
http://tmpnb.org
The notebook
Cell types - markdown and code
This is Markdown cell
End of explanation
"""
var1 = 1
my_string = "This is a string"
var1
print(my_string)
my_list = [1, 2, 3, 'x', 'y']
my_list
my_list[0]
my_list[1:3]
salaries = {'Mike':2000, 'Ann':3000}
salaries['Mike']
salaries['Jake'] = 2500
salaries
"""
Explanation: Variables, lists and dictionaries
End of explanation
"""
long_string = 'This is a string \n Second line of the string'
print(long_string)
long_string.split(" ")
long_string.split("\n")
long_string.count('s') # case sensitive!
long_string.upper()
"""
Explanation: Strings
End of explanation
"""
if long_string.startswith('X'):
print('Yes')
elif long_string.startswith('T'):
print('It has T')
else:
print('No')
"""
Explanation: Conditionals
End of explanation
"""
for line in long_string.split('\n'):
print line
c = 0
while c < 10:
c += 2
print c
"""
Explanation: Loops
End of explanation
"""
some_numbers = [1,2,3,4]
[x**2 for x in some_numbers]
"""
Explanation: List comprehensions
End of explanation
"""
with open('../README.md', 'r') as f:
content = f.read()
print(content)
"""
Explanation: File operations
End of explanation
"""
def average(numbers):
    """Return the arithmetic mean of *numbers* as a float.

    The sum is converted to float *before* dividing, so the result is
    correct even under Python 2 integer division (e.g. [1, 2] -> 1.5,
    not 1). Raises ZeroDivisionError for an empty sequence.
    """
    return float(sum(numbers)) / len(numbers)
average([1,2,2,2.5,3,])
map(average, [[1,2,2,2.5,3,],[3,2.3,4.2,2.5,5,]])
# %load cool_events.py
#!/usr/bin/env python
from IPython.display import HTML
class HUB:
    """Represents a Heidelberg Unseminars in Bioinformatics (HUB) event."""

    def __init__(self, version):
        # Number of this event within the unseminar series.
        self.version = version
        self.full_name = "Heidelberg Unseminars in Bioinformatics"
        # Rich-text blurb that the notebook renders inline.
        self.info = HTML("<p>Heidelberg Unseminars in Bioinformatics are participant-"
                         "driven meetings where people with an interest in bioinformatics "
                         "come together to discuss hot topics and exchange ideas and then go "
                         "for a drink and a snack afterwards.</p>")

    def __repr__(self):
        return self.full_name
this_event = HUB(21)
this_event
this_event.full_name
this_event.version
"""
Explanation: Functions
End of explanation
"""
from math import exp
exp(2) #shift tab to access documentation
import math
math.exp(10)
import numpy as np # Numpy - package for scientific computing
#import pandas as pd # Pandas - package for working with data frames (tables)
#import Bio # BioPython - package for bioinformatics
#import sklearn # scikit-learn - package for machine learning
#from rdkit import Chem # RDKit - Chemoinformatics library
"""
Explanation: Python libraries
A library is a collection of resources. These include pre-written code, subroutines, classes, etc.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
x_values = np.arange(0, 20, 0.1)
y_values = [math.sin(x) for x in x_values]
plt.plot(x_values, y_values)
plt.scatter(x_values, y_values)
plt.boxplot(y_values)
"""
Explanation: Plotting
End of explanation
"""
|
ucsdlib/python-novice-inflammation | 5-functions.ipynb | cc0-1.0 | import numpy
import matplotlib.pyplot
def fahr_to_kelvin(temp):
    """Convert a temperature from Fahrenheit to Kelvin.

    The conversion factor is written as 5.0/9.0 so it stays a float even
    under Python 2 integer division (5/9 == 0 there, which would zero out
    every result); under Python 3 the value is unchanged.
    """
    return ((temp - 32) * (5.0/9.0)) + 273.15
"""
Explanation: up to now:
* we've written code to draw out some interesting features of the inflammation data,
* looped over our data files to draw plots,
* and have python to make decisions based on conditions related to the data.
problem is
* our code is getting long and complicated
* what if we had thousands of datasets, but didn't want to generate plots for every one? (commenting the code out will become a nuisance)
* What about reusing the code on a different dataset or at a different point in the program? (cutting and pasting is going to make code longer and repetitive)
need a way to package our code for reuse
* the way to do this in Python is through functions
* Python lets us define functions - a shorthand way of re-executing code.
* Let's write function to convert from fahrenheit to kelvin
End of explanation
"""
print('freezing point of water:', fahr_to_kelvin(32))
print('boiling point of water:', fahr_to_kelvin(212))
"""
Explanation: use def followed by function name and parenthesized list of parameter names
we call a function and the values we pass it are assigned to those variables so we can use them inside
use return statement to send back result
We can now try our function out!
End of explanation
"""
def kelvin_to_celsius(temp_k):
    """Convert a temperature from Kelvin to degrees Celsius."""
    offset = 273.15
    return temp_k - offset
print('absolute zero in Celsius:', kelvin_to_celsius(0.0))
"""
Explanation: We've successfully called the function we defined
Now we have access to the result
(Note: a section on integer division still needs to be added here)
Composing Functions
Now we've seen how to turn Fahrenheit to Kelvin, its easy to turn kelvin into celsius
End of explanation
"""
def fahr_to_celsius(temp_f):
    """Convert Fahrenheit to Celsius by composing the two existing converters."""
    return kelvin_to_celsius(fahr_to_kelvin(temp_f))
print('freezing point of water in Celsius:', fahr_to_celsius(32.0))
"""
Explanation: What about Fahrenheit to Celsius?
* we could write out the formula, but we don't need to
* we can compose the two functions we have already created
End of explanation
"""
def fence(original, wrapper):
    """Return *original* with *wrapper* prepended and appended."""
    return ''.join([wrapper, original, wrapper])
print(fence('name', '*'))
"""
Explanation: first taste of how larger programs are built:
* we define basic operations
* combine them in ever-larger chunks
Challenge - function:
“Adding” two strings produces their concatenation: 'a' + 'b' is 'ab'. Write a function called fence that takes two parameters called original and wrapper and returns a new string that has the wrapper character at the beginning and end of the original. A call to your function should look like this:
print(fence('name', '*'))
End of explanation
"""
def analyze(filename):
    """Load an inflammation CSV and plot its per-column mean, max, and min.

    Parameters
    ----------
    filename : str
        Path to a comma-separated data file (presumably one row per
        patient and one column per day -- confirm against the dataset).
    """
    data = numpy.loadtxt(fname=filename, delimiter=',')
    # One figure with three side-by-side panels.
    fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
    axes1 = fig.add_subplot(1, 3, 1)
    axes2 = fig.add_subplot(1, 3, 2)
    axes3 = fig.add_subplot(1, 3, 3)
    # axis=0 computes each statistic down the rows, one value per column.
    axes1.set_ylabel('average')
    axes1.plot(data.mean(axis=0))
    axes2.set_ylabel('max')
    axes2.plot(data.max(axis=0))
    axes3.set_ylabel('min')
    axes3.plot(data.min(axis=0))
    fig.tight_layout()
    matplotlib.pyplot.show(fig)
"""
Explanation: Combining & tidying up
now that we know how to write functions, we can make our analysis easier to read
first let's make an analyze function that generates our plots
End of explanation
"""
def detect_problems(filename):
    """Check an inflammation CSV for the suspicious patterns noted earlier.

    Flags a file when the column maxima look artificial (spot-checked:
    column 0 equals 0 and column 20 equals 20) or when the column minima
    sum to zero; otherwise reports the file as OK. Output goes to stdout.
    """
    data = numpy.loadtxt(fname=filename, delimiter=',')
    # Spot-check two columns of the per-column maxima.
    if data.max(axis=0)[0] == 0 and data.max(axis=0)[20] == 20:
        print('Suspicious looking maxima!')
    elif data.min(axis=0).sum() == 0:
        print('Minima add up to zero!')
    else:
        print('Seems OK!')
"""
Explanation: add another function called detect_problems that checks for those systematics we noticed:
End of explanation
"""
import glob
import numpy
import matplotlib.pyplot
%matplotlib inline
filenames = glob.glob('data/inflammation*.csv')
for f in filenames[:3]:
print(f)
analyze(f)
detect_problems(f)
"""
Explanation: notice this code is more readable
we can reproduce the previous analysis with a much simpler for loop:
End of explanation
"""
def outer(input_string):
    """Return the first and last characters of *input_string*, joined."""
    first, last = input_string[0], input_string[-1]
    return first + last
print(outer('helium'))
"""
Explanation: Notice that rather than jumbling this code together in a giant for loop, we can now read and reuse both ideas separately
We can reproduce the previous analysis with a much simpler for loop
Challenge - functions 2:
If the variable s refers to a string, then s[0] is the string’s first character and s[-1] is its last. Write a function called outer that returns a string made up of just the first and last characters of its input. A call to your function should look like this:
python
print(outer('helium'))
End of explanation
"""
def center(data, desired):
    # Shift the array so its mean equals `desired`; deviations from the
    # mean (the spread) are left unchanged. `data` is a NumPy array.
    return (data - data.mean()) + desired
"""
Explanation: Testing and Documenting
Once we start using functions for reuse, we need to start testing them
Let's write a function to center a dataset around a particular value
End of explanation
"""
z = numpy.zeros((2,2))
print(center(z, 3))
"""
Explanation: since we don't know what values ought to be in our data let's create a matrix of zeros and center around 3.
End of explanation
"""
data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',')
print(center(data, 0))
"""
Explanation: looks good, now let's try and center our real data
End of explanation
"""
# Compare summary statistics before and after centering.
print('original min, mean, and max are:', data.min(), data.mean(), data.max())
centered = center(data, 0)
print('min, mean, and max of centered data are:', centered.min(), centered.mean(), centered.max())
"""
Explanation: hard to tell from the output whether the result is correct, let's test
End of explanation
"""
print('std dev before and after:', data.std(), centered.std())
"""
Explanation: seems right, original mean 6.1 and lower bound from zero is -6.1
we can check standard deviation
End of explanation
"""
print('difference in standard deviations before and after:', data.std() - centered.std())
"""
Explanation: those values look the same but we probably wouldn't notice if they weren't further out
let's take the diff
End of explanation
"""
# center(data, desired): return a new array containing the original data centered around the desired value.
def center(data, desired):
    # shift the data so that its mean lands on the desired value
    shifted = data - data.mean()
    return shifted + desired
"""
Explanation: difference is zero
before we get back to our analysis let's add documentation
documentation is not only good for collaborators, but also the future you
we can add doc this way
End of explanation
"""
def center(data, desired):
    '''Return a new array containing the original data centered around the desired value.

    Only the mean is shifted to equal ``desired``; the spread of the data
    is unchanged. ``data`` is expected to be a NumPy array.
    '''
    return (data - data.mean()) + desired
"""
Explanation: better way: if the first thing in a function is a string that isn't assigned
that string is attached to the function as documentation
End of explanation
"""
help(center)
"""
Explanation: better because we can now ask python's built-in help to show us the documentation
End of explanation
"""
def center(data, desired):
    '''Return a new array containing the original data centered around the desired value.

    Example: center([1, 2, 3], 0) => [-1, 0, 1]
    '''
    return (data - data.mean()) + desired
help(center)
"""
Explanation: a string like this is called the docstring
we don't need to use 3 quotes when we write one but if we do we can use multiple lines
End of explanation
"""
numpy.loadtxt('data/inflammation-01.csv', delimiter=',')
"""
Explanation: Defining Defaults
we have passed parameters to functions in two ways:
directly as in type(data)
and by name as in numpy.loadtxt(fname='something.csv', delimiter=',')
we can pass the filename to loadtxt without naming the fname parameter
End of explanation
"""
numpy.loadtxt('data/inflammation-01.csv', ',')
"""
Explanation: but we still need the delimiter=:
End of explanation
"""
def center(data, desired=0.0):
    '''Return a new array containing the original data centered around the desired value (0 by default).

    Example: center([1, 2, 3], 0) => [-1, 0, 1]
    '''
    shifted = data - data.mean()
    return shifted + desired
"""
Explanation: to understand what's going on and make our functions easier to use
let's re-define our center function like this:
End of explanation
"""
test_data = numpy.zeros((2, 2))
print(center(test_data, 3))
"""
Explanation: key change is the second parameter is now written desired=0.0
if we call the function with two arguments, it works as it did before
End of explanation
"""
more_data = 5 + numpy.zeros((2, 2))
print('data before centering:')
print(more_data)
print('centered data:')
print(center(more_data))
"""
Explanation: but we can now call it with just one parameter
in this case desired is automatically assigned the default value of 0.0
End of explanation
"""
def display(a=1, b=2, c=3):
    """Print the three parameter values, illustrating default arguments."""
    print('a:', a, 'b:', b, 'c:', c)
print('no parameters:')
display()
print('one parameter:')
display(55)
print('two parameters:')
display(55, 66)
"""
Explanation: this is handy:
* if we want a function to work one way, but occasionally need it to do something else, we can allow people to pass a diff parameter when they need to but provide a default
below example shows how python matches values to parameters:
End of explanation
"""
# we can override this behavior by naming value as we pass it in
print('only setting the value of c')
display(c=77)
"""
Explanation: as this shows parameters are matched up from left to right
any that haven't been given a value explicitly get their default value
End of explanation
"""
help(numpy.loadtxt)
"""
Explanation: Let's now look at numpy.loadtxt:
End of explanation
"""
numpy.loadtxt('data/inflammation-01.csv', ',')
"""
Explanation: lots of info but look at this part:
python
loadtxt(fname, dtype=<type 'float'>, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None,
unpack=False, ndmin=0)
tells us that loadtxt has one parameter called fname that doesn't have a default value, & 8 others that do
if we call the function like this:
End of explanation
"""
|
camillescott/boink | notebooks/decision-nodes-Ast_gla.ipynb | mit | k27_df.hash.nunique(), k35_df.hash.nunique()
"""
Explanation: We can find the number of decision nodes in the dBG by counting unique hashes...
End of explanation
"""
k35_df['degree'] = k35_df['l_degree'] + k35_df['r_degree']
k27_df['degree'] = k27_df['l_degree'] + k27_df['r_degree']
"""
Explanation: We'll make a new column for total degree, for convenience.
End of explanation
"""
figsize(18,10)
fig, ax_mat = subplots(ncols=3, nrows=2)
top = ax_mat[0]
sns.distplot(k35_df.degree, kde=False, ax=top[0], bins=8)
sns.distplot(k35_df.l_degree, kde=False, ax=top[1], bins=5)
sns.distplot(k35_df.r_degree, kde=False, ax=top[2], bins=5)
bottom = ax_mat[1]
sns.distplot(k27_df.degree, kde=False, ax=bottom[0], bins=8)
sns.distplot(k27_df.l_degree, kde=False, ax=bottom[1], bins=5)
sns.distplot(k27_df.r_degree, kde=False, ax=bottom[2], bins=5)
"""
Explanation: Let's start with the overal degree distribution during the entire construction process.
End of explanation
"""
figsize(12,8)
sns.distplot(k35_df.position, kde=False, label='K=35')
sns.distplot(k27_df.position, kde=False, label='K=27')
legend()
k35_melted_df = k35_df.melt(id_vars=['hash', 'position'], value_vars=['l_degree', 'r_degree'], )
k27_melted_df = k27_df.melt(id_vars=['hash', 'position'], value_vars=['l_degree', 'r_degree'], )
k27_melted_df.head()
figsize(18,8)
sns.violinplot('position', 'value', 'variable', k27_melted_df)
k35_dnodes_per_read = k35_df.groupby('read_n').count().\
reset_index()[['read_n', 'hash']].rename({'hash': 'n_dnodes'}, axis='columns')
k27_dnodes_per_read = k27_df.groupby('read_n').count().\
reset_index()[['read_n', 'hash']].rename({'hash': 'n_dnodes'}, axis='columns')
ax = k35_dnodes_per_read.rolling(100, min_periods=10, on='read_n').mean().plot(x='read_n',
y='n_dnodes',
label='k = 35')
ax = k27_dnodes_per_read.rolling(100, min_periods=10, on='read_n').mean().plot(x='read_n',
y='n_dnodes',
label='k = 27',
ax=ax)
ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter("{x:,}"))
from goetia.minimizers import WKMinimizer
from goetia.processors import MinimizerProcessor
M = WKMinimizer(10, 25)
S = "GACAACGGTAAAAGTTCTAATGCTGCCGAGTCACGGGAAGGATAGAGTGAGTCCCACCATATGGCGCACC"
print(S)
for kmer, pos in M.get_minimizer_kmers(S):
print(pos * ' ', kmer, sep='')
%%time
proc = MinimizerProcessor(25, 25, 'minimizers.csv')
proc.process('/store/biodb/genomes/fugu/Takifugu_rubripes.FUGU5.dna_rm.toplevel.fa', output_interval=1)
"""
Explanation: So most decision nodes in this dataset have degree 3. Note that a few have degree 2; these forks without handles.
End of explanation
"""
|
afronski/playground-notes | scalable-machine-learning/solutions/ML_lab1_review_student.ipynb | mit | labVersion = 'cs190_week1_v_1_1'
"""
Explanation: Math and Python review and CTR data download
This notebook reviews vector and matrix math, the NumPy Python package, and Python lambda expressions. It also covers downloading the data required for Lab 4, where you will analyze website click-through rates. Part 1 covers vector and matrix math, and you'll do a few exercises by hand. In Part 2, you'll learn about NumPy and use ndarray objects to solve the math exercises. Part 3 provides additional information about NumPy and how it relates to array usage in Spark's MLlib. Part 4 provides an overview of lambda expressions, and you'll wrap up by downloading the dataset for Lab 4.
To move through the notebook just run each of the cells. You can run a cell by pressing "shift-enter", which will compute the current cell and advance to the next cell, or by clicking in a cell and pressing "control-enter", which will compute the current cell and remain in that cell. You should move through the notebook from top to bottom and run all of the cells. If you skip some cells, later cells might not work as expected.
Note that there are several exercises within this notebook. You will need to provide solutions for cells that start with: # TODO: Replace <FILL IN> with appropriate code.
This notebook covers:
Part 1: Math review
Part 2: NumPy
Part 3: Additional NumPy and Spark linear algebra
Part 4: Python lambda expressions
Part 5: CTR data download
End of explanation
"""
x = [3, -6, 0]
y = [4, 8, 16]
# TEST Scalar multiplication: vectors (1a)
# Import test library
from test_helper import Test
Test.assertEqualsHashed(x, 'e460f5b87531a2b60e0f55c31b2e49914f779981',
'incorrect value for vector x')
Test.assertEqualsHashed(y, 'e2d37ff11427dbac7f833a5a7039c0de5a740b1e',
'incorrect value for vector y')
"""
Explanation: Part 1: Math review
(1a) Scalar multiplication: vectors
In this exercise, you will calculate the product of a scalar and a vector by hand and enter the result in the code cell below. Scalar multiplication is straightforward. The resulting vector equals the product of the scalar, which is a single value, and each item in the original vector. In the example below, $ a $ is the scalar (constant) and $ \mathbf{v} $ is the vector. $$ a \mathbf{v} = \begin{bmatrix} a v_1 \\ a v_2 \\ \vdots \\ a v_n \end{bmatrix} $$
Calculate the value of $ \mathbf{x} $: $$ \mathbf{x} = 3 \begin{bmatrix} 1 \\ -2 \\ 0 \end{bmatrix} $$
Calculate the value of $ \mathbf{y} $: $$ \mathbf{y} = 2 \begin{bmatrix} 2 \\ 4 \\ 8 \end{bmatrix} $$
End of explanation
"""
z = [4,10,18]
# TEST Element-wise multiplication: vectors (1b)
Test.assertEqualsHashed(z, '4b5fe28ee2d274d7e0378bf993e28400f66205c2',
'incorrect value for vector z')
"""
Explanation: (1b) Element-wise multiplication: vectors
In this exercise, you will calculate the element-wise multiplication of two vectors by hand and enter the result in the code cell below. You'll later see that element-wise multiplication is the default method when two NumPy arrays are multiplied together. Note we won't be performing element-wise multiplication in future labs, but we are introducing it here to distinguish it from other vector operators, and to because it is a common operations in NumPy, as we will discuss in Part (2b).
The element-wise calculation is as follows: $$ \mathbf{x} \odot \mathbf{y} = \begin{bmatrix} x_1 y_1 \\ x_2 y_2 \\ \vdots \\ x_n y_n \end{bmatrix} $$
Calculate the value of $ \mathbf{z} $: $$ \mathbf{z} = \begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix} \odot \begin{bmatrix} 4 \\ 5 \\ 6 \end{bmatrix} $$
End of explanation
"""
c1 = -11
c2 = 26
# TEST Dot product (1c)
Test.assertEqualsHashed(c1, '8d7a9046b6a6e21d66409ad0849d6ab8aa51007c', 'incorrect value for c1')
Test.assertEqualsHashed(c2, '887309d048beef83ad3eabf2a79a64a389ab1c9f', 'incorrect value for c2')
"""
Explanation: (1c) Dot product
In this exercise, you will calculate the dot product of two vectors by hand and enter the result in the code cell below. Note that the dot product is equivalent to performing element-wise multiplication and then summing the result.
Below, you'll find the calculation for the dot product of two vectors, where each vector has length $ n $: $$ \mathbf{w} \cdot \mathbf{x} = \sum_{i=1}^n w_i x_i $$
Note that you may also see $ \mathbf{w} \cdot \mathbf{x} $ represented as $ \mathbf{w}^\top \mathbf{x} $
Calculate the value for $ c_1 $ based on the dot product of the following two vectors:
$$ c_1 = \begin{bmatrix} 1 \\ -3 \end{bmatrix} \cdot \begin{bmatrix} 4 \\ 5 \end{bmatrix}$$
Calculate the value for $ c_2 $ based on the dot product of the following two vectors:
$$ c_2 = \begin{bmatrix} 3 \\ 4 \\ 5 \end{bmatrix} \cdot \begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix}$$
End of explanation
"""
X = [[ 22, 28 ], [ 49, 64 ]]
Y = [[ 1, 2, 3], [2, 4, 6], [3, 6, 9]]
# TEST Matrix multiplication (1d)
Test.assertEqualsHashed(X, 'c2ada2598d8a499e5dfb66f27a24f444483cba13',
'incorrect value for matrix X')
Test.assertEqualsHashed(Y, 'f985daf651531b7d776523836f3068d4c12e4519',
'incorrect value for matrix Y')
"""
Explanation: (1d) Matrix multiplication
In this exercise, you will calculate the result of multiplying two matrices together by hand and enter the result in the code cell below.
Below, you'll find the calculation for multiplying two matrices together. Note that the number of columns for the first matrix and the number of rows for the second matrix have to be equal and are are represented by $ n $:
$$ [\mathbf{X} \mathbf{Y}]{i,j} = \sum{r=1}^n \mathbf{X}{i,r} \mathbf{Y}{r,j} $$
First, you'll calculate the value for $ \mathbf{X} $.
$$ \mathbf{X} = \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{bmatrix} \begin{bmatrix} 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{bmatrix} $$
Next, you'll perform an outer product and calculate the value for $ \mathbf{Y} $. Note that outer product is just a special case of general matrix multiplication and follows the same rules as normal matrix multiplication.
$$ \mathbf{Y} = \begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix} \begin{bmatrix} 1 & 2 & 3 \end{bmatrix} $$
End of explanation
"""
import numpy as np
simpleArray = np.array([1,2,3])
timesFive = simpleArray * 5
print simpleArray
print timesFive
# TEST Scalar multiplication (2a)
Test.assertTrue(np.all(timesFive == [5, 10, 15]), 'incorrect value for timesFive')
"""
Explanation: Part 2: NumPy
(2a) Scalar multiplication
NumPy is a Python library for working with arrays. NumPy provides abstractions that make it easy to treat these underlying arrays as vectors and matrices. The library is optimized to be fast and memory efficient, and we'll be using it throughout the course. The building block for NumPy is the ndarray, which is a multidimensional array of fixed-size that contains elements of one type (e.g. array of floats).
For this exercise, you'll create a ndarray consisting of the elements [1, 2, 3] and multiply this array by 5. Use np.array() to create the array. Note that you can pass a Python list into np.array(). To perform scalar multiplication with an ndarray just use *.
Note that if you create an array from a Python list of integers you will obtain a one-dimensional array, which is equivalent to a vector for our purposes.
End of explanation
"""
u = np.arange(0, 5, .5)
v = np.arange(5, 10, .5)
elementWise = u * v
dotProduct = np.dot(u, v)
print 'u: {0}'.format(u)
print 'v: {0}'.format(v)
print '\nelementWise\n{0}'.format(elementWise)
print '\ndotProduct\n{0}'.format(dotProduct)
# TEST Element-wise multiplication and dot product (2b)
Test.assertTrue(np.all(elementWise == [ 0., 2.75, 6., 9.75, 14., 18.75, 24., 29.75, 36., 42.75]),
'incorrect value for elementWise')
Test.assertEquals(dotProduct, 183.75, 'incorrect value for dotProduct')
"""
Explanation: (2b) Element-wise multiplication and dot product
NumPy arrays support both element-wise multiplication and dot product. Element-wise multiplication occurs automatically when you use the * operator to multiply two ndarray objects of the same length.
To perform the dot product you can use either np.dot() or np.ndarray.dot(). For example, if you had NumPy arrays x and y, you could compute their dot product four ways: np.dot(x, y), np.dot(y, x), x.dot(y), or y.dot(x).
For this exercise, multiply the arrays u and v element-wise and compute their dot product.
End of explanation
"""
from numpy.linalg import inv
A = np.matrix([[1,2,3,4],[5,6,7,8]])
print 'A:\n{0}'.format(A)
print '\nA transpose:\n{0}'.format(A.T)
AAt = A * np.matrix.transpose(A)
print '\nAAt:\n{0}'.format(AAt)
AAtInv = inv(AAt)
print '\nAAtInv:\n{0}'.format(AAtInv)
print '\nAAtInv * AAt:\n{0}'.format((AAtInv * AAt).round(4))
# TEST Matrix math (2c)
Test.assertTrue(np.all(AAt == np.matrix([[30, 70], [70, 174]])), 'incorrect value for AAt')
Test.assertTrue(np.allclose(AAtInv, np.matrix([[0.54375, -0.21875], [-0.21875, 0.09375]])),
'incorrect value for AAtInv')
"""
Explanation: (2c) Matrix math
With NumPy it is very easy to perform matrix math. You can use np.matrix() to generate a NumPy matrix. Just pass a two-dimensional ndarray or a list of lists to the function. You can perform matrix math on NumPy matrices using *.
You can transpose a matrix by calling numpy.matrix.transpose() or by using .T on the matrix object (e.g. myMatrix.T). Transposing a matrix produces a matrix where the new rows are the columns from the old matrix. For example: $$ \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{bmatrix}^\mathbf{\top} = \begin{bmatrix} 1 & 4 \\ 2 & 5 \\ 3 & 6 \end{bmatrix} $$
Inverting a matrix can be done using numpy.linalg.inv(). Note that only square matrices can be inverted, and square matrices are not guaranteed to have an inverse. If the inverse exists, then multiplying a matrix by its inverse will produce the identity matrix. $ \scriptsize ( \mathbf{A}^{-1} \mathbf{A} = \mathbf{I_n} ) $ The identity matrix $ \scriptsize \mathbf{I_n} $ has ones along its diagonal and zero elsewhere. $$ \mathbf{I_n} = \begin{bmatrix} 1 & 0 & 0 & \dots & 0 \\ 0 & 1 & 0 & \dots & 0 \\ 0 & 0 & 1 & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \dots & 1 \end{bmatrix} $$
For this exercise, multiply $ \mathbf{A} $ times its transpose $ ( \mathbf{A}^\top ) $ and then calculate the inverse of the result $ ( [ \mathbf{A} \mathbf{A}^\top ]^{-1} ) $.
End of explanation
"""
features = np.array([1, 2, 3, 4])
print 'features:\n{0}'.format(features)
lastThree = features[-3:]
print '\nlastThree:\n{0}'.format(lastThree)
# TEST Slices (3a)
Test.assertTrue(np.all(lastThree == [2, 3, 4]), 'incorrect value for lastThree')
"""
Explanation: Part 3: Additional NumPy and Spark linear algebra
(3a) Slices
You can select a subset of a one-dimensional NumPy ndarray's elements by using slices. These slices operate the same way as slices for Python lists. For example, [0, 1, 2, 3][:2] returns the first two elements [0, 1]. NumPy, additionally, has more sophisticated slicing that allows slicing across multiple dimensions; however, you'll only need to use basic slices in future labs for this course.
Note that if no index is placed to the left of a :, it is equivalent to starting at 0, and hence [0, 1, 2, 3][:2] and [0, 1, 2, 3][0:2] yield the same result. Similarly, if no index is placed to the right of a :, it is equivalent to slicing to the end of the object. Also, you can use negative indices to index relative to the end of the object, so [-2:] would return the last two elements of the object.
For this exercise, return the last 3 elements of the array features.
End of explanation
"""
zeros = np.zeros(8)
ones = np.ones(8)
print 'zeros:\n{0}'.format(zeros)
print '\nones:\n{0}'.format(ones)
zerosThenOnes = np.hstack((zeros, ones))
zerosAboveOnes = np.vstack((zeros, ones))
print '\nzerosThenOnes:\n{0}'.format(zerosThenOnes)
print '\nzerosAboveOnes:\n{0}'.format(zerosAboveOnes)
# TEST Combining ndarray objects (3b)
Test.assertTrue(np.all(zerosThenOnes == [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]),
'incorrect value for zerosThenOnes')
Test.assertTrue(np.all(zerosAboveOnes == [[0,0,0,0,0,0,0,0],[1,1,1,1,1,1,1,1]]),
'incorrect value for zerosAboveOnes')
"""
Explanation: (3b) Combining ndarray objects
NumPy provides many functions for creating new arrays from existing arrays. We'll explore two functions: np.hstack(), which allows you to combine arrays column-wise, and np.vstack(), which allows you to combine arrays row-wise. Note that both np.hstack() and np.vstack() take in a tuple of arrays as their first argument. To horizontally combine three arrays a, b, and c, you would run np.hstack((a, b, c)).
If we had two arrays: a = [1, 2, 3, 4] and b = [5, 6, 7, 8], we could use np.vstack((a, b)) to produce the two-dimensional array: $$ \begin{bmatrix} 1 & 2 & 3 & 4 \\ 5 & 6 & 7 & 8 \end{bmatrix} $$
For this exercise, you'll combine the zeros and ones arrays both horizontally (column-wise) and vertically (row-wise).
Note that the result of stacking two arrays is an ndarray. If you need the result to be a matrix, you can call np.matrix() on the result, which will return a NumPy matrix.
End of explanation
"""
from pyspark.mllib.linalg import DenseVector

# Exercise (3c): build a DenseVector and take its dot product with a NumPy array.
numpyVector = np.array([-3, -4, 5])
print '\nnumpyVector:\n{0}'.format(numpyVector)
# DenseVector stores its values internally as a NumPy float64 array.
myDenseVector = DenseVector([3.0, 4.0, 5.0])
# Dot product: 3*(-3) + 4*(-4) + 5*5 = 0.0
denseDotProduct = DenseVector.dot(myDenseVector, numpyVector)
print 'myDenseVector:\n{0}'.format(myDenseVector)
print '\ndenseDotProduct:\n{0}'.format(denseDotProduct)
# TEST PySpark's DenseVector (3c)
Test.assertTrue(isinstance(myDenseVector, DenseVector), 'myDenseVector is not a DenseVector')
Test.assertTrue(np.allclose(myDenseVector, np.array([3., 4., 5.])),
                'incorrect value for myDenseVector')
Test.assertTrue(np.allclose(denseDotProduct, 0.0), 'incorrect value for denseDotProduct')
"""
Explanation: (3c) PySpark's DenseVector
PySpark provides a DenseVector class within the module pyspark.mllib.linalg. DenseVector is used to store arrays of values for use in PySpark. DenseVector actually stores values in a NumPy array and delegates calculations to that object. You can create a new DenseVector using DenseVector() and passing in a NumPy array or a Python list.
DenseVector implements several functions. The only function needed for this course is DenseVector.dot(), which operates just like np.ndarray.dot().
Note that DenseVector stores all values as np.float64, so even if you pass in an NumPy array of integers, the resulting DenseVector will contain floating-point numbers. Also, DenseVector objects exist locally and are not inherently distributed. DenseVector objects can be used in the distributed setting by either passing functions that contain them to resilient distributed dataset (RDD) transformations or by distributing them directly as RDDs. You'll learn more about RDDs in the spark tutorial.
For this exercise, create a DenseVector consisting of the values [3.0, 4.0, 5.0] and compute the dot product of this vector with numpyVector.
End of explanation
"""
def addS(x):
    """Return `x` with an 's' appended (e.g. 'cat' -> 'cats')."""
    return x + 's'
print type(addS)
print addS
print addS('cat')
# The equivalent anonymous function, bound to a name; behavior matches addS.
addSLambda = lambda x: x + 's'
print type(addSLambda)
print addSLambda
print addSLambda('cat')
# Exercise (4a): a lambda that multiplies its single input by 10.
multiplyByTen = lambda x: x * 10
print multiplyByTen(5)
print '\n', multiplyByTen
# TEST Python lambda expressions (4a)
Test.assertEquals(multiplyByTen(10), 100, 'incorrect definition for multiplyByTen')
"""
Explanation: Part 4: Python lambda expressions
(4a) Lambda is an anonymous function
We can use a lambda expression to create a function. To do this, you type lambda followed by the names of the function's parameters separated by commas, followed by a :, and then the expression statement that the function will evaluate. For example, lambda x, y: x + y is an anonymous function that computes the sum of its two inputs.
Lambda expressions return a function when evaluated. The function is not bound to any variable, which is why lambdas are associated with anonymous functions. However, it is possible to assign the function to a variable. Lambda expressions are particularly useful when you need to pass a simple function into another function. In that case, the lambda expression generates a function that is bound to the parameter being passed into the function.
Below, we'll see an example of how we can bind the function returned by a lambda expression to a variable named addSLambda. From this example, we can see that lambda provides a shortcut for creating a simple function. Note that the behavior of the function created using def and the function created using lambda is equivalent. Both functions have the same type and return the same results. The only differences are the names and the way they were created.
For this exercise, first run the two cells below to compare a function created using def with a corresponding anonymous function. Next, write your own lambda expression that creates a function that multiplies its input (a single parameter) by 10.
Here are some additional references that explain lambdas: Lambda Functions, Lambda Tutorial, and Python Functions.
End of explanation
"""
def plus(x, y):
    """Return the sum of `x` and `y`."""
    return x + y

def minus(x, y):
    """Return `x` minus `y`."""
    return x - y

# Functions are first-class objects, so they can be stored in a list and
# invoked by index.
functions = [plus, minus]
print functions[0](4, 5)
print functions[1](4, 5)
# Exercise (4b): the same two functions recreated as lambda expressions.
lambdaFunctions = [lambda x,y: x + y , lambda x,y: x - y]
print lambdaFunctions[0](4, 5)
print lambdaFunctions[1](4, 5)
# TEST lambda fewer steps than def (4b)
Test.assertEquals(lambdaFunctions[0](10, 10), 20, 'incorrect first lambdaFunction')
Test.assertEquals(lambdaFunctions[1](10, 10), 0, 'incorrect second lambdaFunction')
"""
Explanation: (4b) lambda fewer steps than def
lambda generates a function and returns it, while def generates a function and assigns it to a name. The function returned by lambda also automatically returns the value of its expression statement, which reduces the amount of code that needs to be written.
For this exercise, recreate the def behavior using lambda. Note that since a lambda expression returns a function, it can be used anywhere an object is expected. For example, you can create a list of functions where each function in the list was generated by a lambda expression.
End of explanation
"""
a1 = lambda x: x[0] + x[1]
a2 = lambda (x0, x1): x0 + x1
print 'a1( (3,4) ) = {0}'.format( a1( (3,4) ) )
print 'a2( (3,4) ) = {0}'.format( a2( (3,4) ) )
b1 = lambda x, y: (x[0] + y[0], x[1] + y[1])
b2 = lambda (x0, x1), (y0, y1): (x0 + x1, y0 + y1)
print '\nb1( (1,2), (3,4) ) = {0}'.format( b1( (1,2), (3,4) ) )
print 'b2( (1,2), (3,4) ) = {0}'.format( b1( (1,2), (3,4) ) )
swap1 = lambda x: (x[1], x[0])
swap2 = lambda (x0, x1): (x1, x0)
print 'swap1((1, 2)) = {0}'.format(swap1((1, 2)))
print 'swap2((1, 2)) = {0}'.format(swap2((1, 2)))
swapOrder = lambda (x0, x1, x2): (x1, x2, x0)
print 'swapOrder((1, 2, 3)) = {0}'.format(swapOrder((1, 2, 3)))
sumThree = lambda (x0, x1), (y0, y1), (z0, z1): (x0 + y0 + z0, x1 + y1 + z1)
print 'sumThree((1, 2), (3, 4), (5, 6)) = {0}'.format(sumThree((1, 2), (3, 4), (5, 6)))
# TEST Lambda expression arguments (4c)
Test.assertEquals(swap1((1, 2)), (2, 1), 'incorrect definition for swap1')
Test.assertEquals(swap2((1, 2)), (2, 1), 'incorrect definition for swap2')
# Typo fix: failure message previously read 'incorrect definition fo swapOrder'.
Test.assertEquals(swapOrder((1, 2, 3)), (2, 3, 1), 'incorrect definition for swapOrder')
Test.assertEquals(sumThree((1, 2), (3, 4), (5, 6)), (9, 12), 'incorrect definition for sumThree')
"""
Explanation: (4c) Lambda expression arguments
Lambda expressions can be used to generate functions that take in zero or more parameters. The syntax for lambda allows for multiple ways to define the same function. For example, we might want to create a function that takes in a single parameter, where the parameter is a tuple consisting of two values, and the function adds the two values together. The syntax could be either: lambda x: x[0] + x[1] or lambda (x0, x1): x0 + x1. If we called either function on the tuple (3, 4) it would return 7. Note that the second lambda relies on the tuple (3, 4) being unpacked automatically, which means that x0 is assigned the value 3 and x1 is assigned the value 4.
As another example, consider the following two-parameter lambda expressions: lambda x, y: (x[0] + y[0], x[1] + y[1]) and lambda (x0, x1), (y0, y1): (x0 + y0, x1 + y1). The result of applying either of these functions to tuples (1, 2) and (3, 4) would be the tuple (4, 6).
For this exercise: you'll create one-parameter functions swap1 and swap2 that swap the order of a tuple; a one-parameter function swapOrder that takes in a tuple with three values and changes the order to: second element, third element, first element; and finally, a three-parameter function sumThree that takes in three tuples, each with two values, and returns a tuple containing two values: the sum of the first element of each tuple and the sum of second element of each tuple.
End of explanation
"""
import traceback

# A lambda body must be a single expression; it cannot contain a simple
# statement such as `print` (Python 2). Compiling one raises a SyntaxError,
# which we display via the traceback module.
try:
    exec "lambda x: print x"
except:
    traceback.print_exc()
"""
Explanation: (4d) Restrictions on lambda expressions
Lambda expressions consist of a single expression statement and cannot contain other simple statements. In short, this means that the lambda expression needs to evaluate to a value and exist on a single logical line. If more complex logic is necessary, use def in place of lambda.
Expression statements evaluate to a value (sometimes that value is None). Lambda expressions automatically return the value of their expression statement. In fact, a return statement in a lambda would raise a SyntaxError.
The following Python keywords refer to simple statements that cannot be used in a lambda expression: assert, pass, del, print, return, yield, raise, break, continue, import, global, and exec. Also, note that assignment statements (=) and augmented assignment statements (e.g. +=) cannot be used either.
End of explanation
"""
class FunctionalWrapper(object):
    """Wrap a collection so map/filter/reduce calls can be chained,
    mimicking the RDD-style API used later in PySpark.

    `map` and `filter` return new FunctionalWrapper instances (so calls
    compose); `reduce` returns a plain value.
    """
    def __init__(self, data):
        # `data` is the underlying collection being wrapped.
        self.data = data
    def map(self, function):
        """Call `map` on the items in `data` using the provided `function`"""
        return FunctionalWrapper(map(function, self.data))
    def reduce(self, function):
        """Call `reduce` on the items in `data` using the provided `function`"""
        return reduce(function, self.data)
    def filter(self, function):
        """Call `filter` on the items in `data` using the provided `function`"""
        return FunctionalWrapper(filter(function, self.data))
    def __eq__(self, other):
        # Wrappers are equal when the other object is the same class with
        # identical instance state (i.e. the same wrapped data).
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)
    # Delegate attribute access, indexing and string conversion to the
    # wrapped collection so the wrapper is mostly transparent.
    def __getattr__(self, name): return getattr(self.data, name)
    def __getitem__(self, k): return self.data.__getitem__(k)
    def __repr__(self): return 'FunctionalWrapper({0})'.format(repr(self.data))
    def __str__(self): return 'FunctionalWrapper({0})'.format(str(self.data))
# Map example
# Create some data
mapData = FunctionalWrapper(range(5))
# Define a function to be applied to each element
f = lambda x: x + 3
# Imperative programming: loop through and create a new object by applying f
mapResult = FunctionalWrapper([])  # Initialize the result
for element in mapData:
    mapResult.append(f(element))  # Apply f and save the new value
print 'Result from for loop: {0}'.format(mapResult)
# Functional programming: use map rather than a for loop
print 'Result from map call: {0}'.format(mapData.map(f))
# Note that the results are the same but that the map function abstracts away the implementation
# and requires less code

# Exercise (4e): one lambda each for map, filter and reduce.
dataset = FunctionalWrapper(range(10))
mapResult = dataset.map(lambda x: x * 5)             # multiply each element by 5
filterResult = dataset.filter(lambda x: x % 2 == 0)  # keep only even numbers
reduceResult = dataset.reduce(lambda x,y: x + y)     # sum all the elements
print 'mapResult: {0}'.format(mapResult)
print '\nfilterResult: {0}'.format(filterResult)
print '\nreduceResult: {0}'.format(reduceResult)
# TEST Functional programming (4e)
Test.assertEquals(mapResult, FunctionalWrapper([0, 5, 10, 15, 20, 25, 30, 35, 40, 45]),
                  'incorrect value for mapResult')
Test.assertEquals(filterResult, FunctionalWrapper([0, 2, 4, 6, 8]),
                  'incorrect value for filterResult')
Test.assertEquals(reduceResult, 45, 'incorrect value for reduceResult')
"""
Explanation: (4e) Functional programming
The lambda examples we have shown so far have been somewhat contrived. This is because they were created to demonstrate the differences and similarities between lambda and def. An excellent use case for lambda expressions is functional programming. In functional programming, you will often pass functions to other functions as parameters, and lambda can be used to reduce the amount of code necessary and to make the code more readable.
Some commonly used functions in functional programming are map, filter, and reduce. Map transforms a series of elements by applying a function individually to each element in the series. It then returns the series of transformed elements. Filter also applies a function individually to each element in a series; however, with filter, this function evaluates to True or False and only elements that evaluate to True are retained. Finally, reduce operates on pairs of elements in a series. It applies a function that takes in two values and returns a single value. Using this function, reduce is able to, iteratively, "reduce" a series to a single value.
For this exercise, you'll create three simple lambda functions, one each for use in map, filter, and reduce. The map lambda will multiply its input by 5, the filter lambda will evaluate to True for even numbers, and the reduce lambda will add two numbers. Note that we have created a class called FunctionalWrapper so that the syntax for this exercise matches the syntax you'll see in PySpark.
Note that map requires a one parameter function that returns a new value, filter requires a one parameter function that returns True or False, and reduce requires a two parameter function that combines the two parameters and returns a new value.
End of explanation
"""
# Example of a multi-line expression statement
# Note that placing parentheses around the expression allows it to exist on multiple lines without
# causing a syntax error.
(dataset
 .map(lambda x: x + 2)
 .reduce(lambda x, y: x * y))
# Exercise (4f): compose map, filter and reduce in sequence on `dataset`.
finalSum = dataset.map(lambda x: x * 5).filter(lambda x: x % 2 == 0).reduce(lambda a,b: a + b)
print finalSum
# TEST Composability (4f)
Test.assertEquals(finalSum, 100, 'incorrect value for finalSum')
"""
Explanation: (4f) Composability
Since our methods for map and filter in the FunctionalWrapper class return FunctionalWrapper objects, we can compose (or chain) together our function calls. For example, dataset.map(f1).filter(f2).reduce(f3), where f1, f2, and f3 are functions or lambda expressions, first applies a map operation to dataset, then filters the result from map, and finally reduces the result from the first two operations.
Note that when we compose (chain) an operation, the output of one operation becomes the input for the next operation, and operations are applied from left to right. It's likely you've seen chaining used with Python strings. For example, 'Split this'.lower().split(' ') first returns a new string object 'split this' and then split(' ') is called on that string to produce ['split', 'this'].
For this exercise, reuse your lambda expressions from (4e) but apply them to dataset in the sequence: map, filter, reduce. Note that since we are composing the operations our result will be different than in (4e). Also, we can write our operations on separate lines to improve readability.
End of explanation
"""
from IPython.lib.display import IFrame

# Render Criteo's data sharing agreement inline so the download URL can be
# copied after accepting it.
IFrame("http://labs.criteo.com/downloads/2014-kaggle-display-advertising-challenge-dataset/",
       600, 350)

import glob
import os.path
import tarfile
import urllib
import urlparse

# Download URL for the Criteo sample dataset (replace with the URL copied
# from the agreement page if it differs).
url = 'http://labs.criteo.com/wp-content/uploads/2015/04/dac_sample.tar.gz'
url = url.strip()
# Target location of the extracted file: data/cs190/dac_sample.txt
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
inputDir = os.path.split(fileName)[0]
def extractTar(check = False):
    """Extract dac_sample.txt from a local dac_sample tar.gz archive.

    Parameters
    ----------
    check : bool
        When True, only probe whether a readable archive exists; no error
        message is printed for an unreadable archive.

    Returns
    -------
    bool
        True when dac_sample.txt was extracted successfully, False otherwise.
    """
    # Look for any local copy of the sample archive.
    tars = glob.glob('dac_sample*.tar.gz*')
    if check and len(tars) == 0:
        return False
    if len(tars) > 0:
        try:
            tarFile = tarfile.open(tars[0])
        except tarfile.ReadError:
            if not check:
                print 'Unable to open tar.gz file. Check your URL.'
            return False
        # Extract only the sample text file into the data directory.
        tarFile.extract('dac_sample.txt', path=inputDir)
        print 'Successfully extracted: dac_sample.txt'
        return True
    else:
        print 'You need to retry the download with the correct url.'
        print ('Alternatively, you can upload the dac_sample.tar.gz file to your Jupyter root ' +
               'directory')
        return False
# Download the archive only when neither the extracted file nor a local
# archive is already available.
if os.path.isfile(fileName):
    print 'File is already available. Nothing to do.'
elif extractTar(check = True):
    print 'tar.gz file was already available.'
elif not url.endswith('dac_sample.tar.gz'):
    print 'Check your download url. Are you downloading the Sample dataset?'
else:
    # Download the file and store it in the same directory as this notebook
    try:
        urllib.urlretrieve(url, os.path.basename(urlparse.urlsplit(url).path))
    except IOError:
        print 'Unable to download and store: {0}'.format(url)
    extractTar()

import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
if os.path.isfile(fileName):
    # Load the data as an RDD with 2 partitions and normalize the separator.
    rawData = (sc
               .textFile(fileName, 2)
               .map(lambda x: x.replace('\t', ',')))  # work with either ',' or '\t' separated data
    print rawData.take(1)

rawDataCount = rawData.count()
print rawDataCount
# This line tests that the correct number of observations have been loaded
assert rawDataCount == 100000, 'incorrect count for rawData'
if rawDataCount == 100000:
    print 'Criteo data loaded successfully!'
"""
Explanation: Part 5: CTR data download
Lab four will explore website click-through data provided by Criteo. To obtain the data, you must first accept Criteo's data sharing agreement. Below is the agreement from Criteo. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the # TODO cell below. The file is 8.4 MB compressed. The script below will download the file to the virtual machine (VM) and then extract the data.
If running the cell below does not render a webpage, open the Criteo agreement in a separate browser tab. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the # TODO cell below.
Note that the download could take a few minutes, depending upon your connection speed.
End of explanation
"""
|
pybel/pybel-notebooks | summary/Summarizing Multiple Graphs Together.ipynb | apache-2.0 | import os
import time
import sys
import pybel
import pybel_tools
from pybel_tools.summary import info_str
"""
Explanation: Summarizing Multiple Graphs Together
Author: Charles Tapley Hoyt
Estimated Run Time: 45 seconds
This notebook shows how to combine multiple graphs from different sources and summarize them together. This might be useful during projects where multiple curators are creating BEL scripts that should be joined for scientific use, but for provenance, should be kept separate.
Imports
End of explanation
"""
# Record the Python interpreter version and the run timestamp for provenance.
print(sys.version)
print(time.asctime())
"""
Explanation: Environment
End of explanation
"""
# Record the PyBEL and PyBEL-Tools versions used for this analysis.
pybel.utils.get_version()
pybel_tools.utils.get_version()
"""
Explanation: Dependencies
End of explanation
"""
# Root of the BMS repository, taken from the BMS_BASE environment variable.
bms_base = os.environ['BMS_BASE']
# CBN (Causal Biological Networks) directories for the three species, v2.0.
human_dir = os.path.join(bms_base, 'cbn', 'Human-2.0')
mouse_dir = os.path.join(bms_base, 'cbn', 'Mouse-2.0')
rat_dir = os.path.join(bms_base, 'cbn', 'Rat-2.0')
"""
Explanation: Setup
End of explanation
"""
%%time
# Load every pickled BEL graph from the three species directories.
graphs = []
for d in (human_dir, mouse_dir, rat_dir):
    for p in os.listdir(d):
        # Skip anything that is not a pickled graph.
        if not p.endswith('gpickle'):
            continue
        path = os.path.join(d, p)
        g = pybel.from_pickle(path)
        graphs.append(g)

len(graphs)
"""
Explanation: Data
In this notebook, pickled instances of networks from the Causal Biological Networks database are used.
End of explanation
"""
%%time
# Merge all graphs into one; union keeps every node and edge from each input.
combine = pybel.struct.union(graphs)
"""
Explanation: Processing
The graphs are combine with the union function, which retains all node and edges from each graph
End of explanation
"""
# Print a human-readable summary of the combined network.
print(info_str(combine))
"""
Explanation: The info_str function creates a short text summary of the network. The information is generated with info_json which is more useful programmatically.
End of explanation
"""
|
navierula/Subreddit-Analysis-on-Eating-Disorders | Data Preprocessing.ipynb | mit | import pandas as pd
json_file = 'sample_data'
# Show the column names of the line-delimited JSON sample
# (list() over a DataFrame yields its column labels).
list(pd.read_json(json_file, lines=True))
"""
Explanation: Load a sample of the raw JSON data into pandas.
End of explanation
"""
import csv
import json
from nltk.tokenize import TweetTokenizer
from tqdm import tqdm
# Comments shorter than this many word tokens are dropped.
MIN_NUM_WORD_TOKENS = 10
TOTAL_NUM_LINES = 53851542 # $ wc -l data_full.json
PBAR_UPDATE_SIZE = 10000  # how often (in lines) to refresh the progress bar
tokenizer = TweetTokenizer()  # NLTK tokenizer suited to social-media text
def _ok_to_write(entries):
    """Decide whether a raw comment entry should be written to the CSV.

    An entry is kept only when its author still exists, its body has not
    been deleted, and the body has at least MIN_NUM_WORD_TOKENS tokens.
    """
    if entries['author'] == '[deleted]':
        return False
    body = entries['body']
    if body == '[deleted]':
        return False
    return len(tokenizer.tokenize(body)) >= MIN_NUM_WORD_TOKENS
# Columns carried over from the raw JSON into the output CSV.
out_columns = [
    'author',
    'body',
    'subreddit',
    'subreddit_id',
    'score',
]
in_filename = 'data_full.json'
out_filename = 'data_full_preprocessed.csv'
count = 0  # number of input lines processed
pbar = tqdm(total=TOTAL_NUM_LINES)
with open(out_filename, 'w') as o:
    # extrasaction='ignore' silently drops JSON fields not in out_columns.
    writer = csv.DictWriter(o, fieldnames=out_columns, extrasaction='ignore',
                            delimiter=',', quoting=csv.QUOTE_MINIMAL)
    writer.writeheader()
    with open(in_filename, 'r') as f:
        for line in f:
            count += 1
            # Refresh the progress bar in batches to keep overhead low.
            if count % PBAR_UPDATE_SIZE == 0:
                pbar.update(PBAR_UPDATE_SIZE)
            entries = json.loads(line)
            if _ok_to_write(entries):
                writer.writerow(entries)

print('Done. Processed {} lines total.'.format(count))
"""
Explanation: Transform the full JSON file into a CSV, removing any stuff that we won't need:
[deleted] users or comments
comments with <10 tokens
(WARNING: this takes ~2.5 hours)
End of explanation
"""
import pandas as pd
from tqdm import tqdm
from nltk.corpus import wordnet
from nltk.stem.porter import *
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
tokenizer = TweetTokenizer()
wordnet_lemmatizer = WordNetLemmatizer()
# Create synonym sets for obesity and anorexia
def syn_set(word_list):
    """Return the set of WordNet lemma names for every sense of each word."""
    return {lemma.name()
            for word in word_list
            for synset in wordnet.synsets(word)
            for lemma in synset.lemmas()}

OBESITY_SYNS = syn_set(['obesity'])
ANOREXIA_SYNS = syn_set(['anorexia'])
def row_filter_fn(df, syns):
    """Returns True if the row should be included, False otherwise."""
    # Lemmatize each lowercased token and test for overlap with the synonyms.
    lemmas = {wordnet_lemmatizer.lemmatize(token.lower())
              for token in tokenizer.tokenize(df)}
    return bool(lemmas & syns)
csv_filename = 'data_full_preprocessed.csv'
chunksize = 10000  # rows per pandas chunk; keeps memory usage bounded
count = 0  # number of chunks processed
obesity_data_frames = []
anorexia_data_frames = []
# Stream the CSV in chunks, keeping the rows whose body mentions each topic.
for chunk in tqdm(pd.read_csv(csv_filename, chunksize=chunksize)):
    obesity_df = chunk[chunk['body'].apply(row_filter_fn, syns=OBESITY_SYNS)]
    if not obesity_df.empty:
        obesity_data_frames.append(obesity_df)
    anorexia_df = chunk[chunk['body'].apply(row_filter_fn, syns=ANOREXIA_SYNS)]
    if not anorexia_df.empty:
        anorexia_data_frames.append(anorexia_df)
    count += 1
    #if count == 100: break
print('Total # chunks processed: {}.'.format(count))
# Write out to CSVs.
pd.concat(obesity_data_frames).to_csv('obesity.csv', index=False)
pd.concat(anorexia_data_frames).to_csv('anorexia.csv', index=False)
"""
Explanation: Creates CSVs of text from comments made by users who have posted about anorexia or obesity.
End of explanation
"""
|
zzsza/Datascience_School | 30. 딥러닝/01. 신경망 기초 이론.ipynb | mit | %%tikz
% Figure: a single perceptron with three inputs x1..x3, a constant bias
% input (1), weighted edges w1..w3 and b, activation node z, and output y.
\tikzstyle{neuron}=[circle, draw, minimum size=23pt,inner sep=0pt]
\tikzstyle{bias}=[text centered]
\node[neuron] (node) at (2,0) {$z$};
\node[neuron] (x1) at (0, 1) {$x_1$};
\node[neuron] (x2) at (0, 0) {$x_2$};
\node[neuron] (x3) at (0,-1) {$x_3$};
\node[neuron] (b) at (0,-2) {$1$};
\node[neuron] (output) at (4,0) {$y$};
\draw[->] (x1) -- node[above] {$w_1$} (node);
\draw[->] (x2) -- node[above] {$w_2$} (node);
\draw[->] (x3) -- node[above] {$w_3$} (node);
\draw[->] (b) -- node[above] {$b$} (node);
\draw[->] (node) -- (output);
"""
Explanation: 신경망 기초 이론
신경망(neural network) 모형은 퍼셉트론, 서포트 벡터 머신, 로지스틱 회귀 등의 분류 모형과 달리 기저 함수(basis function)도 사용자 파라미터에 의해 변화할 수 있는 적응형 기저 함수 모형(adaptive basis function model)이며 구조적으로는 여러개의 퍼셉트론을 쌓아놓은 형태이므로 MLP(multi-layer perceptron)으로도 불린다.
퍼셉트론 복습
다음 그림과 같이 독립 변수 벡터가 3차원인 간단한 퍼셉트론 모형을 가정한다.
End of explanation
"""
%%tikz
% Figure: perceptron implementing a NAND gate (weights -2 and -2, bias 3).
\tikzstyle{neuron}=[circle, draw, minimum size=23pt,inner sep=0pt, node distance=2cm]
\node[neuron] (node) {$z$};
\node[neuron] (x2) [left of=node] {$x_2$};
\node[neuron] (x1) [above of=x2] {$x_1$};
\node[neuron] (b) [below of=x2] {$1$};
\node[neuron] (output) [right of=node] {$y$};
\draw[->] (x1) -- node[above=0.1] {$-2$} (node);
\draw[->] (x2) -- node[above] {$-2$} (node);
\draw[->] (b) -- node[above=0.1] {$3$} (node);
\draw[->] (node) -- (output);
"""
Explanation: 입력 $x$
$$ x_1,x_2,x_3 $$
가중치 $w$
$$ w_1, w_2, w_3 $$
상수항(bias) $b$를 포함한 활성화값(activation)
$$ a = \sum_{j=1}^3 w_j x_j + b $$
비선형 활성화 함수 $h$
$$ z = h(a) = h \left( \sum_{j=1}^3 w_j x_j + b \right) $$
출력 $y$
$$
y =
\begin{cases}
0 & \text{if } z \leq 0, \
1 & \text{if } z > 0
\end{cases}
$$
이런 퍼셉트론에서 $x$ 대신 기저 함수를 적용한 $\phi(x)$를 사용하면 XOR 문제 등의 비선형 문제를 해결할 수 있다. 그러나 고정된 기저 함수를 사용해야 하므로 문제에 맞는 기저 함수를 찾아야 한다는 단점이 있다.
만약 기저 함수 $\phi(x)$의 형태를 추가적인 모수 $w^{(1)}$, $b^{(1)}$를 사용하여 조절할 수 있다면 즉, 기저함수 $\phi(x;w^{(1)}, b^{(1)})$ 를 사용하면 $w^{(1)}$ 값을 바꾸는 것만으로 다양한 기저 함수를 시도할 수 있다.
$$ z = h \left( \sum_{j=1} w_j^{(2)} \phi_j(x ; w^{(1)}_j, b^{(1)}_j) + b^{(2)} \right) $$
신경망은 다음과 같이 원래 퍼셉트론과 같은 형태의 적응형 기저함수를 사용한 모형이다.
$$ \phi_j(x ; w^{(1)}j, b^{(1)}_j) = h \left( \sum{i=1} w_{ji}^{(1)} x_i + b_j^{(1)} \right) $$
즉 전체 모형은 다음과 같다.
$$ z = h \left( \sum_{j=1} w_j^{(2)} h \left( \sum_{i=1} w_{ji}^{(1)} x_i + b_j^{(1)} \right) + b^{(2)} \right) $$
일반적으로 활성화 함수 $h$ 는 다음과 같은 시그모이드 함수 $\sigma$를 사용한다.
$$
\begin{eqnarray}
z = \sigma(a) \equiv \frac{1}{1+e^{-a}}.
\end{eqnarray}
$$
$$
\begin{eqnarray}
\frac{1}{1+\exp(-\sum_j w_j x_j-b)}
\end{eqnarray}
$$
이 시그모이드 함수의 특징은 다음과 같은 미분값을 가진다는 것이다.
$$ \sigma' = \sigma(1-\sigma) $$
퍼셉트론을 사용한 XOR 문제 해결법
퍼셉트론를 연속적으로 연결하여 비선형 문제를 해결하는 방법은 이미 디지털 회로 설계에서 사용되던 방법이다.
퍼셉트론의 가중치를 적절히 조정하면 다음과 같은 AND / OR 등의 디지털 게이트(gate)를 제작할 수 있다.
예를 들어 $w_1 = -2$, $w_2 = -2$, $b = 3$ 인 퍼셉트론은 NAND 게이트를 구현한다.
End of explanation
"""
%%tikz --size 600,400
% Figure: weight notation w^l_{j,k} -- the connection from neuron k in
% layer l-1 to neuron j in layer l (highlighted with a thick arrow).
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm]
\node () at (0, 4.5) {$l-1$th layer};
\node () at (4, 4.5) {$l$th layer};
\node[neuron] (i1) at (0, 2) {l-1, k};
\node[neuron] (i2) at (0, -2) {l-1, k+1};
\node[neuron] (h11) at (4, 3) {l, j-1};
\node[neuron] (h12) at (4, 0) {l, j};
\node[neuron] (h13) at (4, -3) {l, j+1};
\draw[->] (i1) -- (h11);
\draw[->] (i2) -- (h11);
\draw[->, line width=0.9mm] (i1) -- node[above=0.2] {$w^{l}_{j,k}$ } (h12);
\draw[->] (i2) -- (h12);
\draw[->] (i1) -- (h13);
\draw[->] (i2) -- (h13);
"""
Explanation: <table style="display: inline-table; margin-right: 30pt;">
<tbody><tr style="background:#def; text-align:center;">
<td colspan="2" style="text-align:center;"><b>INPUT</b></td>
<td colspan="3" style="text-align:center;"><b>OUTPUT</b></td>
</tr>
<tr style="background:#def; text-align:center;">
<td>A</td>
<td>B</td>
<td>A AND B</td>
<td>A NAND B</td>
<td>A XOR B</td>
</tr>
<tr style="background:#dfd; text-align:center;">
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
</tr>
<tr style="background:#dfd; text-align:center;">
<td>0</td>
<td>1</td>
<td>0</td>
<td>1</td>
<td>1</td>
</tr>
<tr style="background:#dfd; text-align:center;">
<td>1</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>1</td>
</tr>
<tr style="background:#dfd; text-align:center;">
<td>1</td>
<td>1</td>
<td>1</td>
<td>0</td>
<td>0</td>
</tr>
</tbody></table>
$x_1 = 0$, $x_2 = 0$
$ (−2)\times 0+(−2)\times 0+3=3 > 0 \rightarrow 1$
$x_1 = 0$, $x_2 = 1$
$ (−2)\times 0+(−2)\times 1+3=1 > 0 \rightarrow 1$
$x_1 = 1$, $x_2 = 0$
$ (−2)\times 1+(−2)\times 0+3=1 > 0 \rightarrow 1$
$x_1 = 1$, $x_2 = 1$
$ (−2)\times 1+(−2)\times 1+3=-1 < 0 \rightarrow 0$
디지털 회로에서는 복수개의 NAND 게이트를 조합하면 어떤 디지털 로직이라도 구현 가능하다. 예를 들어 다음 회로는 두 입력 신호의 합과 자릿수를 반환하는 반가산기(half adder) 회로이다.
<img src="https://datascienceschool.net/upfiles/3002b65c9f034818a318ad7f6b09671f.png">
이 퍼셉트론 조합을 보면 4개의 퍼셉트론을 연결하여 XOR 로직을 구현하였음을 알 수 있다.
다계층 퍼셉트론 (MLP: Multi-Layer Perceptrons)
신경망은 퍼셉트론을 여러개 연결한 것으로 다계층 퍼셉트론(MLP: Multi-Layer Perceptrons)이라고도 한다. 신경망에 속한 퍼셉트론은 뉴론(neuron) 또는 노드(node)라고 불린다.
각 계층(layer)은 다음 계층에 대해 적응형 기저 함수의 역할을 한다. 최초의 계층은 입력 계층(input layer), 마지막 계측은 출력 계층(output layer)이라고 하며 중간은 은닉 계층(hidden layer)라고 한다.
<img src="https://datascienceschool.net/upfiles/4dcef7b75de64023900c7f7edb7cbb2f.png">
MLP의 또다른 특징은 출력 계층에 복수개의 출력 뉴런를 가지고 각 뉴런값으로 출력 클래스의 조건부 확률을 반환하도록 설계하여 멀티 클래스 문제를 해결할 수도 있다는 점이다.
다음은 필기 숫자에 대한 영상 정보를 입력 받아 숫자 0 ~ 9 까지의 조건부 확률을 출력하는 MLP의 예이다. 입력 영상이 28 x 28 해상도를 가진다면 입력 계층의 뉴런 수는 $28 \times 28 = 784$ 개가 된다. 출력은 숫자 0 ~ 9 까지의 조건부 확률을 출력하는 $10$ 개의 뉴런을 가진다.
그림의 모형은 $15$개의 뉴런을 가지는 $1$ 개의 은닉 계층을 가진다.
<img src="https://datascienceschool.net/upfiles/90f2752671424cef846839b89ddcf6aa.png">
신경망 가중치 표기법
신경망의 가중치는 $w^{l}_{j,k}$ 과 같이 표기한다. 이 가중치는 $l-1$ 번째 계층의 $k$번째 뉴런와 $l$ 번째 계층의 $j$번째 뉴런을 연결하는 가중치를 뜻한다. 첨자의 순서에 주의한다.
End of explanation
"""
%%tikz --size 600,400
% Figure: a 2-input MLP with two hidden layers of 3 neurons each and
% 2 output neurons (overall network topology, no values highlighted).
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm]
\node[neuron, fill=gray!10] (i1) at (0, 2) {$x_1$};
\node[neuron, fill=gray!10] (i2) at (0, -2) {$x_2$};
\node[neuron] (h11) at (4, 3) {hidden 11};
\node[neuron] (h12) at (4, 0) {hidden 12};
\node[neuron] (h13) at (4, -3) {hidden 13};
\draw[->] (i1) -- (h11);
\draw[->] (i2) -- (h11);
\draw[->] (i1) -- (h12);
\draw[->] (i2) -- (h12);
\draw[->] (i1) -- (h13);
\draw[->] (i2) -- (h13);
\node[neuron] (h21) at (8, 3) {hidden 21};
\node[neuron] (h22) at (8, 0) {hidden 22};
\node[neuron] (h23) at (8, -3) {hidden 23};
\draw[->] (h11) -- (h21);
\draw[->] (h11) -- (h22);
\draw[->] (h11) -- (h23);
\draw[->] (h12) -- (h21);
\draw[->] (h12) -- (h22);
\draw[->] (h12) -- (h23);
\draw[->] (h13) -- (h21);
\draw[->] (h13) -- (h22);
\draw[->] (h13) -- (h23);
\node[neuron] (o1) at (12, 2) {output 1};
\node[neuron] (o2) at (12, -2) {output 2};
\draw[->] (h21) -- (o1);
\draw[->] (h21) -- (o2);
\draw[->] (h22) -- (o1);
\draw[->] (h22) -- (o2);
\draw[->] (h23) -- (o1);
\draw[->] (h23) -- (o2);
%%tikz --size 600,400
% Figure: feedforward step 1 -- the first hidden layer's activations
% (a^1, z^1) are computed from the inputs (thick arrows, shaded nodes).
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm]
\node[neuron, fill=gray!10] (i1) at (0, 2) {$x_1$};
\node[neuron, fill=gray!10] (i2) at (0, -2) {$x_2$};
\node[neuron, fill=gray!10] (h11) at (4, 3) {$a^1_1$, $z^1_1$};
\node[neuron, fill=gray!10] (h12) at (4, 0) {$a^1_2$, $z^1_2$};
\node[neuron, fill=gray!10] (h13) at (4, -3) {$a^1_3$, $z^1_3$};
\draw[->, line width=1mm] (i1) -- (h11);
\draw[->, line width=1mm] (i2) -- (h11);
\draw[->, line width=1mm] (i1) -- (h12);
\draw[->, line width=1mm] (i2) -- (h12);
\draw[->, line width=1mm] (i1) -- (h13);
\draw[->, line width=1mm] (i2) -- (h13);
\node[neuron] (h21) at (8, 3) {hidden 21};
\node[neuron] (h22) at (8, 0) {hidden 22};
\node[neuron] (h23) at (8, -3) {hidden 23};
\draw[->] (h11) -- (h21);
\draw[->] (h11) -- (h22);
\draw[->] (h11) -- (h23);
\draw[->] (h12) -- (h21);
\draw[->] (h12) -- (h22);
\draw[->] (h12) -- (h23);
\draw[->] (h13) -- (h21);
\draw[->] (h13) -- (h22);
\draw[->] (h13) -- (h23);
\node[neuron] (o1) at (12, 2) {output 1};
\node[neuron] (o2) at (12, -2) {output 2};
\draw[->] (h21) -- (o1);
\draw[->] (h21) -- (o2);
\draw[->] (h22) -- (o1);
\draw[->] (h22) -- (o2);
\draw[->] (h23) -- (o1);
\draw[->] (h23) -- (o2);
"""
Explanation: Feedforward propagation
신경망의 계산 과정은 실제 신경망에서 신호가 전달과는 과정과 유사하므로 Feedforward propagation 이라고 불린다.
$l$번째 계층의 $j$번째 뉴런에서의 출력값 $z^l$은 다음과 같이 정의된다.
$$
\begin{eqnarray}
z^{l}j = h \left( \sum_k w^{l}{jk} z^{l-1}k + b^l_j \right) = h \left( w^{l}{j} \cdot z^{l-1} + b^l_j \right)
\end{eqnarray}
$$
$l$번째 계층 전체의 출력은 다음과 같이 표시할 수 있다.
$$
\begin{eqnarray}
z^{l} = h \left( \sum_k w^{l}_{k} z^{l-1}_k + b^l \right) = h \left( w^{l} \cdot z^{l-1} + b^l \right)
\end{eqnarray}
$$
$$
a^l \equiv w^l \cdot z^{l-1}+b^l
$$
$$
\begin{eqnarray}
z^{l} = h \left( a^l \right)
\end{eqnarray}
$$
아래에 Feedforward propagation 예를 보였다.
End of explanation
"""
%%tikz --size 600,400
% Figure: feedforward step 2 -- the second hidden layer's activations
% (a^2, z^2) are computed from z^1 (thick arrows, shaded nodes).
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm]
\node[neuron, fill=gray!10] (i1) at (0, 2) {$x_1$};
\node[neuron, fill=gray!10] (i2) at (0, -2) {$x_2$};
\node[neuron, fill=gray!10] (h11) at (4, 3) {$a^1_1$, $z^1_1$};
\node[neuron, fill=gray!10] (h12) at (4, 0) {$a^1_2$, $z^1_2$};
\node[neuron, fill=gray!10] (h13) at (4, -3) {$a^1_3$, $z^1_3$};
\draw[-] (i1) -- (h11);
\draw[-] (i2) -- (h11);
\draw[-] (i1) -- (h12);
\draw[-] (i2) -- (h12);
\draw[-] (i1) -- (h13);
\draw[-] (i2) -- (h13);
\node[neuron, fill=gray!10] (h21) at (8, 3) {$a^2_1$, $z^2_1$};
\node[neuron, fill=gray!10] (h22) at (8, 0) {$a^2_2$, $z^2_2$};
\node[neuron, fill=gray!10] (h23) at (8, -3) {$a^2_3$, $z^2_3$};
\draw[->, line width=1mm] (h11) -- (h21);
\draw[->, line width=1mm] (h11) -- (h22);
\draw[->, line width=1mm] (h11) -- (h23);
\draw[->, line width=1mm] (h12) -- (h21);
\draw[->, line width=1mm] (h12) -- (h22);
\draw[->, line width=1mm] (h12) -- (h23);
\draw[->, line width=1mm] (h13) -- (h21);
\draw[->, line width=1mm] (h13) -- (h22);
\draw[->, line width=1mm] (h13) -- (h23);
\node[neuron] (o1) at (12, 2) {output 1};
\node[neuron] (o2) at (12, -2) {output 2};
\draw[-] (h21) -- (o1);
\draw[-] (h21) -- (o2);
\draw[-] (h22) -- (o1);
\draw[-] (h22) -- (o2);
\draw[-] (h23) -- (o1);
\draw[-] (h23) -- (o2);
"""
Explanation: $$ z^{1} = h \left( w^{1} \cdot x + b^1 \right) = h \left( a^1 \right)$$
End of explanation
"""
%%tikz --size 600,400
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm]
\node[neuron, fill=gray!10] (i1) at (0, 2) {$x_1$};
\node[neuron, fill=gray!10] (i2) at (0, -2) {$x_2$};
\node[neuron, fill=gray!10] (h11) at (4, 3) {$a^1_1$, $z^1_1$};
\node[neuron, fill=gray!10] (h12) at (4, 0) {$a^1_2$, $z^1_2$};
\node[neuron, fill=gray!10] (h13) at (4, -3) {$a^1_3$, $z^1_3$};
\draw[-] (i1) -- (h11);
\draw[-] (i2) -- (h11);
\draw[-] (i1) -- (h12);
\draw[-] (i2) -- (h12);
\draw[-] (i1) -- (h13);
\draw[-] (i2) -- (h13);
\node[neuron, fill=gray!10] (h21) at (8, 3) {$a^2_1$, $z^2_1$};
\node[neuron, fill=gray!10] (h22) at (8, 0) {$a^2_2$, $z^2_2$};
\node[neuron, fill=gray!10] (h23) at (8, -3) {$a^2_3$, $z^2_3$};
\draw[-] (h11) -- (h21);
\draw[-] (h11) -- (h22);
\draw[-] (h11) -- (h23);
\draw[-] (h12) -- (h21);
\draw[-] (h12) -- (h22);
\draw[-] (h12) -- (h23);
\draw[-] (h13) -- (h21);
\draw[-] (h13) -- (h22);
\draw[-] (h13) -- (h23);
\node[neuron, fill=gray!10] (o1) at (12, 2) {$a^3_1$, $z^3_1=y_1$};
\node[neuron, fill=gray!10] (o2) at (12, -2) {$a^3_2$, $z^3_2=y_2$};
\draw[->, line width=1mm] (h21) -- (o1);
\draw[->, line width=1mm] (h21) -- (o2);
\draw[->, line width=1mm] (h22) -- (o1);
\draw[->, line width=1mm] (h22) -- (o2);
\draw[->, line width=1mm] (h23) -- (o1);
\draw[->, line width=1mm] (h23) -- (o2);
"""
Explanation: $$ z^{2} = h \left( w^{2} \cdot z^{1} + b^2 \right) = h \left( a^2 \right)$$
End of explanation
"""
%%tikz --size 600,400
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm, align=center]
\node[neuron] (i1) at (0, 2) {$x_1$};
\node[neuron] (i2) at (0, -2) {$x_2$};
\node[neuron] (h11) at (4, 3) {$a^1_1$, $z^1_1$};
\node[neuron] (h12) at (4, 0) {$a^1_2$, $z^1_2$};
\node[neuron] (h13) at (4, -3) {$a^1_3$, $z^1_3$};
\draw[-] (i1) -- (h11);
\draw[-] (i2) -- (h11);
\draw[-] (i1) -- (h12);
\draw[-] (i2) -- (h12);
\draw[-] (i1) -- (h13);
\draw[-] (i2) -- (h13);
\node[neuron] (h21) at (8, 3) {$a^2_1$, $z^2_1$};
\node[neuron] (h22) at (8, 0) {$a^2_2$, $z^2_2$};
\node[neuron] (h23) at (8, -3) {$a^2_3$, $z^2_3$};
\draw[-] (h11) -- (h21);
\draw[-] (h11) -- (h22);
\draw[-] (h11) -- (h23);
\draw[-] (h12) -- (h21);
\draw[-] (h12) -- (h22);
\draw[-] (h12) -- (h23);
\draw[-] (h13) -- (h21);
\draw[-] (h13) -- (h22);
\draw[-] (h13) -- (h23);
\node[neuron, fill=gray!10] (o1) at (12, 2) {$a^3_1$, $z^3_1=y_1$ \\ $\delta^3_1 = z_1 - y_1$};
\node[neuron, fill=gray!10] (o2) at (12, -2) {$a^3_2$, $z^3_2=y_2$ \\ $\delta^3_2 = z_2 - y_2$};
\draw[-] (h21) -- (o1);
\draw[-] (h21) -- (o2);
\draw[-] (h22) -- (o1);
\draw[-] (h22) -- (o2);
\draw[-] (h23) -- (o1);
\draw[-] (h23) -- (o2);
"""
Explanation: $$ y = z^{3} = h \left( w^{3} \cdot z^{2} + b^3 \right) = h \left( a^3 \right)$$
오차 함수
신경망의 오차 함수는 조건부 확률이라는 실수 값을 출력해야 하므로 퍼셉트론과 달리 제곱합 오차 함수를 사용한다.
$$
\begin{eqnarray} C(w,b) =
\frac{1}{2n} \sum_i \| y_i - \hat{y}(x_i; w, b)\|^2 = \frac{1}{2n} \sum_i \| y_i - z_i \|^2
\end{eqnarray}
$$
가중치 최적화
오차함수를 최소화하는 최적의 가중치를 찾기 위해 다음과 같이 미분(gradient)을 사용한 steepest gradient descent 방법을 적용한다.
$$
\begin{eqnarray}
\Delta w = -\eta \nabla C,
\end{eqnarray}
$$
여기에서 $\eta$는 최적화 속도(learning rate)이다.
$$
\begin{eqnarray}
\nabla C \equiv \left(\frac{\partial C}{\partial w_1}, \ldots,
\frac{\partial C}{\partial w_m}\right)^T
\end{eqnarray}
$$
가중치 갱신 공식은 다음과 같다.
$$
\begin{eqnarray}
w_k & \rightarrow & w_k' = w_k-\eta \frac{\partial C}{\partial w_k} \
b_l & \rightarrow & b_l' = b_l-\eta \frac{\partial C}{\partial b_l}
\end{eqnarray}
$$
Stochastic Gradient Descent
실제로는 단순 Steepest Gradient Descent 방법보다 SGD(Stochastic Gradient Descent)를 주로 사용한다. SGD는 미분 계산을 위해 전체 데이터 샘플을 모두 사용하지 않고 $m$개의 일부 데이터만 사용하여 미분을 계산하는 방법이다.
$$
\begin{eqnarray}
\frac{\sum_{j=1}^m \nabla C_{X_{j}}}{m} \approx \frac{\sum_x \nabla C_x}{n} = \nabla C
\end{eqnarray}
$$
이 경우 가중치 갱신 공식은 다음과 같다.
$$
\begin{eqnarray}
w_k & \rightarrow & w_k' = w_k-\frac{\eta}{m}
\sum_j \frac{\partial C_{X_j}}{\partial w_k} \
b_l & \rightarrow & b_l' = b_l-\frac{\eta}{m}
\sum_j \frac{\partial C_{X_j}}{\partial b_l},
\end{eqnarray}
$$
Back Propagation
단순하게 수치적으로 미분을 계산한다면 모든 가중치에 대해서 개별적으로 미분을 계산해야 한다. 그러나 back propagation 방법을 사용하면 모든 가중치에 대한 미분값을 한번에 계산할 수 있다.
back propagation 방법을 수식으로 표현하면 다음과 같다.
우선 $\delta$ 를 뒤에서 앞으로 전파한다. $\delta$는 다음과 같이 정의되는 값이다.
$$
\delta_j = \dfrac{\partial C}{\partial a_j}
$$
$$
\begin{eqnarray}
\delta^{l-1}_j = h'(a^{l-1}_j) \sum_k w^l_{kj} \delta^l_k
\end{eqnarray}
$$
이 식을 벡터-행렬 식으로 쓰면 다음과 같다.
$$
\delta^{l-1} = h'(a^{l-1}) \odot ((w^{l})^T \delta^{l})
$$
여기에서 $\odot$ 연산 기호는 Hamadard Product 혹은 Schur product 라고 불리는 연산으로 정의는 다음과 같다.
$$
\left(\begin{array}{ccc} \mathrm{a}_{11} & \mathrm{a}_{12} & \mathrm{a}_{13}\\ \mathrm{a}_{21} & \mathrm{a}_{22} & \mathrm{a}_{23}\\ \mathrm{a}_{31} & \mathrm{a}_{32} & \mathrm{a}_{33} \end{array}\right) \odot \left(\begin{array}{ccc} \mathrm{b}_{11} & \mathrm{b}_{12} & \mathrm{b}_{13}\\ \mathrm{b}_{21} & \mathrm{b}_{22} & \mathrm{b}_{23}\\ \mathrm{b}_{31} & \mathrm{b}_{32} & \mathrm{b}_{33} \end{array}\right) = \left(\begin{array}{ccc} \mathrm{a}_{11}\, \mathrm{b}_{11} & \mathrm{a}_{12}\, \mathrm{b}_{12} & \mathrm{a}_{13}\, \mathrm{b}_{13}\\ \mathrm{a}_{21}\, \mathrm{b}_{21} & \mathrm{a}_{22}\, \mathrm{b}_{22} & \mathrm{a}_{23}\, \mathrm{b}_{23}\\ \mathrm{a}_{31}\, \mathrm{b}_{31} & \mathrm{a}_{32}\, \mathrm{b}_{32} & \mathrm{a}_{33}\, \mathrm{b}_{33} \end{array}\right)
$$
오차값에서 가중치에 대한 미분은 다음과 같이 구한다.
$$
\frac{\partial C}{\partial w^l_{jk}} = \delta^l_j z^{l-1}_k
$$
또한 최종단의 $\delta$는 다음과 같이 예측 오차 그 자체이다.
$$
\delta^L_j = y_j - z_j
$$
따라서 오차값을 위 식에 따라 앞쪽으로 다시 전파하면 전체 가중치에 대한 미분을 구할 수 있다.
End of explanation
"""
%%tikz --size 600,400
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm, align=center]
\node[neuron] (i1) at (0, 2) {$x_1$};
\node[neuron] (i2) at (0, -2) {$x_2$};
\node[neuron] (h11) at (4, 3) {$a^1_1$, $z^1_1$};
\node[neuron] (h12) at (4, 0) {$a^1_2$, $z^1_2$};
\node[neuron] (h13) at (4, -3) {$a^1_3$, $z^1_3$};
\draw[-] (i1) -- (h11);
\draw[-] (i2) -- (h11);
\draw[-] (i1) -- (h12);
\draw[-] (i2) -- (h12);
\draw[-] (i1) -- (h13);
\draw[-] (i2) -- (h13);
\node[neuron, fill=gray!10] (h21) at (8, 3) {$a^2_1$, $z^2_1$ \\ $\delta^2_1$};
\node[neuron, fill=gray!10] (h22) at (8, 0) {$a^2_2$, $z^2_2$ \\ $\delta^2_2$};
\node[neuron, fill=gray!10] (h23) at (8, -3) {$a^2_3$, $z^2_3$ \\ $\delta^2_3$};
\draw[-] (h11) -- (h21);
\draw[-] (h11) -- (h22);
\draw[-] (h11) -- (h23);
\draw[-] (h12) -- (h21);
\draw[-] (h12) -- (h22);
\draw[-] (h12) -- (h23);
\draw[-] (h13) -- (h21);
\draw[-] (h13) -- (h22);
\draw[-] (h13) -- (h23);
\node[neuron, fill=gray!10] (o1) at (12, 2) {$a^3_1$, $z^3_1=y_1$ \\ $\delta^3_1 = z_1 - y_1$};
\node[neuron, fill=gray!10] (o2) at (12, -2) {$a^3_2$, $z^3_2=y_2$ \\ $\delta^3_2 = z_2 - y_2$};
\draw[<-, line width=0.5mm] (h21) -- (o1);
\draw[<-, line width=0.5mm] (h21) -- (o2);
\draw[<-, line width=0.5mm] (h22) -- (o1);
\draw[<-, line width=0.5mm] (h22) -- (o2);
\draw[<-, line width=0.5mm] (h23) -- (o1);
\draw[<-, line width=0.5mm] (h23) -- (o2);
"""
Explanation: $$
\delta^3_j = y_j - z_j
$$
End of explanation
"""
%%tikz --size 600,400
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm, align=center]
\node[neuron] (i1) at (0, 2) {$x_1$};
\node[neuron] (i2) at (0, -2) {$x_2$};
\node[neuron, fill=gray!10] (h11) at (4, 3) {$a^1_1$, $z^1_1$ \\ $\delta^1_1$};
\node[neuron, fill=gray!10] (h12) at (4, 0) {$a^1_2$, $z^1_2$ \\ $\delta^1_2$};
\node[neuron, fill=gray!10] (h13) at (4, -3) {$a^1_3$, $z^1_3$ \\ $\delta^1_3$};
\draw[-] (i1) -- (h11);
\draw[-] (i2) -- (h11);
\draw[-] (i1) -- (h12);
\draw[-] (i2) -- (h12);
\draw[-] (i1) -- (h13);
\draw[-] (i2) -- (h13);
\node[neuron, fill=gray!10] (h21) at (8, 3) {$a^2_1$, $z^2_1$ \\ $\delta^2_1$};
\node[neuron, fill=gray!10] (h22) at (8, 0) {$a^2_2$, $z^2_2$ \\ $\delta^2_2$};
\node[neuron, fill=gray!10] (h23) at (8, -3) {$a^2_3$, $z^2_3$ \\ $\delta^2_3$};
\draw[<-, line width=0.5mm] (h11) -- (h21);
\draw[<-, line width=0.5mm] (h11) -- (h22);
\draw[<-, line width=0.5mm] (h11) -- (h23);
\draw[<-, line width=0.5mm] (h12) -- (h21);
\draw[<-, line width=0.5mm] (h12) -- (h22);
\draw[<-, line width=0.5mm] (h12) -- (h23);
\draw[<-, line width=0.5mm] (h13) -- (h21);
\draw[<-, line width=0.5mm] (h13) -- (h22);
\draw[<-, line width=0.5mm] (h13) -- (h23);
\node[neuron, fill=gray!10] (o1) at (12, 2) {$a^3_1$, $z^3_1=y_1$ \\ $\delta^3_1 = z_1 - y_1$};
\node[neuron, fill=gray!10] (o2) at (12, -2) {$a^3_2$, $z^3_2=y_2$ \\ $\delta^3_2 = z_2 - y_2$};
\draw[-] (h21) -- (o1);
\draw[-] (h21) -- (o2);
\draw[-] (h22) -- (o1);
\draw[-] (h22) -- (o2);
\draw[-] (h23) -- (o1);
\draw[-] (h23) -- (o2);
"""
Explanation: $$ \frac{\partial C}{\partial w^3_{jk}} = z^2_k \delta^3_j $$
$$ \delta^2 = h'(a^2) \odot ((w^{3})^T \delta^{3}) $$
End of explanation
"""
%%tikz --size 600,400
\tikzstyle{neuron}=[circle, draw, minimum size=2 cm,inner sep=5pt, node distance=2cm, align=center]
\node[neuron, fill=gray!10] (i1) at (0, 2) {$x_1$};
\node[neuron, fill=gray!10] (i2) at (0, -2) {$x_2$};
\node[neuron, fill=gray!10] (h11) at (4, 3) {$a^1_1$, $z^1_1$ \\ $\delta^1_1$};
\node[neuron, fill=gray!10] (h12) at (4, 0) {$a^1_2$, $z^1_2$ \\ $\delta^1_2$};
\node[neuron, fill=gray!10] (h13) at (4, -3) {$a^1_3$, $z^1_3$ \\ $\delta^1_3$};
\draw[<-, line width=0.5mm] (i1) -- (h11);
\draw[<-, line width=0.5mm] (i2) -- (h11);
\draw[<-, line width=0.5mm] (i1) -- (h12);
\draw[<-, line width=0.5mm] (i2) -- (h12);
\draw[<-, line width=0.5mm] (i1) -- (h13);
\draw[<-, line width=0.5mm] (i2) -- (h13);
\node[neuron, fill=gray!10] (h21) at (8, 3) {$a^2_1$, $z^2_1$ \\ $\delta^2_1$};
\node[neuron, fill=gray!10] (h22) at (8, 0) {$a^2_2$, $z^2_2$ \\ $\delta^2_2$};
\node[neuron, fill=gray!10] (h23) at (8, -3) {$a^2_3$, $z^2_3$ \\ $\delta^2_3$};
\draw[-] (h11) -- (h21);
\draw[-] (h11) -- (h22);
\draw[-] (h11) -- (h23);
\draw[-] (h12) -- (h21);
\draw[-] (h12) -- (h22);
\draw[-] (h12) -- (h23);
\draw[-] (h13) -- (h21);
\draw[-] (h13) -- (h22);
\draw[-] (h13) -- (h23);
\node[neuron, fill=gray!10] (o1) at (12, 2) {$a^3_1$, $z^3_1=y_1$ \\ $\delta^3_1 = z_1 - y_1$};
\node[neuron, fill=gray!10] (o2) at (12, -2) {$a^3_2$, $z^3_2=y_2$ \\ $\delta^3_2 = z_2 - y_2$};
\draw[-] (h21) -- (o1);
\draw[-] (h21) -- (o2);
\draw[-] (h22) -- (o1);
\draw[-] (h22) -- (o2);
\draw[-] (h23) -- (o1);
\draw[-] (h23) -- (o2);
"""
Explanation: $$ \frac{\partial C}{\partial w^2_{jk}} = z^1_k \delta^2_j $$
$$ \delta^1 = h'(a^1) \odot ((w^{2})^T \delta^{2}) $$
End of explanation
"""
|
dwhswenson/openpathsampling | examples/alanine_dipeptide_tps/AD_tps_3a_analysis_flex.ipynb | mit | from __future__ import print_function
%matplotlib inline
import openpathsampling as paths
import numpy as np
import matplotlib.pyplot as plt
import os
import openpathsampling.visualize as ops_vis
from IPython.display import SVG
"""
Explanation: Analyzing the flexible path length simulation
End of explanation
"""
# note that this log will overwrite the log from the previous notebook
#import logging.config
#logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
%%time
flexible = paths.AnalysisStorage("ad_tps.nc")
# opening as AnalysisStorage is a little slower, but speeds up the move_summary
engine = flexible.engines[0]
flex_scheme = flexible.schemes[0]
print("File size: {0} for {1} steps, {2} snapshots".format(
flexible.file_size_str,
len(flexible.steps),
len(flexible.snapshots)
))
"""
Explanation: Load the file, and from the file pull our the engine (which tells us what the timestep was) and the move scheme (which gives us a starting point for much of the analysis).
End of explanation
"""
flex_scheme.move_summary(flexible.steps)
"""
Explanation: That tell us a little about the file we're dealing with. Now we'll start analyzing the contents of that file. We used a very simple move scheme (only shooting), so the main information that the move_summary gives us is the acceptance of the only kind of move in that scheme. See the MSTIS examples for more complicated move schemes, where you want to make sure that frequency at which the move runs is close to what was expected.
End of explanation
"""
replica_history = ops_vis.ReplicaEvolution(replica=0)
tree = ops_vis.PathTree(
flexible.steps[0:25],
replica_history
)
tree.options.css['scale_x'] = 3
SVG(tree.svg())
# can write to svg file and open with programs that can read SVG
with open("flex_tps_tree.svg", 'w') as f:
f.write(tree.svg())
print("Decorrelated trajectories:", len(tree.generator.decorrelated_trajectories))
%%time
full_history = ops_vis.PathTree(
flexible.steps,
ops_vis.ReplicaEvolution(
replica=0
)
)
n_decorrelated = len(full_history.generator.decorrelated_trajectories)
print("All decorrelated trajectories:", n_decorrelated)
"""
Explanation: Replica history tree and decorrelated trajectories
The ReplicaHistoryTree object gives us both the history tree (often called the "move tree") and the number of decorrelated trajectories.
A ReplicaHistoryTree is made for a certain set of Monte Carlo steps. First, we make a tree of only the first 25 steps in order to visualize it. (All 10000 steps would be unwieldy.)
After the visualization, we make a second PathTree of all the steps. We won't visualize that; instead we use it to count the number of decorrelated trajectories.
End of explanation
"""
path_lengths = [len(step.active[0].trajectory) for step in flexible.steps]
plt.hist(path_lengths, bins=40, alpha=0.5);
print("Maximum:", max(path_lengths),
"("+(max(path_lengths)*engine.snapshot_timestep).format("%.3f")+")")
print ("Average:", "{0:.2f}".format(np.mean(path_lengths)),
"("+(np.mean(path_lengths)*engine.snapshot_timestep).format("%.3f")+")")
"""
Explanation: Path length distribution
Flexible length TPS gives a distribution of path lengths. Here we calculate the length of every accepted trajectory, then histogram those lengths, and calculate the maximum and average path lengths.
We also use engine.snapshot_timestep to convert the count of frames to time, including correct units.
End of explanation
"""
from openpathsampling.numerics import HistogramPlotter2D
psi = flexible.cvs['psi']
phi = flexible.cvs['phi']
deg = 180.0 / np.pi
path_density = paths.PathDensityHistogram(cvs=[phi, psi],
left_bin_edges=(-180/deg,-180/deg),
bin_widths=(2.0/deg,2.0/deg))
path_dens_counter = path_density.histogram([s.active[0].trajectory for s in flexible.steps])
"""
Explanation: Path density histogram
Next we will create a path density histogram. Calculating the histogram itself is quite easy: first we reload the collective variables we want to plot it in (we choose the phi and psi angles). Then we create the empty path density histogram, by telling it which CVs to use and how to make the histogram (bin sizes, etc). Finally, we build the histogram by giving it the list of active trajectories to histogram.
End of explanation
"""
tick_labels = np.arange(-np.pi, np.pi+0.01, np.pi/4)
plotter = HistogramPlotter2D(path_density,
xticklabels=tick_labels,
yticklabels=tick_labels,
label_format="{:4.2f}")
ax = plotter.plot(cmap="Blues")
"""
Explanation: Now we've built the path density histogram, and we want to visualize it. We have a convenient plot_2d_histogram function that works in this case, and takes the histogram, desired plot tick labels and limits, and additional matplotlib named arguments to plt.pcolormesh.
End of explanation
"""
ops_traj = flexible.steps[1000].active[0].trajectory
traj = ops_traj.to_mdtraj()
traj
# Here's how you would then use NGLView:
#import nglview as nv
#view = nv.show_mdtraj(traj)
#view
flexible.close()
"""
Explanation: Convert to MDTraj for analysis by external tools
The trajectory can be converted to an MDTraj trajectory, and then used anywhere that MDTraj can be used. This includes writing it to a file (in any number of file formats) or visualizing the trajectory using, e.g., NGLView.
End of explanation
"""
|
UPML/complexityTheory | toGit/TSP/tsp/results.ipynb | apache-2.0 | class Node:
def __init__(self, number, cost, time, answer):
self.number = int(number)
self.cost = float(cost)
self.time = float(time) / 10**9
self.size = self.number / 100
self.answer = answer
def write(self):
print("n = ", self.number," \n")
print("cost = ", self.cost, " \n")
print("time = ", self.time, " \n")
print("size = ", self.size, "\n")
print("answer = ", self.answer, "\n")
def getTime(self):
return self.time
def getSize(self):
return self.size
def getNumber(self):
return self.number
def getAnswer(self):
return self.answer
class Point:
    """A simple 2-D point holding `x` and `y` coordinates."""

    def __init__(self, x, y):
        self.x, self.y = x, y
def constructNode(a):
    """Parse one record of a results file into a Node.

    The record (with the leading "i =" marker already stripped by the
    caller) is assumed to have the shape::

        <number>
        <v1>to<v2>to...to<vk>
        cost <cost>
        time <time-in-ns>
    """
    lines = a.split('\n')
    answer = [int(token) for token in lines[1].split("to")]
    cost = lines[2].split()[1]
    time = lines[3].split()[1]
    return Node(lines[0], cost, time, answer)
"""
Explanation: Я запустил алгоритм на случайных тестах, размера начиная с 2 и он обсчитывает по 10 тестов одного размера.
Я хочу попробовать проанализировать данные, которые получу в результате его работы.
А именно я получаю на выход, время работы на тесте, вес цикла и сам цикл.
End of explanation
"""
import math
import pylab
from matplotlib import mlab
%pylab inline
def plotPoints(a, size, showed):
    """Plot run time against problem size for the first `size` nodes.

    Parameters
    ----------
    a : list of Node
        Parsed benchmark records.
    size : int
        Number of leading entries of `a` to plot.
    showed : bool
        If True, render the figure immediately with pylab.show().
    """
    xs = []
    ys = []
    for node in a[:size]:
        xs.append(node.getSize())
        ys.append(node.getTime())
    pylab.plot(xs, ys)
    if showed:
        pylab.show()
def readNodes(name):
    """Read a results file and return the list of parsed Node objects.

    The file is a sequence of records separated by the token "i =";
    any text before the first separator is a header and is discarded.

    Parameters
    ----------
    name : str
        Path to the results file.

    Returns
    -------
    list of Node
    """
    # Use a context manager so the file handle is always closed
    # (the original version leaked the handle).
    with open(name, 'r') as fin:
        raw = fin.read()
    chunks = raw.split("i =")
    # chunks[0] is the text before the first record marker; skip it.
    return [constructNode(chunk) for chunk in chunks[1:]]
nodes = readNodes('out0.txt')
nodesOne = []
nodesOne.append(nodes0[240])
plotPoints(nodes, len(nodes), True)
"""
Explanation: Вытащим данные из файла и преобразуем в удобный формат.
End of explanation
"""
def findMaxTime(l, a, b):
    """Pop and return the element of l[a:b] with the largest getTime().

    Ties are broken in favor of the *later* index (the original scan
    used `>=`, so a later equal element wins); this behavior is kept.

    Parameters
    ----------
    l : list
        List of objects exposing getTime(); mutated in place.
    a, b : int
        Half-open index range [a, b) to search.

    Returns
    -------
    The removed element.

    Raises
    ------
    ValueError
        If the range [a, b) is empty (the original code raised an
        obscure NameError in that case).
    """
    if a >= b:
        raise ValueError("findMaxTime: empty range [{}, {})".format(a, b))
    best = a
    for i in range(a + 1, b):
        if l[i].getTime() >= l[best].getTime():
            best = i
    return l.pop(best)
maxTimeNodes =[]
for i in range(len(nodes) // 10 - 1, -1, -1):
maxTimeNodes.append(findMaxTime(nodes, i*10, (i + 1) * 10))
plotPoints(maxTimeNodes, len(maxTimeNodes),True)
"""
Explanation: Построим график, видим, что есть три каких-то очень плохих теста, которые выглядят, как пики на этом графике. Давайте запомним, что они есть и выкиним их из эксперементальных данных, потом глазами попробуем на них посмотреть и понять почему метод ветвей и границ так долго искал ответ.
End of explanation
"""
plotPoints(nodes, len(nodes),True)
"""
Explanation: График из максимумов, похож на рост по экспоненте.
End of explanation
"""
for i in range(len(nodes) // 9 - 1, -1, -1):
maxTimeNodes.append(findMaxTime(nodes, i*9, (i + 1) * 9))
plotPoints(nodes, len(nodes), True)
"""
Explanation: Ну чтож попробуем выкинуть два максимума из рассмотрения. Хотя уже сейчас график выглядит намного лучше.
End of explanation
"""
plotPoints(nodes, len(nodes)//2, True)
"""
Explanation: Ну вот уже намного лучше.
посмотрим для интереса, ещё и на начало графика.
End of explanation
"""
fin = open('16100.txt', 'r')
a = fin.read()
nodesToSplitSmall = a.split("i =");
smallNodes = []
for i in range(len(nodesToSplitSmall) -1):
smallNodes.append(constructNode(nodesToSplitSmall[i+1]))
plotPoints(smallNodes, len(smallNodes), True)
smallMaxTime = []
for i in range(len(smallNodes) // 100 - 1, -1, -1):
smallMaxTime.append(findMaxTime(smallNodes, i*100, (i + 1) * 100))
plotPoints(smallNodes, len(smallNodes), True)
"""
Explanation: Не красивый график, давайте запустим по 100 тестов для каждого размера.
End of explanation
"""
def findMinTime(l, a, b):
    """Pop and return the element of l[a:b] with the smallest getTime().

    Mirror image of findMaxTime.  Ties are broken in favor of the
    *later* index (the original scan used `<=`); this behavior is kept.

    Parameters
    ----------
    l : list
        List of objects exposing getTime(); mutated in place.
    a, b : int
        Half-open index range [a, b) to search.

    Returns
    -------
    The removed element.

    Raises
    ------
    ValueError
        If the range [a, b) is empty (the original code raised an
        obscure NameError in that case).
    """
    if a >= b:
        raise ValueError("findMinTime: empty range [{}, {})".format(a, b))
    best = a
    for i in range(a + 1, b):
        if l[i].getTime() <= l[best].getTime():
            best = i
    return l.pop(best)
smallMinTime = []
for i in range(len(smallNodes) // 99 - 1, -1, -1):
smallMinTime.append(findMinTime(smallNodes, i*99, (i + 1) * 99))
"""
Explanation: Давайте попробуем понять, есть ли какая-то видимая разница, между тестами на которых алгоритм работает плохо и тех на которых он работает хорошо.
End of explanation
"""
import numpy as np
from bokeh.plotting import *
def show(node):
    """Plot the tour stored in `node` using coordinates from its test file.

    Reads the file '<number>.txt', which is assumed to contain two header
    lines, then one point per line formatted as 'index x y', then two
    trailing lines that are ignored.  The tour vertices in
    node.getAnswer() are 1-based indices into those point lines.

    NOTE(review): `plot` is resolved from the notebook namespace
    (%pylab inline / bokeh star-import); which one is intended should be
    confirmed.
    """
    # Close the file deterministically (the original leaked the handle).
    with open(str(node.getNumber()) + ".txt", 'r') as fin:
        lines = fin.read().split('\n')
    # Drop the two header lines and the two trailing lines.
    lines = lines[2:-2]
    X = []
    Y = []
    for vertex in node.getAnswer():
        parts = lines[vertex - 1].split(' ')
        X.append(float(parts[1]))
        Y.append(float(parts[2]))
    plot(X, Y)
for i in range(0, 12, 4):
subplot(221 + i // 4)
p1 = show(smallMinTime[i]) #синий
p2 = show(smallMaxTime[i]) #зеленый
for i in range(12, 15):
subplot(221 + i % 4)
p1 = show(smallMinTime[i]) #синий
p2 = show(smallMaxTime[i]) #зеленый
"""
Explanation: Нарисуем и посмотрим.
End of explanation
"""
nodes0 = nodes
nodes010 = readNodes("out10.txt")
for i in range(len(nodes010) // 10 - 1, -1, -1):
findMaxTime(nodes010, i*10, (i + 1) * 10)
nodes025 = readNodes("out25.txt")
for i in range(len(nodes025) // 10 - 1, -1, -1):
findMaxTime(nodes025, i*10, (i + 1) * 10)
plotPoints(nodes010, len(nodes010), False) # синий
plotPoints(nodes0, len(nodes0), False) #зеленый
plotPoints(nodes025, len(nodes025), False) #красный
plotPoints(nodes010[0:110], 110, False) # синий
plotPoints(nodes0[0:122], 122, False) #зеленый
plotPoints(nodes025[0:110], 110, False) #красный
plotPoints(nodes010[110:150], 40, False) # синий
plotPoints(nodes0[122:170], 48, False) #зеленый
plotPoints(nodes025[110:150], 40, False) #красный
"""
Explanation: Сомневаюсь, что здесь можно найти закономерность. Это и понятно в этом алгоритме многое зависит от того в каком порядке заданы вершины, от этого зависит, то на сколько быстро мы найдем действительно хороший путь, который позволит перебирать нам малое количество вершин.
До этого момента, старался найти честное полное решение задачи, и иногда это получалось сделать за малое время, но видно что например на тесте 2980 алгоритм работал почти то же время, что умный перебор с динамикой работающий за $2^n * n^2$.
Давайте теперь построим несколько графиков, времени работы алгоритма, в зависимости от точности, которая нам требуется.
End of explanation
"""
nodesOne.append(readNodes("26out05.txt")[0])
nodesOne.append(readNodes("26out1.txt")[0])
nodesOne.append(readNodes("26out15.txt")[0])
nodesOne.append(readNodes("26out20.txt")[0])
nodesOne.append(readNodes("26out25.txt")[0])
nodesOne.append(readNodes("26out30.txt")[0])
Y = [nodesOne[i].getTime() for i in range(len(nodesOne))]
X = [0.05 * i for i in range(len(nodesOne))]
pylab.plot (X, Y)
show()
"""
Explanation: Видна зависимость между качеством апроксимации и временем работы программы.
Будет интересно посмотреть, например на зависимость времени работы программы на одном и том же тесте от качества решения задачи. Возьмем например размер задачи 26, чтобы не ждать два часа пока все посчитается точно.
End of explanation
"""
|
manolomartinez/skyrms | Signal Tutorial.ipynb | gpl-3.0 | sender = np.identity(3)
receiver = np.identity(3)
state_chances = np.array([1/3, 1/3, 1/3])
"""
Explanation: 1. Setting things up
Let's take a look at game.py, which we use to create games. Right now, Signal only does cheap-talk games with a chance player. That is, games in which the state the sender observes is exogenously generated, following a certain probablitiy distribution.
We first generate payoff matrices for sender and receiver. The cell $c_{ij}$ in the sender (receiver) payoff matrix gives the payoff for the sender (receiver) when act $A_j$ is done in state $S_i$.
End of explanation
"""
simple_game = game.Chance(state_chances, sender, receiver, 3) # The 3 gives the number of available messages
"""
Explanation: So, 3 equiprobable (as per state_chances) states and 3 acts (that's why the payoff matrices are square), and sender and receiver get 1 payoff unit when act $A_i$ is performed in state $S_i$, 0 otherwise. That's why both payoff matrices are the identity matrix.
What we now need is to decide which strategies will each type in the sender/receiver populations play, and calculate the average payoff for each combination of sender and receiver strategies. s.Chance takes care of that:
End of explanation
"""
sender_strats = simple_game.sender_pure_strats()
receiver_strats = simple_game.receiver_pure_strats()
"""
Explanation: One common choice is to have one type in each of the populations for each possible pure strategy available to them:
End of explanation
"""
simple_evo = game.Evolve(simple_game, sender_strats, receiver_strats)
"""
Explanation: But this is not the only choice. Conceivably, one might want to add into sender_strats a type of senders following a mixed strategy, or ditto for the receiver. One might add such possibilities by hand into the strats arrays, or delete a number of pure strats, or create a strats array from scratch, etc.
Simulating Evolution
Once we have the two strats arrays we want, we can create a population game. From here on game.Evolve takes over:
End of explanation
"""
sender_init = simple_evo.random_sender()
receiver_init = simple_evo.random_receiver()
times = np.arange(1000) # times from 0 to 999, at 1 time-unit increments
"""
Explanation: All right, we can now actually run one of the ODE solvers available in scipy with this object. We create two random population vectors as the initial state, then run, e.g., scipy.integrate.odeint. We also need to give a vector of times for which population snapshots will be calculated (other solvers in scipy.integrate have a slightly different API):
End of explanation
"""
results = simple_evo.replicator_odeint(sender_init, receiver_init, times)
"""
Explanation: Right now, Signal calculates the two-population replicator(-mutator) dynamics, in continuous or discrete time. It is able to use both scipy.integrate.odeint and scipy.integrate.ode for this. Let's evolve those two random initial population vectors, following the replicator dynamics in continuous time, using odeint:
End of explanation
"""
plt.plot(results);
"""
Explanation: results gives the results of the simulations. If you want the additional output that odeint can provide, you can pass the full_output=True flag to replicator_odeintjust as you would to odeint. The same goes for other additional input to odeint.
End of explanation
"""
sender_final, receiver_final = simple_evo.vector_to_populations(results[-1])
# This splits the vector that the solver outputs into two population vectors
winning_sender = sender_strats[sender_final.argmax()]
winning_receiver = receiver_strats[receiver_final.argmax()]
# This gives the strategies with the highest frequency (which we know is 1)
# for sender and receiver in the final population state
print("{}\n\n{}".format(winning_sender, winning_receiver))
"""
Explanation: This popnulation will no longer evolve. We can now check what it is they are doing in the final state:
End of explanation
"""
import analyze
info = analyze.Information(simple_game, winning_sender, winning_receiver)
info.mutual_info_states_acts()
"""
Explanation: Sender and receiver are engaged in a signaling system. We can also check directly the mutual info between states and acts in the final snapshot:
End of explanation
"""
# Replicate the Huttegger et al. (2010) experiment: evolve 1000 random
# initial sender/receiver populations under the replicator dynamics and
# record, for each run, the mutual information between states and acts
# at the final population state.
final_info = []
for i in range(1000):
    sender_init = simple_evo.random_sender()
    receiver_init = simple_evo.random_receiver()
    data = simple_evo.replicator_odeint(sender_init, receiver_init, times)
    # data[-1] is the last population snapshot; split it into the two
    # population vectors and convert each to a mixed strategy.
    sender_final, receiver_final = simple_evo.vector_to_populations(data[-1])
    sender_final_info = simple_evo.sender_to_mixed_strat(sender_final)
    receiver_final_info = simple_evo.receiver_to_mixed_strat(receiver_final)
    info = analyze.Information(simple_game, sender_final_info, receiver_final_info)
    final_info.append(info.mutual_info_states_acts())
"""
Explanation: 1.58 (that is, log2(3)) bits is the entropy of states, which is fully recovered in the mutual information between states and act: a signaling system, as expected.
But we know that a small proportion of random starting points in the "simple game" do not evolve to signaling systems. This percentage, according to Huttegger et al. (2010, p. 183) is about 4.7%. Let's replicate their result.
End of explanation
"""
plt.plot(final_info);
sum(np.array(final_info) < 1.58)/1000
"""
Explanation: We have now the mutual info between states and acts at 1000 end points:
As the plot shows, there are no intermediate values between signaling systems (at 1.58 bits) and the partially pooling configuration at 0.9 bits. So, to calculate the proportion of pooling equilibria, we can look at just that.
End of explanation
"""
import imp
imp.reload(game)
imp.reload(analyze)
sender = np.identity(3)
receiver = np.identity(3)
simple_nonchance = game.NonChance(sender, receiver, 3)
sender_strats = simple_nonchance.sender_pure_strats()
receiver_strats = simple_nonchance.receiver_pure_strats()
avgpayoff = simple_nonchance.avg_payoffs(sender_strats, receiver_strats)
nc_evolve = game.Evolve(simple_nonchance, sender_strats, receiver_strats)
sender_init = nc_evolve.random_sender()
receiver_init = nc_evolve.random_receiver()
times = np.arange(1000)
results = nc_evolve.replicator_odeint(sender_init, receiver_init, times)
plt.plot(results);
sender_final, receiver_final = nc_evolve.vector_to_populations(results[-1])
# This splits the vector that the solver outputs into two population vectors
sender_final_strat = nc_evolve.sender_to_mixed_strat(sender_final)
receiver_final_strat = nc_evolve.receiver_to_mixed_strat(receiver_final)
print("{}\n\n{}".format(sender_final_strat, receiver_final_strat))
ci = analyze.CommonInterest(simple_nonchance)
ci.C_chance()
simple_nonchance.sender_payoff_matrix.dot(receiver_for_sender)
payoffs = np.arange(9).reshape(3,3)
payoffs
senderstrat[0][:, None] * payoffs.dot(np.array([1/3, 0, 2/3])[:, None])
simple_evo.sender_to_mixed_strat(receiver_final)
final_info
"""
Explanation: Close enough!
Games without chance player
Let's now work with a game in which the sender has an endogenously generated state. I.e., a sender type will now be individuated by a state together with a probability vector over the set of messages. Receivers are as in the games with chance player discussed above:
End of explanation
"""
|
rmdort/clipper | examples/tutorial/tutorial_part_one.ipynb | apache-2.0 | cifar_loc = ""
%run ./download_cifar.py $cifar_loc
"""
Explanation: Clipper Tutorial: Part 1
This tutorial will walk you through the process of starting Clipper, creating and querying a Clipper application, and deploying models to Clipper. In the first part of the demo, you will set up Clipper and create an application without involving any machine learning, demonstrating how a frontend developer or dev-ops engineer can set up and query Clipper without having to know anything about the machine-learning models involved.
As an example, this tutorial will walk you through creating an application that labels images as either pictures of birds or planes. You will use the CIFAR-10 dataset as the source of these images.
Download the images
As the first step in the tutorial, download the CIFAR dataset that your Clipper application will work with. You can do this by specifying a download location, cifar_loc, and running the below code. This will make use of the provided download_cifar.py.
This download can take some time. If it fails before you see the output "Finished downloading", go to the download location you specified, delete cifar-10-python.tar.gz, and attempt the download again.
End of explanation
"""
max_train_datapoints = 10000
max_test_datapoints = 10000
%run ./extract_cifar.py $cifar_loc $max_train_datapoints $max_test_datapoints
"""
Explanation: Extract the images
Now, we must extract the data into a format we can load. This will make use of the provided extract_cifar.py
This dataset has 50,000 training datapoints and 10,000 test datapoints. We don't need to use all of them to demonstrate how Clipper works. Feel free to adjust max_train_datapoints and max_test_datapoints or set them to None if you wish to use all the data available for training and testing. You can change these vaues and re-run this command in the future if you wish.
Using 10,000 training images (as opposed to the full 50,000 in the dataset) yields similar prediction accuracies and takes less time to extract into a readable format.
End of explanation
"""
import cifar_utils
# Load the CIFAR test split twice: normalized for model queries, and
# un-normalized so the raw images can be displayed below.
# NOTE(review): cifar_utils is a project-local helper; filter_data/load_cifar
# semantics are inferred from this usage — confirm in cifar_utils.py.
test_x, test_y = cifar_utils.filter_data(
    *cifar_utils.load_cifar(cifar_loc, cifar_filename="cifar_test.data", norm=True))
no_norm_x, no_norm_y = cifar_utils.filter_data(
    *cifar_utils.load_cifar(cifar_loc, cifar_filename="cifar_test.data", norm=False))
"""
Explanation: Load Cifar
The first step in building any application, using machine-learning or otherwise, is to understand the application requirements. Load the dataset into the notebook so you can examine it and better understand the dataset you will be working with. The cifar_utils library provides several utilities for working with CIFAR data – we will make use of one of them here.
End of explanation
"""
# Render matplotlib figures inline in the notebook.
%matplotlib inline
# Show 2 rows of example CIFAR images (increase the last argument for more rows).
cifar_utils.show_example_images(no_norm_x, no_norm_y, 2)
"""
Explanation: Take a look at the data you've loaded. The size and blurriness of these photos should give you a better understanding of the difficulty of the task you will ask of your machine learning models! If you'd like to see more images, increase the number of rows of images displayed -- the last argument to the function -- to a number greater than 2.
End of explanation
"""
import sys
import os
from clipper_admin import Clipper
# Change the username if necessary
user = ""
# Set the path to the SSH key
key = ""
# Set the SSH host
host = ""
# Connect to the target machine (leave user/key empty and host="localhost"
# for a local deploy) and start Clipper's Docker containers. Requires a
# running Docker daemon on the target.
clipper = Clipper(host, user, key)
clipper.start()
"""
Explanation: Start Clipper
Now you're ready to start Clipper! You will be using the clipper_admin client library to perform administrative commands.
Remember, Docker and Docker-Compose must be installed before deploying Clipper. Visit https://docs.docker.com/compose/install/ for instructions on how to do so. In addition, we recommend using Anaconda and Anaconda environments to manage Python.
Start by installing the library with pip:
sh
pip install clipper_admin
Clipper uses Docker to manage application configurations and to deploy machine-learning models. Make sure your Docker daemon, local or remote, is up and running. You can check this by running docker ps in your command line – if your Docker daemon is not running, you will be told explicitly.
Starting Clipper will have the following effect on your setup: <img src="img/start_clipper.png" style="width: 350px;"/>
If you'd like to deploy Clipper locally, you can leave the user and key variables blank and set host="localhost". Otherwise, you can deploy Clipper remotely to a machine that you have SSH access to. Set the user variable to your SSH username, the key variable to the path to your SSH key, and the host variable to the remote hostname or IP address.
If your SSH server is running on a non-standard port, you can specify the SSH port to use as another argument to the Clipper constructor. For example, clipper = Clipper(host, user, key, ssh_port=9999).
End of explanation
"""
# List the applications currently registered with this Clipper instance
# (expected to be empty on a fresh deployment).
clipper.get_all_apps()
"""
Explanation: Congratulations! You now have a running Clipper instance that you can start to interact with. Think of your clipper Python object as a vehicle for that interaction. Try using it to see the applications deployed to this Clipper instance:
End of explanation
"""
app_name = "cifar_demo"
model_name = "birds_vs_planes_classifier"
# If the model doesn't return a prediction in time, predict
# label 0 (bird) by default
default_output = "0"
# Register the app: input type "doubles" and a 20 ms (20000 microsecond)
# latency service-level objective. This creates the /cifar_demo/predict
# and /cifar_demo/update REST endpoints described below.
clipper.register_application(
    app_name,
    model_name,
    "doubles",
    default_output,
    slo_micros=20000)
"""
Explanation: Create an application
In order to query Clipper for predictions, you need to create an application. Each application specifies a name, a set of models it can query, the query input datatype, the selection policy, and a latency service level objective. Once you register an application with Clipper, the system will create two REST endpoints: one for requesting predictions and for providing feedback.
By associating the query interface with a specific application, Clipper allows frontend developers the flexibility to have multiple applications running in the same Clipper instance. Applications can request predictions from any model in Clipper. This allows a single Clipper instance to serve multiple machine-learning applications. It also provides a convenient mechanism for beta-testing or incremental rollout by creating experimental and stable applications for the same set of queries.
For this tutorial, you will create an application named "cifar_demo" and register a candidate model. Note that Clipper allows you to create the application before deploying the models. Clipper will be moving to a label-based model specification mechanism soon, so that in the future you won't have to explicitly enumerate all the models you want to query up front.
Registering the cifar_demo application with Clipper will have the following effect on your setup: <img src="img/register_app.png" style="width: 500px;"/>
Don't worry if this command seems to take a long time. Before starting Clipper, the Docker containers must be downloaded from Docker Hub. These containers are fairly large and may take awhile to download depending on the speed of your internet connection.
End of explanation
"""
# Re-list registered applications, with full details; "cifar_demo" should
# now appear.
clipper.get_all_apps(verbose=True)
"""
Explanation: Now when you list the applications registered with Clipper, you should see the newly registered "cifar_demo" application show up!
End of explanation
"""
import seaborn as sns
sns.set_style("whitegrid")
# NOTE(review): despine() is called before any figure/axes exist, so it is
# likely a no-op here — confirm whether it was meant to follow the plot.
sns.despine()
import matplotlib as mpl
%matplotlib notebook
# Query the app in a loop; plots average accuracy over every 100 responses.
cifar_utils.run_serving_workload(host, app_name, test_x, test_y)
"""
Explanation: Start serving
Now that you have registered an application, you can start querying the application for predictions. In this case,
Clipper has created two endpoints:
http://HOSTNAME:1337/cifar_demo/predict
http://HOSTNAME:1337/cifar_demo/update
You will now start querying Clipper with a simple Python frontend app that computes the average accuracy of the responses after every 100 requests and updates a plot of the results with every iteration.
This diagram shows how the accuracy plot is receiving its test predictions: <img src="img/serve_predictions.png" style="width: 500px;"/>
End of explanation
"""
|
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/building_production_ml_systems/solutions/0_export_data_from_bq_to_gcs.ipynb | apache-2.0 | #%load_ext google.cloud.bigquery
import os
from google.cloud import bigquery
"""
Explanation: Exporting data from BigQuery to Google Cloud Storage
In this notebook, we export BigQuery data to GCS so that we can reuse our Keras model that was developed on CSV data.
Uncomment the following line if you are running the notebook locally:
End of explanation
"""
# Change with your own bucket and project below:
BUCKET = "<BUCKET>"
PROJECT = "<PROJECT>"
OUTDIR = "gs://{bucket}/taxifare/data".format(bucket=BUCKET)
# Export as environment variables so the %%bash cell below can read them.
os.environ['BUCKET'] = BUCKET
os.environ['OUTDIR'] = OUTDIR
os.environ['PROJECT'] = PROJECT
"""
Explanation: Change the following cell as necessary:
End of explanation
"""
# Create the BigQuery dataset "taxifare" if it does not already exist.
bq = bigquery.Client(project = PROJECT)
dataset = bigquery.Dataset(bq.dataset("taxifare"))
try:
    bq.create_dataset(dataset)
    print("Dataset created")
except Exception:
    # create_dataset raises on HTTP 409 Conflict when the dataset exists.
    # Catch Exception rather than a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit.
    print("Dataset already exists")
"""
Explanation: Create BigQuery tables
If you have not already created a BigQuery dataset for our data, run the following cell:
End of explanation
"""
%%bigquery
-- Training table: a deterministic ~1/1000 sample of NYC yellow-cab trips
-- (hashing pickup_datetime makes the sample reproducible), with sanity
-- filters on distance, fare, NYC-area coordinates and passenger count.
-- Column order deliberately matches the CSV layout the Keras model expects.
CREATE OR REPLACE TABLE taxifare.feateng_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 1000)) = 1
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
Explanation: Let's create a table with 1 million examples.
Note that the order of columns is exactly what was in our CSV files.
End of explanation
"""
%%bigquery
-- Validation table: same schema and filters as the training table, but a
-- ~1/10000 sample (1/10 the training size) drawn from a disjoint hash bucket
-- (= 2 vs = 1), so train and validation rows never overlap.
CREATE OR REPLACE TABLE taxifare.feateng_valid_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
"""
Explanation: Make the validation dataset be 1/10 the size of the training dataset.
End of explanation
"""
%%bash
# Clear any previous export so stale shards don't mix with the new ones.
echo "Deleting current contents of $OUTDIR"
gsutil -m -q rm -rf $OUTDIR
# Export the training table to sharded, header-less CSV files on GCS
# (the * in the destination lets BigQuery write multiple shards).
echo "Extracting training data to $OUTDIR"
bq --location=US extract \
--destination_format CSV \
--field_delimiter "," --noprint_header \
taxifare.feateng_training_data \
$OUTDIR/taxi-train-*.csv
# Export the validation table the same way.
echo "Extracting validation data to $OUTDIR"
bq --location=US extract \
--destination_format CSV \
--field_delimiter "," --noprint_header \
taxifare.feateng_valid_data \
$OUTDIR/taxi-valid-*.csv
gsutil ls -l $OUTDIR
# (Separate notebook cell) Peek at the first two exported training rows.
!gsutil cat gs://$BUCKET/taxifare/data/taxi-train-000000000000.csv | head -2
"""
Explanation: Export the tables as CSV files
End of explanation
"""
|
Neuroglycerin/neukrill-net-work | notebooks/model_run_and_result_analyses/Analyse Extra MLP Layers with Dropout.ipynb | mit | import pylearn2.utils
import pylearn2.config
import theano
import neukrill_net.dense_dataset
import neukrill_net.utils
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import holoviews as hl
%load_ext holoviews.ipython
import sklearn.metrics
"""
Explanation: Started some more runs with extra MLP layers, with varying levels of Dropout in each. Casually looking at the traces when they were running, it looked like the model with lower dropout converged faster, but the second with higher dropout reached a better final validation score.
End of explanation
"""
# Load the most recent checkpoint of the lower-dropout model (include
# probability 0.8 on the two extra MLP layers).
m = pylearn2.utils.serial.load(
    "/disk/scratch/neuroglycerin/models/8aug_extra_layers0p8_recent.pkl")
# All monitor channels that track a negative log-likelihood.
nll_channels = [c for c in m.monitor.channels.keys() if 'nll' in c]

def make_curves(model, *args):
    """Overlay holoviews Curves of value vs. examples seen for the named monitor channels.

    Parameters
    ----------
    model : pylearn2 model with a populated `monitor`
    *args : channel names to plot

    Returns
    -------
    A holoviews overlay of one Curve per channel (None if no names given).
    """
    curves = None
    for channel_name in args:
        channel = model.monitor.channels[channel_name]
        # Capitalise the first letter for the curve's group label.
        label = channel_name[0].upper() + channel_name[1:]
        curve = hl.Curve(zip(channel.example_record, channel.val_record), group=label)
        # Compare against None explicitly: `if not curves` relies on the
        # truthiness of a holoviews object, which is not guaranteed to be True.
        if curves is None:
            curves = curve
        else:
            curves += curve
    return curves

make_curves(m, *nll_channels)
"""
Explanation: Lower Dropout
This model had two MLP layers, each with dropout set to 0.8.
End of explanation
"""
# Load the higher-dropout model checkpoint (include probability 0.5) and
# plot the same NLL channels for comparison.
mh = pylearn2.utils.serial.load(
    "/disk/scratch/neuroglycerin/models/8aug_extra_layers0p5_recent.pkl")
make_curves(mh,*nll_channels)
"""
Explanation: It's definitely overfitting.
Higher Dropout
The same model, with dropout set to 0.5 as we've been doing so far:
End of explanation
"""
cl = m.monitor.channels['valid_y_nll']
ch = mh.monitor.channels['valid_y_nll']
# For each (examples-seen, value) point of the low-dropout model, find how
# much longer the high-dropout model took to first reach an equal-or-better
# valid_y_nll; break leaves only the earliest such crossing.
compare = []
for t,v in zip(cl.example_record,cl.val_record):
    for t2,v2 in zip(ch.example_record,ch.val_record):
        if v2 < v:
            # np.max clamps negative gaps (high-dropout was faster) to 0.
            compare.append((float(v),np.max([t2-t,0])))
            break
plt.plot(*zip(*compare))
plt.xlabel("valid_y_nll")
plt.ylabel("time difference")
"""
Explanation: It takes longer to reach a slightly lower validation score, but does not overfit.
How slow is dropout?
If we look at the difference in time to pass a validation score over the range we can see how much longer it takes the model using higher dropout.
End of explanation
"""
|
n-witt/MachineLearningWithText_SS2017 | exercises/solutions/0 Python basics exercises.ipynb | gpl-3.0 | def maximum(x, y):
if x > y:
return x
else:
return y
assert maximum(3, 3) == 3
assert maximum(1, 2) == 2
assert maximum(3, 2) == 3
"""
Explanation: 1. Define a function maximum that takes two numbers as arguments and returns the largest of them. Use the if-then-else construct available in Python. (It is true that Python has the max() function built in, but writing it yourself is nevertheless a good exercise.)
End of explanation
"""
def max_of_three(x, y, z):
    """Return the largest of the three numbers x, y and z.

    The original chain of strict comparisons fell through to `return x`
    whenever the maximum was shared by y and z (e.g. max_of_three(1, 2, 2)
    returned 1). Tracking a running largest value handles ties correctly.
    """
    largest = x
    if y > largest:
        largest = y
    if z > largest:
        largest = z
    return largest

assert max_of_three(1, 2, 3) == 3
assert max_of_three(1, 1, 2) == 2
assert max_of_three(2, 1, .5) == 2
assert max_of_three(0, 0, 0) == 0
"""
Explanation: 2. Define a function max_of_three that takes three numbers as arguments and returns the largest of them.
End of explanation
"""
def length(obj):
    """Return the number of items in *obj* (any iterable).

    Counted by hand, as the exercise asks us to avoid the built-in len().
    The counter is named `count` rather than `len`, so the builtin is not
    shadowed inside the function body.
    """
    count = 0
    for _ in obj:
        count += 1
    return count

assert length([1, 2, 3]) == 3
assert length('this is some string') == 19
"""
Explanation: 3. Define a function length that computes the length of a given list or string. (It is true that Python has the len() function built in, but writing it yourself is nevertheless a good exercise.)
End of explanation
"""
def is_vowel(char):
    """Return True if *char* is a single lowercase vowel, False otherwise.

    Membership is tested against a set of single characters. The original
    substring test `char in 'aeiou'` wrongly returned True for the empty
    string (a substring of everything) and for multi-character substrings
    such as 'ae'.
    """
    return char in {'a', 'e', 'i', 'o', 'u'}

assert is_vowel('t') == False
assert is_vowel('a') == True
"""
Explanation: 4. Write a function is_vowel that takes a character (i.e. a string of length 1) and returns True if it is a vowel, False otherwise.
End of explanation
"""
def accumulate(obj):
    """Return the sum of the numbers in the iterable *obj* (0 when empty)."""
    total = 0
    for value in obj:
        total = total + value
    return total

def multiply(obj):
    """Return the product of the numbers in the iterable *obj* (1 when empty)."""
    product = 1
    for value in obj:
        product = product * value
    return product

assert accumulate([1, 2, 3, 4]) == 10
assert multiply([1, 2, 3, 4]) == 24
"""
Explanation: 5. Define a function accumulate and a function multiply that sums and multiplies (respectively) all the numbers in a list of numbers. For example, sum([1, 2, 3, 4]) should return 10, and multiply([1, 2, 3, 4]) should return 24.
End of explanation
"""
from operator import add, mul

def calc(obj, func):
    """Fold *func* (operator.add or operator.mul) over the numbers in *obj*.

    The accumulator starts from the identity element of the operation
    (0 for addition, 1 for multiplication), so an empty *obj* returns that
    identity. The original initialised the accumulator to None for any
    other callable, which crashed opaquely on the first func(None, value)
    call; an unsupported function now raises a clear ValueError instead.
    """
    identities = {add: 0, mul: 1}
    if func not in identities:
        raise ValueError("func must be operator.add or operator.mul")
    result = identities[func]
    for value in obj:
        result = func(result, value)
    return result

print(calc([1, 2, 3, 4], mul))
print(calc([1, 2, 3, 4], add))
"""
Explanation: A more elegant and generic solution is given hereafter. It uses a functional approach, as the function that is to be calculated is passed to the function:
End of explanation
"""
def reverse(s):
    """Return *s* with its characters in reverse order.

    A reversed slice already yields a string, so the original's
    ''.join(...) over a list comprehension was redundant work.
    """
    return s[::-1]

assert reverse('I am testing') == 'gnitset ma I'
"""
Explanation: 6. Define a function reverse that computes the reversal of a string. For example, reverse("I am testing") should return the string "gnitset ma I".
End of explanation
"""
def is_palindrome(s):
    """Return True when *s* reads the same forwards and backwards."""
    backwards = reverse(s)
    return backwards == s

assert is_palindrome('radar') == True
assert is_palindrome('sonar') == False
"""
Explanation: 7. Define a function is_palindrome that recognizes palindromes (i.e. words that look the same written backwards). For example, is_palindrome("radar") should return True.
End of explanation
"""
def is_member(x, a):
    """Return True if the value *a* occurs in the iterable *x*.

    Hand-rolled membership test, as the exercise asks us to pretend the
    `in` operator does not exist. (Note: despite the exercise wording,
    the asserts below pass the container as *x* and the value as *a*.)
    """
    found = False
    for candidate in x:
        if candidate == a:
            found = True
            break
    return found

assert is_member([1, 2, 3], 4) == False
assert is_member([1, 2, 3], 2) == True
"""
Explanation: 8. Write a function is_member that takes a value (i.e. a number, string, etc) x and a list of values a, and returns True if x is a member of a, False otherwise. (Note that this is exactly what the in operator does, but for the sake of the exercise you should pretend Python did not have this operator.)
End of explanation
"""
def histogram(obj):
    """Print one line of '*' characters per integer in *obj*.

    Uses a single-argument print per row: the original
    print('*' * n, '\n') appended a stray space and an extra blank
    line after every row of stars.
    """
    for n in obj:
        print('*' * n)

histogram([4, 9, 7])
"""
Explanation: 9. Define a procedure histogram that takes a list of integers and prints a histogram to the screen. For example, histogram([4, 9, 7]) should print the following:
```
```
End of explanation
"""
def filter_long_words(words, n):
    """Return the words from *words* that are strictly longer than *n* characters.

    The exercise asks for words "longer than n"; the original used `>=`,
    which also kept words of exactly length n. (The assert below passes
    either way for this particular sentence.)
    """
    return [word for word in words if len(word) > n]

assert len(filter_long_words('this is some sentence'.split(), 3)) == 3
"""
Explanation: 10. Write a function filter_long_words that takes a list of words and an integer n and returns the list of words that are longer than n.
End of explanation
"""
def is_pangram(sentence):
    """Return True if *sentence* contains every letter a-z at least once.

    The check is case-insensitive: the original compared characters
    case-sensitively, so an all-upper-case pangram was not recognised.
    A set subset test also replaces the original's remove-in-try/except
    mutation of a working set.
    """
    alphabet = set('abcdefghijklmnopqrstuvwxyz')
    return alphabet.issubset(sentence.lower())

assert is_pangram('foo') == False
assert is_pangram('The quick brown fox jumps over the lazy dog') == True
"""
Explanation: 11. A pangram is a sentence that contains all the letters of the English alphabet at least once, for example: "The quick brown fox jumps over the lazy dog". Your task here is to write a function is_pangram to check a sentence to see if it is a pangram or not.
End of explanation
"""
def translate(eng):
    """Translate a list of English words into German.

    Words missing from the lexicon pass through unchanged.
    """
    dictionary = {
        "may": "möge",
        "the": "die",
        "force": "macht",
        "be": "sein",
        "with": "mit",
        "you": "dir"
    }
    # dict.get with the word itself as default replaces the explicit
    # membership-test-and-append loop.
    return [dictionary.get(word, word) for word in eng]

assert translate("may the force be with you".split()) == ['möge', 'die', 'macht', 'sein', 'mit', 'dir']
"""
Explanation: 12. Represent a small bilingual lexicon as a Python dictionary in the following fashion {"may": "möge", "the": "die", "force": "macht", "be": "sein", "with": "mit", "you": "dir"} and use it to translate the sentence "may the force be with you" from English into German. That is, write a function translate that takes a list of English words and returns a list of German words.
End of explanation
"""
def rot13(msg):
    """Encode/decode *msg* with the ROT-13 Caesar cipher.

    Letters are substituted via the lookup table below; every other
    character passes through unchanged. Because the shift is 13 in a
    26-letter alphabet, applying rot13 twice returns the original text.
    """
    key = {'a':'n', 'b':'o', 'c':'p', 'd':'q', 'e':'r', 'f':'s', 'g':'t', 'h':'u',
           'i':'v', 'j':'w', 'k':'x', 'l':'y', 'm':'z', 'n':'a', 'o':'b', 'p':'c',
           'q':'d', 'r':'e', 's':'f', 't':'g', 'u':'h', 'v':'i', 'w':'j', 'x':'k',
           'y':'l', 'z':'m', 'A':'N', 'B':'O', 'C':'P', 'D':'Q', 'E':'R', 'F':'S',
           'G':'T', 'H':'U', 'I':'V', 'J':'W', 'K':'X', 'L':'Y', 'M':'Z', 'N':'A',
           'O':'B', 'P':'C', 'Q':'D', 'R':'E', 'S':'F', 'T':'G', 'U':'H', 'V':'I',
           'W':'J', 'X':'K', 'Y':'L', 'Z':'M'}
    # dict.get with the character itself as default replaces the explicit
    # membership-test-and-append loop.
    return ''.join(key.get(char, char) for char in msg)

text = 'this is some text'
assert rot13(rot13(text)) == text
"""
Explanation: 13. In cryptography, a Caesar cipher is a very simple encryption techniques in which each letter in the plain text is replaced by a letter some fixed number of positions down the alphabet. For example, with a shift of 3, A would be replaced by D, B would become E, and so on. The method is named after Julius Caesar, who used it to communicate with his generals. ROT-13 ("rotate by 13 places") is a widely used example of a Caesar cipher where the shift is 13. In Python, the key for ROT-13 may be represented by means of the following dictionary:
key = {'a':'n', 'b':'o', 'c':'p', 'd':'q', 'e':'r', 'f':'s', 'g':'t', 'h':'u',
'i':'v', 'j':'w', 'k':'x', 'l':'y', 'm':'z', 'n':'a', 'o':'b', 'p':'c',
'q':'d', 'r':'e', 's':'f', 't':'g', 'u':'h', 'v':'i', 'w':'j', 'x':'k',
'y':'l', 'z':'m', 'A':'N', 'B':'O', 'C':'P', 'D':'Q', 'E':'R', 'F':'S',
'G':'T', 'H':'U', 'I':'V', 'J':'W', 'K':'X', 'L':'Y', 'M':'Z', 'N':'A',
'O':'B', 'P':'C', 'Q':'D', 'R':'E', 'S':'F', 'T':'G', 'U':'H', 'V':'I',
'W':'J', 'X':'K', 'Y':'L', 'Z':'M'}
Your task in this exercise is to implement an encoder/decoder of ROT-13 called rot13. Once you're done, you will be able to read the following secret message:
Pnrfne pvcure? V zhpu cersre Pnrfne fnynq!
Note that since English has 26 characters, your ROT-13 program will be able to both encode and decode texts written in English.
End of explanation
"""
from collections import Counter, defaultdict
import string

def char_freq_table(filename):
    """Return a mapping from each character in *filename* to its occurrence count.

    collections.Counter replaces the hand-rolled defaultdict counting loop;
    Counter is a dict subclass, so callers can still use .items() etc., and
    looking up an absent character yields 0. The file is also opened in a
    `with` block so the handle is always closed.
    """
    with open(filename) as fh:
        return Counter(fh.read())
# Count character frequencies in the sample text, then write a sorted,
# wiki-style table of the printable characters and echo it back.
frequencies = char_freq_table('material/jedi.txt')
# Write next to the input file: the original wrote 'jedi_frequencies.txt'
# into the working directory but then read 'material/jedi_frequencies.txt'.
with open('material/jedi_frequencies.txt', 'w') as fh:
    # sorted() gives the "sorted ... frequency table" the exercise asks for.
    for k, v in sorted(frequencies.items()):
        if k in string.printable.replace('\n', ''):
            fh.write('| {} || {} |\n'.format(k, v))
with open('material/jedi_frequencies.txt') as fh:
    print(fh.read())
"""
Explanation: 14. Write a procedure char_freq_table that accepts the file name material/jedi.txt as argument, builds a frequency listing of the characters contained in the file, and prints a sorted and nicely formatted character frequency table to the screen.
End of explanation
"""
|
erdewit/ib_insync | notebooks/tick_data.ipynb | bsd-2-clause | from ib_insync import *
util.startLoop()
ib = IB()
ib.connect('127.0.0.1', 7497, clientId=15)
"""
Explanation: Tick data
For optimum results this notebook should be run during the Forex trading session.
End of explanation
"""
# Build Forex contracts for six major pairs and let IB fill in the full
# contract details (conId etc.); keep EURUSD handy for the examples below.
contracts = [Forex(pair) for pair in ('EURUSD', 'USDJPY', 'GBPUSD', 'USDCHF', 'USDCAD', 'AUDUSD')]
ib.qualifyContracts(*contracts)
eurusd = contracts[0]
"""
Explanation: Streaming tick data
Create some Forex contracts:
End of explanation
"""
# Subscribe to streaming market data for every pair ('' = default generic
# tick list; the two False flags request streaming rather than snapshot data).
for contract in contracts:
    ib.reqMktData(contract, '', False, False)
"""
Explanation: Request streaming ticks for them:
End of explanation
"""
# Grab the live-updated ticker for EURUSD and give it a couple of seconds
# to receive its first quotes before displaying it.
ticker = ib.ticker(eurusd)
ib.sleep(2)
ticker
"""
Explanation: Wait a few seconds for the tickers to get filled.
End of explanation
"""
# Forex tick prices are nan; marketPrice() derives a usable midpoint price.
ticker.marketPrice()
"""
Explanation: The price of Forex ticks is always nan. To get a midpoint price use midpoint() or marketPrice().
The tickers are kept live updated, try this a few times to see if the price changes:
End of explanation
"""
from IPython.display import display, clear_output
import pandas as pd
# One row per currency pair; columns mirror the ticker fields copied below.
df = pd.DataFrame(
    index=[c.pair() for c in contracts],
    columns=['bidSize', 'bid', 'ask', 'askSize', 'high', 'low', 'close'])

def onPendingTickers(tickers):
    """Refresh the quote table in place whenever any subscribed ticker updates."""
    for t in tickers:
        df.loc[t.contract.pair()] = (
            t.bidSize, t.bid, t.ask, t.askSize, t.high, t.low, t.close)
    clear_output(wait=True)  # redraw over the previous table instead of appending
    display(df)

# Subscribe, let the table update live for 30 seconds, then unsubscribe.
ib.pendingTickersEvent += onPendingTickers
ib.sleep(30)
ib.pendingTickersEvent -= onPendingTickers
"""
Explanation: The following cell will start a 30 second loop that prints a live updated ticker table.
It is updated on every ticker change.
End of explanation
"""
# Stop all the live market-data subscriptions started above.
for contract in contracts:
    ib.cancelMktData(contract)
"""
Explanation: New tick data is available in the 'ticks' attribute of the pending tickers.
The tick data will be cleared before the next update.
To stop the live tick subscriptions:
End of explanation
"""
# Subscribe to unsampled tick-by-tick bid/ask data (uses one of the three
# available tick-by-tick subscription slots), peek at it, then cancel.
ticker = ib.reqTickByTickData(eurusd, 'BidAsk')
ib.sleep(2)
print(ticker)
ib.cancelTickByTickData(ticker.contract, 'BidAsk')
"""
Explanation: Tick by Tick data
The ticks in the previous section are time-sampled by IB in order to cut down on bandwidth. So with reqMktData not every tick from the exchanges is sent. The promise of reqTickByTickData is to send every tick, just as it appears in the TWS Time &amp; Sales window. This functionality is severely limited by a cap of just three simultaneous subscriptions, where bid-ask ticks and sale ticks each use up a subscription.
The tick-by-tick updates are available from ticker.tickByTicks and are signalled by ib.pendingTickersEvent or ticker.updateEvent.
End of explanation
"""
import datetime
# Empty start + concrete end means "the 1000 ticks leading up to `end`";
# exactly one of start/end must be empty.
start = ''
# NOTE(review): this is a naive local-time timestamp; confirm against the
# ib_insync docs whether a UTC/zoned datetime is expected here.
end = datetime.datetime.now()
ticks = ib.reqHistoricalTicks(eurusd, start, end, 1000, 'BID_ASK', useRth=False)
# Show the most recent historical tick, then drop the TWS connection.
ticks[-1]
ib.disconnect()
"""
Explanation: Historical tick data
Historical tick data can be fetched with a maximum of 1000 ticks at a time. Either the start time or the end time must be given, and one of them must remain empty:
End of explanation
"""
|
letsgoexploring/teaching | winter2017/econ129/python/Econ129_Class_05.ipynb | mit | # Use the requests module to download money growth and inflation data
url = 'http://www.briancjenkins.com/data/quantitytheory/csv/qtyTheoryData.csv'
r = requests.get(url,verify=True)
with open('qtyTheoryData.csv','wb') as newFile:
newFile.write(r.content)
"""
Explanation: Class 5: Pandas
Pandas is a Python package for data analysis. Documentation and examples: http://pandas.pydata.org/
Pandas basics
To learn how Pandas works, we'll make use of a dataset containing long-run averages of inflation, money growth, and real GDP. The dataset is available here: http://www.briancjenkins.com/data/quantitytheory/csv/qtyTheoryData.csv. Recall that the quantity theory of money implies the following linear relationship between the long-run rate of money growth, the long-run rate of inflation, and the long-run rate of real GDP growth in a country:
\begin{align}
\text{inflation} & = \text{money growth} - \text{real GDP growth},
\end{align}
Generally, we treat real GDP growth and money supply growth as exogenous so this is a theory about the determination of inflation.
Now, we could download the data manually, but we might as well use Python to do it. The requests module is good for this.
End of explanation
"""
import pandas as pd
"""
Explanation: Import Pandas
End of explanation
"""
# Import quantity theory data into a Pandas DataFrame called df with country names as the index.
# Print the first 5 rows
# Print the last 5 rows
# Print the type of df
"""
Explanation: Import data from a csv file
Pandas has a function called read_csv() for reading data from a csv file into a Pandas DataFrame object. Let's import the quantity thery data into a variable called df.
End of explanation
"""
# Print the columns of df
# Create a new variable called money equal to the 'money growth' column and print
# Print the type of the variable money
# Print the first 5 rows of just the inflation, money growth, and gdp growth columns
"""
Explanation: Properties of DataFrame objects
Like entries in a spreadsheet file, elements in a DataFrame object have row and column coordinates. Column names are always strings.
End of explanation
"""
# Print the index of df
# Create a new variable called usa equal to the 'United States' row and print
# Print the inflation rate of the United States
# Print the inflation rate of the United States in a different way
# Create a new variable called first equal to the first row in the DataFrame and print
"""
Explanation: The set of row coordinates is the index. Index values can be strings, numbers, or dates.
End of explanation
"""
# Create a new column called 'difference' equal to the money growth column minus the inflation column and print the column
"""
Explanation: Create new columns by name.
End of explanation
"""
# Print the summary statistics for df
"""
Explanation: Methods
A Pandas DataFrame has a bunch of useful methods defined for it. describe() returns some summary statistics.
End of explanation
"""
# Print the correlation coefficient for inflation and money growth
# Print the correlation coefficient for inflation and real GDP growth
# Print the correlation coefficient for money growth and real GDP growth
"""
Explanation: While Pandas' describe function provides some good summary information, NumPy also has some useful functions for computing statistics. For example, the NumPy function corrcoef() computes the coefficient of correlation for two series.
End of explanation
"""
# Print rows for the countries with the 10 lowest inflation rates
# Print rows for the countries with the 10 lowest money growth rates
# Print rows for the countries with the 10 highest inflation rates
# Print rows for the countries with the 10 highest money growth rates
"""
Explanation: sort_values() returns a copy of the original DataFrame sorted along the given column. The optional argument ascending is set to True by default, but can be changed to False if you want to print the lowest first.
End of explanation
"""
# Print df with the index descending alphabetical order
"""
Explanation: sort_index() returns a copy of the original DataFrame sorted along the index. The optional argument ascending is set to True by default, but can be changed to False if you want to print the lowest first.
End of explanation
"""
# Construct a well-labeled scatter plot of inflation against money growth
"""
Explanation: Quick plotting example
Construct a graph that visually confirms the quantity theory of money by making a scatter plot with average money growth on the horizontal axis and average inflation on the vertical axis. Add a 45 degree line and labels and a title.
End of explanation
"""
|
mayankjohri/LetsExplorePython | Section 3 - Machine Learning/Supervised Learning Algorithm/Regression Analysis/3. Ridge Regression.ipynb | gpl-3.0 | import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 10
#Define input array with angles from 60deg to 300deg converted to radians
x = np.array([i*np.pi/180 for i in range(60,300,4)])
np.random.seed(10) #Setting seed for reproducibility
# Noisy sine wave: y = sin(x) + Gaussian noise with standard deviation 0.15.
y = np.sin(x) + np.random.normal(0,0.15,len(x))
# Two-column DataFrame (x, y) used throughout the rest of the notebook.
data = pd.DataFrame(np.column_stack([x,y]),columns=['x','y'])
plt.plot(data['x'],data['y'],'.')
"""
Explanation: Ridge & Lasso Regression
Ridge and Lasso regression are powerful techniques generally used for creating parsimonious models in presence of a ‘large’ number of features. Here ‘large’ can typically mean either of two things:
Large enough to enhance the tendency of a model to overfit (as low as 10 variables might cause overfitting)
Large enough to cause computational challenges. With modern systems, this situation might arise in case of millions or billions of features
Though Ridge and Lasso might appear to work towards a common goal, the inherent properties and practical use cases differ substantially. They use ‘regularization’ techniques which work by penalizing the magnitude of coefficients of features along with minimizing the error between predicted and actual observations. The key difference is in how they assign penalty to the coefficients.
Ridge Regression:
It performs L2 regularization which adds penalty equivalent to square of the magnitude of coefficients
Minimization objective
LS Obj + α * (sum of square of coefficients)
Lasso Regression:
It performs L1 regularization which adds penalty equivalent to absolute value of the magnitude of coefficients
Minimization objective
LS Obj + α * (sum of absolute value of coefficients)
*** ‘LS Obj’ refers to ‘least squares objective’, i.e. the linear regression objective without regularization.
Ridge Regression
Ridge Regression is a technique used when the data suffers from multicollinearity (independent variables are highly correlated). In multicollinearity, even though the least squares estimates (OLS) are unbiased, their variances are large which deviates the observed value far from the true value. By adding a degree of bias to the regression estimates, ridge regression reduces the standard errors.
Linear regression can be represented as:
y = a + b*x
Collinearity is a linear association between two explanatory variables. Two variables are perfectly collinear if there is an exact linear relationship between them. For example, X<sub>2i</sub> and X<sub>1i</sub> are perfectly collinear if there exist parameters β0 and β1 such that, for all observations i, we have
X<sub>2i</sub> = β0 + β1X<sub>1i</sub>
Multicollinearity refers to a situation in which two or more explanatory variables in a multiple regression model are highly linearly related. We have perfect multicollinearity if, for example as in the equation above, the correlation between two independent variables is equal to 1 or −1. In practice, we rarely face perfect multicollinearity in a data set. More commonly, the issue of multicollinearity arises when there is an approximate linear relationship among two or more independent variables.
Y<sub>i</sub> = β0 + β<sub>1</sub>X<sub>1i</sub> + β<sub>2</sub>X<sub>2i</sub> + β<sub>3</sub>X<sub>3i</sub> + ... + + β<sub>n</sub>X<sub>ni</sub> = 0
Important Points:
The assumptions of this regression is same as least squared regression except normality is not to be assumed
It shrinks the value of coefficients but doesn’t reaches zero, which suggests no feature selection feature
This is a regularization method and uses l2 regularization.
End of explanation
"""
# Add polynomial feature columns x_2 ... x_15 (x to the given power) so each
# model k below can use predictors {x, x_2, ..., x_k}.
for i in range(2,16):  #power of 1 is already there
    colname = 'x_%d'%i      #new var will be x_power
    data[colname] = data['x']**i
print( data.head())
"""
Explanation: This resembles a sine curve but not exactly because of the noise. We’ll use this as an example to test different scenarios in this article. Lets try to estimate the sine function using polynomial regression with powers of x form 1 to 15. Lets add a column for each power upto 15 in our dataframe. This can be accomplished using the following code:
End of explanation
"""
#Import Linear Regression model from scikit-learn.
from sklearn.linear_model import LinearRegression
def linear_regression(data, power, models_to_plot):
    """Fit an OLS model on powers of x up to ``power`` and summarize the fit.

    Parameters
    ----------
    data : pandas DataFrame with columns 'x', 'y' and 'x_2' ... 'x_<power>'.
    power : int
        Highest power of x used as a predictor.
    models_to_plot : dict
        Maps a power to a matplotlib subplot code; if ``power`` is a key,
        the fitted curve is drawn into that subplot.

    Returns
    -------
    list : [rss, intercept, coef_1, ..., coef_<power>]
        RSS is the residual sum of squares on the training data.
    """
    # Build the predictor list: 'x' plus 'x_2' ... 'x_<power>' when power >= 2.
    predictors = ['x']
    if power >= 2:
        predictors.extend(['x_%d' % i for i in range(2, power + 1)])

    # NOTE: the original passed normalize=True, which was removed from
    # scikit-learn in 1.2. For plain OLS the normalization was undone when
    # the coefficients were rescaled back to the original units, so dropping
    # it leaves the fitted model unchanged while keeping the code running
    # on current scikit-learn.
    linreg = LinearRegression()
    linreg.fit(data[predictors], data['y'])
    y_pred = linreg.predict(data[predictors])

    # Check if a plot is to be made for the entered power
    if power in models_to_plot:
        plt.subplot(models_to_plot[power])
        plt.tight_layout()
        plt.plot(data['x'], y_pred)
        plt.plot(data['x'], data['y'], '.')
        plt.title('Plot for power: %d' % power)

    # Return the result in pre-defined format: RSS, intercept, coefficients.
    rss = sum((y_pred - data['y'])**2)
    ret = [rss]
    ret.extend([linreg.intercept_])
    ret.extend(linreg.coef_)
    return ret
"""
Explanation: Now that we have all the 15 powers, lets make 15 different linear regression models with each model containing variables with powers of x from 1 to the particular model number. For example, the feature set of model 8 will be – {x, x_2, x_3, … ,x_8}.
First, we’ll define a generic function which takes in the required maximum power of x as an input and returns a list containing – [ model RSS, intercept, coef_x, coef_x2, … upto entered power ]. Here RSS refers to ‘Residual Sum of Squares’ which is nothing but the sum of square of errors between the predicted and actual values in the training data set. The python code defining the function is:
End of explanation
"""
# Dataframe that collects RSS, intercept and the 15 coefficients per model.
col = ['rss', 'intercept'] + ['coef_x_%d' % i for i in range(1, 16)]
ind = ['model_pow_%d' % i for i in range(1, 16)]
coef_matrix_simple = pd.DataFrame(index=ind, columns=col)

# Subplot layout for the powers whose fits we want to visualise.
models_to_plot = {1: 231, 3: 232, 6: 233, 9: 234, 12: 235, 15: 236}

# Fit one model per maximum power and store its summary row.
for power in range(1, 16):
    summary = linear_regression(data, power=power, models_to_plot=models_to_plot)
    coef_matrix_simple.iloc[power - 1, 0:power + 2] = summary
"""
Explanation: Note that this function will not plot the model fit for all the powers but will return the residual sum of squares (RSS) and coefficients for all the models. I’ll skip the details of the code for now to maintain brevity. I’ll be happy to discuss the same through comments below if required.
Now, we can make all 15 models and compare the results. For ease of analysis, we’ll store all the results in a Pandas dataframe and plot 6 models to get an idea of the trend. Consider the following code:
End of explanation
"""
# Set the display format to be scientific for ease of analysis
pd.options.display.float_format = '{:,.2g}'.format
# Bare expression: in a notebook cell this renders the coefficient table
coef_matrix_simple
"""
Explanation: We would expect the models with increasing complexity to better fit the data and result in lower RSS values. This can be verified by looking at the plots generated for 6 models.
This clearly aligns with our initial understanding. As the model complexity increases, the models tends to fit even smaller deviations in the training data set. Though this leads to overfitting, lets keep this issue aside for some time and come to our main objective, i.e. the impact on the magnitude of coefficients. This can be analysed by looking at the data frame created above.
End of explanation
"""
import numpy as np
import scipy.stats as st
import sklearn.linear_model as lm
import matplotlib.pyplot as plt
%matplotlib inline
# Ground-truth function: a rapidly growing exponential sampled on [0, 2]
f = lambda x: np.exp(3 * x)
x_tr = np.linspace(0., 2, 200)
y_tr = f(x_tr)

# A handful of noisy training observations, all within [0, 1]
x = np.array([0, .1, .2, .5, .8, .9, 1])
y = f(x) + np.random.randn(len(x))

# Plot the first half of the true curve (the [0, 1] interval) and the data
plt.figure(figsize=(6,3));
plt.plot(x_tr[:100], y_tr[:100], '--k');
plt.plot(x, y, 'ok', ms=10);

# We create the model.
lr = lm.LinearRegression()
# We train the model on our training dataset.
lr.fit(x[:, np.newaxis], y);
# Now, we predict points with our trained model.
y_lr = lr.predict(x_tr[:, np.newaxis])

# Plot truth, straight-line fit and data together
plt.figure(figsize=(6,3));
plt.plot(x_tr, y_tr, '--k');
plt.plot(x_tr, y_lr, 'g');
plt.plot(x, y, 'ok', ms=10);
plt.xlim(0, 1);
plt.ylim(y.min()-1, y.max()+1);
plt.title("Linear regression");

# Polynomial fits of degree 2 and 5 via a Vandermonde design matrix
lrp = lm.LinearRegression()
plt.figure(figsize=(6,3));
plt.plot(x_tr, y_tr, '--k');
for deg, s in zip([2, 5], ['-', '.']):
    lrp.fit(np.vander(x, deg + 1), y);
    y_lrp = lrp.predict(np.vander(x_tr, deg + 1))
    plt.plot(x_tr, y_lrp, s, label='degree ' + str(deg));
plt.legend(loc=2);
plt.xlim(0, 1.4);
plt.ylim(-10, 40);
# Print the model's coefficients.
# NOTE: after the loop this shows only the last (degree 5) fit
print(' '.join(['%.2f' % c for c in lrp.coef_]))
plt.plot(x, y, 'ok', ms=10);
plt.title("Linear regression");

# Same polynomial fits with RidgeCV: the regularization weight is chosen
# automatically by cross-validation
ridge = lm.RidgeCV()
plt.figure(figsize=(6,3));
plt.plot(x_tr, y_tr, '--k');
for deg, s in zip([2, 5], ['-', '.']):
    ridge.fit(np.vander(x, deg + 1), y);
    y_ridge = ridge.predict(np.vander(x_tr, deg + 1))
    plt.plot(x_tr, y_ridge, s, label='degree ' + str(deg));
plt.legend(loc=2);
plt.xlim(0, 1.5);
plt.ylim(-5, 80);
# Print the model's coefficients (last fitted degree only).
print(' '.join(['%.2f' % c for c in ridge.coef_]))
plt.plot(x, y, 'ok', ms=10);
plt.title("Ridge regression");
from sklearn.linear_model import Ridge
def ridge_regression(data, predictors, alpha, models_to_plot=None):
    """Fit a ridge regression with penalty ``alpha`` and summarize the fit.

    Parameters
    ----------
    data : pandas DataFrame with the predictor columns and a 'y' column.
    predictors : list of column names used as features.
    alpha : float, L2 regularization strength.
    models_to_plot : dict or None
        Maps an alpha to a matplotlib subplot code; if ``alpha`` is a key,
        the fitted curve is drawn into that subplot.

    Returns
    -------
    list : [rss, intercept, coef_1, ..., coef_n]
    """
    # Avoid the mutable-default-argument pitfall of the original signature
    # (a shared dict default); None behaves identically for all callers.
    if models_to_plot is None:
        models_to_plot = {}

    # NOTE(review): `normalize` was removed from scikit-learn in 1.2; on a
    # recent scikit-learn this call must be replaced by explicit scaling
    # (e.g. a Pipeline with a scaler) -- confirm before upgrading.
    ridgereg = Ridge(alpha=alpha, normalize=True)
    ridgereg.fit(data[predictors], data['y'])
    y_pred = ridgereg.predict(data[predictors])

    # Check if a plot is to be made for the entered alpha
    if alpha in models_to_plot:
        plt.subplot(models_to_plot[alpha])
        plt.tight_layout()
        plt.plot(data['x'], y_pred)
        plt.plot(data['x'], data['y'], '.')
        plt.title('Plot for alpha: %.3g' % alpha)

    # Return the result in pre-defined format
    rss = sum((y_pred - data['y'])**2)
    ret = [rss]
    ret.extend([ridgereg.intercept_])
    ret.extend(ridgereg.coef_)
    return ret
"""
Explanation: It is clearly evident that the size of coefficients increase exponentially with increase in model complexity. I hope this gives some intuition into why putting a constraint on the magnitude of coefficients can be a good idea to reduce model complexity.
Ridge Regression
As mentioned before, ridge regression performs ‘L2 regularization‘, i.e. it adds a factor of sum of squares of coefficients in the optimization objective. Thus, ridge regression optimizes the following:
Objective = RSS + α * (sum of square of coefficients)
Here, α (alpha) is the parameter which balances the amount of emphasis given to minimizing RSS vs minimizing sum of square of coefficients. α can take various values:
α = 0:
The objective becomes same as simple linear regression.
We’ll get the same coefficients as simple linear regression.
α = ∞:
The coefficients will be zero. Why? Because of infinite weightage on square of coefficients, anything less than zero will make the objective infinite.
0 < α < ∞:
The magnitude of α will decide the weightage given to different parts of objective.
The coefficients will be somewhere between 0 and ones for simple linear regression.
OR:
It works like linear regression, except that it prevents the polynomial's coefficients to explode. By adding a regularization term in the loss function, ridge regression imposes some structure on the underlying model.
The ridge regression model has a meta-parameter which represents the weight of the regularization term. We could try different values with trials and errors, using the Ridge class. However, scikit-learn includes another model called RidgeCV which includes a parameter search with cross-validation. In practice, it means that we don't have to tweak this parameter by hand: scikit-learn does it for us. Since the models of scikit-learn always follow the fit-predict API, all we have to do is replace lm.LinearRegression by lm.RidgeCV in the code above. We will give more details in the next section.
End of explanation
"""
#Initialize predictors to be set of 15 powers of x
# Predictors: x plus its powers x_2 ... x_15
predictors = ['x'] + ['x_%d' % i for i in range(2, 16)]

# Regularisation strengths to try
alpha_ridge = [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20]

# Table holding RSS, intercept and the 15 coefficients per alpha
col = ['rss', 'intercept'] + ['coef_x_%d' % i for i in range(1, 16)]
ind = ['alpha_%.2g' % a for a in alpha_ridge]
coef_matrix_ridge = pd.DataFrame(index=ind, columns=col)

# Alphas whose fits get a subplot
models_to_plot = {1e-15: 231, 1e-10: 232, 1e-4: 233, 1e-3: 234, 1e-2: 235, 5: 236}

# Fit one ridge model per alpha and record its summary row
for row, alpha in enumerate(alpha_ridge):
    coef_matrix_ridge.iloc[row, ] = ridge_regression(data, predictors, alpha, models_to_plot)
"""
Explanation: Note the ‘Ridge’ function used here. It takes ‘alpha’ as a parameter on initialization. Also, keep in mind that normalizing the inputs is generally a good idea in every type of regression and should be used in case of ridge regression as well.
Now, lets analyze the result of Ridge regression for 10 different values of α ranging from 1e-15 to 20. These values have been chosen so that we can easily analyze the trend with change in values of α. These would however differ from case to case.
Note that each of these 10 models will contain all the 15 variables and only the value of alpha would differ. This is different from the simple linear regression case where each model had a subset of features.
End of explanation
"""
# Set the display format to be scientific for ease of analysis
pd.options.display.float_format = '{:,.2g}'.format
# Bare expression: in a notebook cell this renders the coefficient table
coef_matrix_ridge
"""
Explanation: Here we can clearly observe that as the value of alpha increases, the model complexity reduces. Though higher values of alpha reduce overfitting, significantly high values can cause underfitting as well (eg. alpha = 5). Thus alpha should be chosen wisely. A widely accept technique is cross-validation, i.e. the value of alpha is iterated over a range of values and the one giving higher cross-validation score is chosen.
End of explanation
"""
# Count, for each alpha (row), how many coefficients are exactly zero
coef_matrix_ridge.apply(lambda row: sum(row.values == 0), axis=1)
"""
Explanation: This straight away gives us the following inferences:
The RSS increases with an increase in alpha, as the model complexity reduces
An alpha as small as 1e-15 gives us significant reduction in magnitude of coefficients. How? Compare the coefficients in the first row of this table to the last row of simple linear regression table.
High alpha values can lead to significant underfitting. Note the rapid increase in RSS for values of alpha greater than 1
Though the coefficients are very very small, they are NOT zero.
The first 3 are very intuitive. But #4 is also a crucial observation. Let’s reconfirm the same by determining the number of zeros in each row of the coefficients data set:
End of explanation
"""
from sklearn.linear_model import Lasso
def lasso_regression(data, predictors, alpha, models_to_plot={}):
    """Fit a lasso model (L1 penalty ``alpha``) and summarize the fit.

    Returns [rss, intercept, coef_1, ..., coef_n]; when ``alpha`` is a key
    of ``models_to_plot``, the fitted curve is drawn into that subplot.
    NOTE(review): ``normalize=True`` was removed from scikit-learn in 1.2;
    this call needs explicit scaling on recent versions -- confirm before
    upgrading.
    """
    model = Lasso(alpha=alpha, normalize=True, max_iter=1e5)
    model.fit(data[predictors], data['y'])
    y_pred = model.predict(data[predictors])

    # Draw the fit only for the alpha values selected by the caller.
    if alpha in models_to_plot:
        plt.subplot(models_to_plot[alpha])
        plt.tight_layout()
        plt.plot(data['x'], y_pred)
        plt.plot(data['x'], data['y'], '.')
        plt.title('Plot for alpha: %.3g' % alpha)

    # Summary row: RSS first, then intercept and all coefficients.
    residuals = y_pred - data['y']
    return [sum(residuals**2), model.intercept_] + list(model.coef_)
# Predictors: x plus its powers x_2 ... x_15
predictors = ['x'] + ['x_%d' % i for i in range(2, 16)]

# L1 penalties to try
alpha_lasso = [1e-15, 1e-10, 1e-8, 1e-5, 1e-4, 1e-3, 1e-2, 1, 5, 10]

# Table holding RSS, intercept and the 15 coefficients per alpha
col = ['rss', 'intercept'] + ['coef_x_%d' % i for i in range(1, 16)]
ind = ['alpha_%.2g' % a for a in alpha_lasso]
coef_matrix_lasso = pd.DataFrame(index=ind, columns=col)

# Alphas whose fits get a subplot
models_to_plot = {1e-10: 231, 1e-5: 232, 1e-4: 233, 1e-3: 234, 1e-2: 235, 1: 236}

# Fit one lasso model per alpha and record its summary row
for row, alpha in enumerate(alpha_lasso):
    coef_matrix_lasso.iloc[row, ] = lasso_regression(data, predictors, alpha, models_to_plot)
"""
Explanation: Lasso Regression
LASSO stands for Least Absolute Shrinkage and Selection Operator. There are 2 key words of interest ‘absolute‘ and ‘selection‘.
Lets consider the former first and worry about the latter later.
Lasso regression performs L1 regularization, i.e. it adds a factor of sum of absolute value of coefficients in the optimization objective. Thus, lasso regression optimizes the following:
Objective = RSS + α * (sum of absolute value of coefficients)
Here, α (alpha) works similar to that of ridge and provides a trade-off between balancing RSS and magnitude of coefficients. Like that of ridge, α can take various values. Lets iterate it here briefly:
α = 0: Same coefficients as simple linear regression
α = ∞: All coefficients zero (same logic as before)
0 < α < ∞: coefficients between 0 and that of simple linear regression
End of explanation
"""
# URL: http://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_and_elasticnet.html
import numpy as np
import matplotlib.pyplot as plt

from sklearn.metrics import r2_score

# #############################################################################
# Generate some sparse data to play with.
# NOTE: the np.random calls below must stay in this exact order -- the fixed
# seed makes the example reproducible only if the call sequence is unchanged.
np.random.seed(42)

n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0  # sparsify coef: keep only 10 non-zero coefficients
y = np.dot(X, coef)

# add noise
y += 0.01 * np.random.normal(size=n_samples)

# Split data in train set and test set (first half / second half)
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]

# #############################################################################
# Lasso
from sklearn.linear_model import Lasso

alpha = 0.1
lasso = Lasso(alpha=alpha)

y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)

# #############################################################################
# ElasticNet (same alpha; l1_ratio balances the L1 and L2 penalties)
from sklearn.linear_model import ElasticNet

enet = ElasticNet(alpha=alpha, l1_ratio=0.7)

y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)

# Compare the estimated coefficient vectors against the ground truth
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
         label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
         label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
          % (r2_score_lasso, r2_score_enet))
plt.show()
"""
Explanation: This again tells us that the model complexity decreases with increase in the values of alpha. But notice the straight line at alpha=1. Appears a bit strange to me. Let’s explore this further by looking at the coefficients:
Key Difference
Ridge: It includes all (or none) of the features in the model. Thus, the major advantage of ridge regression is coefficient shrinkage and reducing model complexity.
Lasso: Along with shrinking coefficients, lasso performs feature selection as well. (Remember the ‘selection‘ in the lasso full-form?) As we observed earlier, some of the coefficients become exactly zero, which is equivalent to the particular feature being excluded from the model.
Typical Use Cases
Ridge: It is majorly used to prevent overfitting. Since it includes all the features, it is not very useful in case of exorbitantly high #features, say in millions, as it will pose computational challenges.
Lasso: Since it provides sparse solutions, it is generally the model of choice (or some variant of this concept) for modelling cases where the #features are in millions or more. In such a case, getting a sparse solution is of great computational advantage as the features with zero coefficients can simply be ignored.
End of explanation
"""
|
fweik/espresso | doc/tutorials/ferrofluid/ferrofluid_part3.ipynb | gpl-3.0 | import espressomd
espressomd.assert_features('DIPOLES', 'LENNARD_JONES')
from espressomd.magnetostatics import DipolarP3M
import numpy as np
"""
Explanation: Ferrofluid - Part 3
Table of Contents
Susceptibility with fluctuation formulas
Derivation of the fluctuation formula
Simulation
Magnetization curve of a 3D system
Remark: The equilibration and sampling times used in this tutorial would be not sufficient for scientific purposes, but they are long enough to get at least a qualitative insight of the behaviour of ferrofluids. They have been shortened so we achieve reasonable computation times for the purpose of a tutorial.
Susceptibility with fluctuation formulas
In this part we want to calculate estimators for the initial susceptibility, i.e. the susceptibility at zero external magnetic field. One could carry out several simulations with different external magnetic field strengths and get the initial susceptibility by fitting a line to the results. We want to go a more elegant way by using fluctuation formulas known from statistical mechanics.
In three dimensions the initial susceptibility $\chi_{init}$ can be calculated with zero field simulations through
\begin{equation}
\chi_\mathrm{init} = \frac{V \cdot \mu_0}{3 \cdot k_\mathrm{B} T} \left( \langle \boldsymbol{M}^2 \rangle - \langle \boldsymbol{M} \rangle^2 \right) = \frac{\mu_0}{3 \cdot k_\mathrm{B} T \cdot V} \left( \langle \boldsymbol{\mu}^2 \rangle - \langle \boldsymbol{\mu} \rangle^2 \right)
\end{equation}
where $\boldsymbol{M}$ is the magnetization vector and $\boldsymbol{\mu}$ is the total magnetic dipole moment of the system. In direction $i$ it reads
\begin{equation}
M_i = \frac{1}{V} \Bigg\langle \sum_{j=1}^N \tilde{\mu}_j^i \Bigg\rangle
\end{equation}
where $\tilde{\mu}_j^i$ is the $j$ th dipole moment in direction $i$.
Derivation of the fluctuation formula
We want to derive the fluctuation formula. We start with the definition of the magnetic susceptibility. In general this reads
\begin{equation}
\chi \equiv \frac{\partial}{\partial H} \langle M_{\boldsymbol{H}} \rangle
\end{equation}
with $\langle M_{\boldsymbol{H}} \rangle$ the ensemble averaged magnetization in direction of a homogeneous external magnetic field $\boldsymbol{H}$.
In thermal equilibrium the ensemble average of the magnetization reads
\begin{equation}
\langle M_{\boldsymbol{H}} \rangle = \frac{1}{V Z_\mathrm{c}} \left \lbrack \sum_{\alpha} \mu_{\boldsymbol{H},\alpha} e^{ -\beta E_{\alpha}(H=0) + \beta\mu_0\mu_{\boldsymbol{H},\alpha}H }\right \rbrack
\end{equation}
with $Z_\mathrm{c}$ the canonical partition function, $E_{\alpha}(H=0)$ the energy without an external magnetic field $\boldsymbol{H}$, $\beta$ the inverse thermal energy $\frac{1}{k_\mathrm{B}T}$, $\mu_{\boldsymbol{H},\alpha}$ the total magnetic dipole moment of the system in direction of the external magnetic field $\boldsymbol{H}$ in microstate $\alpha$ and $V$ the system volume.
Now we insert the magnetization $\langle M_{\boldsymbol{H}} \rangle$ in the definition of the magnetic susceptibility $\chi$ and let the derivative operate on the ensemble average. We get the fluctuation formula
\begin{equation}
\chi = \frac{\beta\mu_0}{V} \left \lbrack \frac{1}{Z_\mathrm{c}}\sum_{\alpha} \mu_{\alpha}^2~ e^{ -\beta E_{\alpha}(H=0) + \beta\mu_0\mu_{\boldsymbol{H},\alpha}H } - \frac{1}{Z_\mathrm{c}}\sum_{\alpha} \mu_{\alpha}~ e^{ -\beta E_{\alpha}(H=0) + \beta\mu_0\mu_{\boldsymbol{H},\alpha}H }~~ \frac{1}{Z_\mathrm{c}}\sum_{\alpha'}\mu_{\alpha'}~ e^{ -\beta E_{\alpha'}(H=0) + \beta\mu_0\mu_{\boldsymbol{H},\alpha}H }\right \rbrack = \frac{\beta\mu_0}{V} \left \lbrack \langle \mu_{\boldsymbol{H}}^2 \rangle - \langle \mu_{\boldsymbol{H}} \rangle^2 \right \rbrack = \frac{\beta\mu_0}{V} \left(\Delta \mu_{\boldsymbol{H}}\right)^2
\end{equation}
At zero external magnetic field ($H = 0$) there is no distinct direction for the system, so we can take the fluctuations $\Delta \mu$ in all directions and divide it by the dimension. Thus we can use more data points of our simulation for the average and get a more precise estimator for the susceptibility. Thus finally the fluctuation formula for the initial susceptibility in three dimensions reads
\begin{equation}
\chi_\mathrm{init} = \frac{\beta\mu_0}{3V} \left \lbrack \langle \boldsymbol{\mu}^2 \rangle - \langle \boldsymbol{\mu} \rangle^2 \right \rbrack = \frac{V\beta\mu_0}{3} \left \lbrack \langle \boldsymbol{M}^2 \rangle - \langle \boldsymbol{M} \rangle^2 \right \rbrack
\end{equation}
where $\boldsymbol{\mu}$ and $\boldsymbol{M}$ are defined above.
Simulation
Explanation: In this part we want to consider a three dimensional ferrofluid system and compare our result for the initial susceptibility $\chi_\mathrm{init}$ with that of Ref. <a href='#[1]'>[1]</a>.
First we import all necessary packages and check for the required ESPResSo features
End of explanation
"""
# Lennard-Jones parameters (reduced units)
lj_sigma = 1
lj_epsilon = 1
lj_cut = 2**(1. / 6.) * lj_sigma  # cut at the minimum: purely repulsive (WCA)

# magnetic field constant (vacuum permeability in reduced units)
mu_0 = 1.

# Number of particles
N = 1000

# Volume fraction of the ferrofluid
# phi = rho * 4. / 3. * np.pi * ( lj_sigma / 2 )**3.
phi = 0.0262

# Dipolar interaction parameter lambda = mu_0 m^2 /(4 pi sigma^3 kT)
dip_lambda = 3.

# Temperature (thermal energy, reduced units)
kT = 1.0

# Friction coefficient for the Langevin thermostat
gamma = 1.0

# Time step of the integrator
dt = 0.02

# Edge length of the cubic 3d box, chosen so that N spheres of diameter
# lj_sigma occupy the volume fraction phi
box_size = (N * np.pi * 4. / 3. * (lj_sigma / 2.)**3. / phi)**(1. / 3.)
"""
Explanation: Now we set up all necessary simulation parameters
End of explanation
"""
# Create the cubic simulation box (periodic boundaries by default)
system = espressomd.System(box_l=(box_size, box_size, box_size))
system.time_step = dt

# Lennard-Jones interaction, shifted to zero at the cutoff
system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=lj_epsilon, sigma=lj_sigma, cutoff=lj_cut, shift="auto")

# Random dipole moments: directions drawn uniformly on the unit sphere
# (uniform azimuth, uniform cos(theta)).  Only the orientation is stored;
# the magnitude enters through the dipolar P3M prefactor below.
np.random.seed(seed=1)  # fixed seed -> reproducible initial configuration
dip_phi = 2 * np.pi * np.random.random((N, 1))
dip_cos_theta = 2 * np.random.random((N, 1)) - 1
dip_sin_theta = np.sin(np.arccos(dip_cos_theta))
dip = np.hstack((
    dip_sin_theta * np.sin(dip_phi),
    dip_sin_theta * np.cos(dip_phi),
    dip_cos_theta))

# Random positions in system volume
pos = box_size * np.random.random((N, 3))

# Add particles with rotation enabled around all three axes
system.part.add(pos=pos, rotation=N * [(1, 1, 1)], dip=dip)

# Remove overlap between particles by means of the steepest descent method
system.integrator.set_steepest_descent(
    f_max=0, gamma=0.1, max_displacement=0.05)
while system.analysis.energy()["total"] > 5 * kT * N:
    system.integrator.run(20)

# Switch to velocity Verlet integrator with a Langevin thermostat
system.integrator.set_vv()
system.thermostat.set_langevin(kT=kT, gamma=gamma, seed=1)

# tune verlet list skin
system.cell_system.skin = 0.8

# Setup dipolar P3M for the long-range dipole-dipole interaction; the
# prefactor carries lambda and hence the squared dipole magnitude
accuracy = 5E-4
system.actors.add(DipolarP3M(accuracy=accuracy, prefactor=dip_lambda * lj_sigma**3 * kT))
"""
Explanation: Next we set up the system. As in part I, the orientation of the dipole moments is set directly on the particles, whereas the magnitude of the moments is taken into account when determining the prefactor of the dipolar P3M (for more details see part I).
Hint:
It should be noted that we seed both the Langevin thermostat and the random number generator of numpy. The latter means that the initial configuration of our system is the same every time this script is executed. As the time evolution of the system depends not solely on the Langevin thermostat but also on the numeric accuracy and DP3M (the tuned parameters are slightly different every time) it is only partly predefined. You can change the seeds to simulate with a different initial configuration and a guaranteed different time evolution.
End of explanation
"""
print("Equilibration...")
equil_rounds = 10
equil_steps = 1000
# Integrate in rounds so that progress and the dipolar energy can be reported
for i in range(equil_rounds):
    system.integrator.run(equil_steps)
    print("progress: {:3.0f}%, dipolar energy: {:9.2f}".format(
        (i + 1) * 100. / equil_rounds, system.analysis.energy()["dipolar"]), end="\r")
print("\nEquilibration done")
"""
Explanation: Now we equilibrate for a while
End of explanation
"""
from espressomd.observables import MagneticDipoleMoment
# Observable returning the total dipole moment vector of all particles
# (in units of a single dipole moment, since the stored dipoles are unit vectors)
dipm_tot_calc = MagneticDipoleMoment(ids=system.part[:].id)
"""
Explanation: As we need the magnetization of our system, we import <tt>MagneticDipoleMoment</tt> from <tt>observables</tt> which returns us the total dipole moment of the system which is the magnetization times the volume of the system.
End of explanation
"""
# Sampling
# Number of sampling iterations; each iteration integrates 10 time steps
loops = 2000
"""
Explanation: Now we set the desired number of loops for the sampling
End of explanation
"""
print('Sampling ...')

# initialize array to hold the sampled total dipole moments (one 3-vector per loop)
dipms = np.full((loops, 3), np.nan)

# sample dipole moment
for i in range(loops):
    system.integrator.run(10)
    dipms[i, :] = dipm_tot_calc.calculate()
    # print progress only every 10th cycle
    if (i + 1) % 10 == 0:
        print("progress: {:3.0f}%".format((i + 1) * 100. / loops), end="\r")
print("\nSampling done")

# calculate average first and second moment of the total dipole moment,
# component-wise (in units of the single-particle dipole moment)
dipm_tot = np.mean(dipms, axis=0)
dipm_tot_2 = np.mean(dipms**2, axis=0)
"""
Explanation: and sample the first and second moment of the magnetization or total dipole moment, by averaging over all total dipole moments occurring during the simulation
End of explanation
"""
# dipole moment
# Magnitude of a single dipole moment, obtained by inverting
# lambda = mu_0 m^2 / (4 pi sigma^3 kT)
dipm = np.sqrt(dip_lambda * 4 * np.pi * lj_sigma**3. * kT / mu_0)
print("dipm = {}".format(dipm))
"""
Explanation: For the estimator of the initial susceptibility $\chi_\mathrm{init}$ we need the magnitude of one single dipole moment
End of explanation
"""
# susceptibility in 3d system
# Fluctuation formula chi = mu_0 / (3 V kT) * (<mu^2> - <mu>^2); the sampled
# moments are in units of dipm, so each term is rescaled by dipm**2
chi = mu_0 / (system.volume() * 3. * kT) * (np.sum(dipm_tot_2 * dipm**2.) - np.sum(np.square(dipm_tot * dipm)))
"""
Explanation: Now we can calculate $\chi_\mathrm{init}$ from our simulation data
End of explanation
"""
# Report the measured initial susceptibility
print('chi = %.4f' % chi)
"""
Explanation: and print the result
End of explanation
"""
# Langevin susceptibility chi_L = 8 * lambda * phi (non-interacting dipoles)
chi_L = 8. * dip_lambda * phi
print('chi_L = %.4f' % chi_L)
"""
Explanation: Compared with the value $\chi = 0.822 \pm 0.017$ of Ref. <a href='#[1]'>[1]</a> (see table 1) it should be very similar.
Now we want to compare the result with the theoretical expectations.
At first with the simple Langevin susceptibility
End of explanation
"""
# Modified mean-field susceptibility of Ref. [1], eq. (6),
# accurate to third order in chi_L
chi_I = chi_L * (1 + chi_L / 3. + chi_L**2. / 144.)
print('chi_I = %.4f' % chi_I)
"""
Explanation: and at second with the more advanced one (see Ref. <a href='#[1]'>[1]</a> eq. (6)) which has a cubic accuracy in $\chi_\mathrm{L}$ and reads
\begin{equation}
\chi = \chi_\mathrm{L} \left( 1 + \frac{\chi_\mathrm{L}}{3} + \frac{\chi_\mathrm{L}^2}{144} \right)
\end{equation}
End of explanation
"""
# Langevin parameters at which the magnetization will be sampled;
# alpha = 0 reuses the zero-field result from the previous section
alphas = np.array([0, 0.25, 0.5, 1, 2, 3, 4, 8])
"""
Explanation: Both of them should be smaller than our result, but the second one should be closer to our one. The deviation of the theoretical results from our simulation result can be explained by the fact that in the Langevin model there are no interactions between the particles incorporated at all, and the more advanced (mean-field-type) one of Ref. <a href='#[1]'>[1]</a> does not take occurring cluster formations into account but assumes a homogeneous distribution of the particles. For higher values of the volume fraction $\phi$ and the dipolar interaction parameter $\lambda$ the deviations will increase as the cluster formation will become more pronounced.
Magnetization curve of a 3D system
At the end of this tutorial we now want to sample the magnetization curve of a three dimensional system and compare the results with analytical solutions. Again we will compare with the Langevin function but also with the approximation of Ref. <a href='#[2]'>[2]</a> (see also Ref. <a href='#[1]'>[1]</a> for the right coefficients) which takes the dipole-dipole interaction into account. For this approximation, which is a modified mean-field theory based on the pair correlation function, the Langevin parameter $\alpha$ is replaced by
\begin{equation}
\alpha' = \alpha + \chi_\mathrm{L}~L(\alpha) + \frac{\chi_\mathrm{L}^{2}}{16} L(\alpha) \frac{\mathrm{d} L(\alpha)}{\mathrm{d}\alpha}
\end{equation}
where $\chi_\mathrm{L}$ is the Langevin susceptibility
\begin{equation}
\chi_\mathrm{L} = \frac{N}{V}\frac{\mu_0 \mu^2}{3k_\mathrm{B}T} = 8 \cdot \lambda \cdot \phi
\end{equation}
Analogous to part II we start at zero external magnetic field and increase the external field successively. At every value of the external field we sample the total dipole moment which is proportional to the magnetization as we have a fixed volume.
First we create a list of values of the Langevin parameter $\alpha$. As we already sampled the magnetization at zero external field in the last section, we take this value and continue with the sampling of an external field unequal zero
End of explanation
"""
# remove all constraints
system.constraints.clear()

# array for magnetizations in field direction
magnetizations = np.full_like(alphas, np.nan)

# use result for alpha=0 from previous chapter; averaged over the three
# components since at zero field all directions are equivalent
magnetizations[0] = np.average(dipm_tot)

# number of loops for sampling
loops_m = 500

for ndx, alpha in enumerate(alphas):
    # alpha = 0 was already measured at zero field above
    if alpha == 0:
        continue
    print("Sample for alpha = {}".format(alpha))
    # The dipoles are stored as unit vectors, so the applied field has to
    # carry the dipole magnitude: H * mu = alpha * kT (with mu_0 = 1)
    H_dipm = (alpha * kT)
    H_field = [H_dipm, 0, 0]  # homogeneous field along the x-axis
    print("Set magnetic field constraint...")
    H_constraint = espressomd.constraints.HomogeneousMagneticField(H=H_field)
    system.constraints.add(H_constraint)
    print("done\n")

    # Equilibration: let the system adapt to the new field strength
    print("Equilibration...")
    for i in range(equil_rounds):
        system.integrator.run(equil_steps)
        print("progress: {:3.0f}%, dipolar energy: {:9.2f}".format(
            (i + 1) * 100. / equil_rounds, system.analysis.energy()["dipolar"]), end="\r")
    print("\nEquilibration done\n")

    # Sampling: record the x-component (field direction) of the total dipole moment
    print("Sampling...")
    magn_temp = np.full(loops_m, np.nan)
    for i in range(loops_m):
        system.integrator.run(20)
        magn_temp[i] = dipm_tot_calc.calculate()[0]
        print("progress: {:3.0f}%".format((i + 1) * 100. / loops_m), end="\r")
    print("\n")

    # save average magnetization
    magnetizations[ndx] = np.mean(magn_temp)
    print("Sampling for alpha = {} done \n".format(alpha))
    print("magnetizations = {}".format(magnetizations))
    print("total progress: {:5.1f}%\n".format(ndx * 100. / (len(alphas) - 1)))

    # remove constraint so the next alpha starts from a clean field
    system.constraints.clear()
print("Magnetization curve sampling done")
"""
Explanation: Now for each value in this list we sample the total dipole moment / magnetization of the system for a while. Keep in mind that only the current orientation of the dipole moments, i.e. the unit vector of the dipole moments, is saved in the particle list, but not their magnitude. Thus we have to use $H\cdot \mu$ as the external magnetic field, where $\mu$ is the magnitude of a single magnetic dipole moment.
We will apply the field in x-direction using the class <tt>constraints</tt> of ESPResSo.
As in part II we use the same system for every value of the Langevin parameter $\alpha$. Thus we use that the system is already pre-equilibrated from the previous run so we save some equilibration time. For scientific purposes one would use a new system for every value for the Langevin parameter to ensure that the systems are independent and no correlation effects are measured. Also one would perform more than just one simulation for each value of $\alpha$ to increase the precision of the results.
End of explanation
"""
# Langevin function
def L(y):
    """Langevin function L(y) = coth(y) - 1/y.

    Implemented as 1/tanh(y) - 1/y rather than cosh(y)/sinh(y) - 1/y:
    cosh and sinh overflow for |y| > ~710 and would yield inf/inf = nan,
    while tanh stays bounded, so this form works for any y != 0 (scalar
    or numpy array).
    """
    return 1. / np.tanh(y) - 1. / y
# second order mean-field-model from Ref. [2]
def alpha_mean_field(alpha, dip_lambda, phi):
    """Effective Langevin parameter of the modified mean-field model.

    alpha' = alpha + chi_L * L(alpha) + chi_L^2/16 * L(alpha) * dL/dalpha,
    with the Langevin susceptibility chi_L = 8 * dip_lambda * phi and
    dL/dalpha = 1/alpha^2 - 1/sinh(alpha)^2.
    """
    chi_L = 8. * dip_lambda * phi
    langevin = L(alpha)
    dL_dalpha = 1. / alpha**2. - 1. / np.sinh(alpha)**2.
    return alpha + chi_L * langevin + chi_L**2. / 16. * langevin * dL_dalpha
"""
Explanation: Now we define the Langevin function and the modified mean-field-approximation of the Langevin parameter of Ref. <a href='#[2]'>[2]</a>
End of explanation
"""
import matplotlib.pyplot as plt

# Langevin parameter range for the analytic curves
y = np.arange(0.01, 10, 0.1)

# Rescale the measured susceptibility into the dimensionless slope
# dM*/dalpha = kT * V / (mu_0 mu^2 N) * chi (see the relation above)
initial_susceptibility = system.volume() * kT * chi / (N * mu_0 * dipm**2)

plt.figure(figsize=(10, 10))
plt.ylim(0, 1.)
plt.xlabel(r'$\alpha$', fontsize=20)
plt.ylabel(r'$M^*$', fontsize=20)
# Analytic references: pure Langevin and the modified mean-field curve
plt.plot(y, L(y), label='Langevin function')
plt.plot(y, L(alpha_mean_field(y, dip_lambda, phi)),
         label='modified mean-field-theory')
# Simulation data, normalized by the saturation value N
plt.plot(alphas, magnetizations / N, 'o', label='simulation results')
# Linear behaviour at small alpha predicted by the initial susceptibility
plt.plot(y, initial_susceptibility * y,
         label=r'linear approximation at $\alpha = 0$')
plt.legend(fontsize=20)
plt.show()
"""
Explanation: We also want to plot the linear approximation at $\alpha = 0$ to see for which values of $\alpha$ this approximation holds. We use the initial susceptibility calculated in the first chapter of this part as the gradient. As we want the gradient of $M^*$ with respect to $\alpha$ which fulfills the relation
\begin{equation}
\frac{\partial M^*}{\partial \alpha} = \frac{1}{M_\mathrm{sat}}\frac{\partial M}{\partial \left( \frac{\mu_0\mu}{k_\mathrm{B}T} H\right)} = \frac{k_\mathrm{B}T~V}{\mu_0\mu^2N}\frac{\partial M}{\partial H} = \frac{k_\mathrm{B}T~V}{\mu_0\mu^2N}~\chi
\end{equation}
we have to scale our calculated initial susceptibility $\chi_{init}$ by a factor to get it in our dimensionless units.
Now we plot the resulting curves together with our simulation results and the linear approximation
End of explanation
"""
|
mromanello/SunoikisisDC_NER | participants_notebooks/Sunoikisis - Named Entity Extraction 1b-GB.ipynb | gpl-3.0 | ########
# NLTK #
########
import nltk
from nltk.tag import StanfordNERTagger
########
# CLTK #
########
import cltk
from cltk.tag.ner import tag_ner
##############
# MyCapytain #
##############
import MyCapytain
from MyCapytain.resolvers.cts.api import HttpCTSResolver
from MyCapytain.retrievers.cts5 import CTS
from MyCapytain.common.constants import Mimetypes
#################
# other imports #
#################
import sys
sys.path.append("/opt/nlp/pymodules/")
from idai_journals.nlp import sub_leaves
"""
Explanation: Welcome
This notebook accompanies the Sunokisis Digital Classics common session on Named Entity Extraction, see https://github.com/SunoikisisDC/SunoikisisDC-2016-2017/wiki/Named-Entity-Extraction-I.
In this notebook we are going to experiment with three different methods for extracting named entities from a Latin text.
Library imports
External modules and libraries can be imported using import statements.
Let's import the Natural Language ToolKit (NLTK), the Classical Language ToolKit (CLTK), MyCapytain, and some local libraries that are used in this notebook.
End of explanation
"""
# Print the installed versions of the three libraries used in this notebook.
print(nltk.__version__)
print(cltk.__version__)
print(MyCapytain.__version__)
"""
Explanation: And more precisely, we are using the following versions:
End of explanation
"""
# CTS URN of De Bello Gallico, Latin edition, book 1, chapter 1, section 1.
my_passage = "urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1"
"""
Explanation: Let's grab some text
To start with, we need some text from which we'll try to extract named entities using various methods and libraries.
There are several ways of doing this e.g.:
1. copy and paste the text from Perseus or the Latin Library into a text document, and read it into a variable
2. load a text from one of the Latin corpora available via cltk (cfr. this blog post)
3. or load it from Perseus by leveraging its Canonical Text Services API
Let's go for #3 :)
What's CTS?
CTS URNs stand for Canonical Text Service Uniform Resource Names.
You can think of a CTS URN like a social security number for texts (or parts of texts).
Here are some examples of CTS URNs with different levels of granularity:
- urn:cts:latinLit:phi0448 (Caesar)
- urn:cts:latinLit:phi0448.phi001 (Caesar's De Bello Gallico)
- urn:cts:latinLit:phi0448.phi001.perseus-lat2 DBG Latin edition
- urn:cts:latinLit:phi0448.phi001.perseus-lat2:1 DBG Latin edition, book 1
- urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1 DBG Latin edition, book 1, chapter 1, section 1
How do I find out the CTS URN of a given author or text? The Perseus Catalog is your friend! (crf. e.g. http://catalog.perseus.org/catalog/urn:cts:latinLit:phi0448)
Querying a CTS API
The URN of the Latin edition of Caesar's De Bello Gallico is urn:cts:latinLit:phi0448.phi001.perseus-lat2.
End of explanation
"""
# Set up a resolver which communicates with a CTS API hosted in Leipzig.
resolver = HttpCTSResolver(CTS("http://cts.dh.uni-leipzig.de/api/cts/"))
# Request the metadata record for Caesar's De Bello Gallico (Latin edition).
textMetadata = resolver.getMetadata("urn:cts:latinLit:phi0448.phi001.perseus-lat2")
# Texts in CTS metadata expose their citation scheme: citations are embedded
# objects that carry information about how a text can be quoted and how deep
# its citation hierarchy is (here: book / chapter / section).
print([citation.name for citation in textMetadata.citation])
"""
Explanation: With this information, we can query a CTS API and get some information about this text.
For example, we can "discover" its canonical text structure, an essential information to be able to cite this text.
End of explanation
"""
# URNs for the whole of book 1: Latin edition and English translation.
my_passage = "urn:cts:latinLit:phi0448.phi001.perseus-lat2:1"
my_passage_en = "urn:cts:latinLit:phi0448.phi001.perseus-eng2:1"
"""
Explanation: But we can also query the same API and get back the text of a specific text section, for example the entire book 1.
To do so, we need to append the indication of the reference scope (i.e. book 1) to the URN.
End of explanation
"""
# Retrieve both passages (Latin and English book 1) from the CTS API.
passage = resolver.getTextualNode(my_passage)
passage_en = resolver.getTextualNode(my_passage_en)
"""
Explanation: So we retrieve the first book of the De Bello Gallico by passing its CTS URN (that we just stored in the variable my_passage) to the CTS API, via the resolver provided by MyCapytains:
End of explanation
"""
# Export the retrieved passages as plain text (dropping the TEI XML markup).
de_bello_gallico_book1 = passage.export(Mimetypes.PLAINTEXT)
de_bello_gallico_en_book1 = passage_en.export(Mimetypes.PLAINTEXT)
"""
Explanation: At this point the passage is available in various formats: text, but also TEI XML, etc.
Thus, we need to specify that we are interested in getting the text only:
End of explanation
"""
# Sanity check: print the English text we just fetched.
print(de_bello_gallico_en_book1)
"""
Explanation: Let's check that the text is there by printing the content of the variable de_bello_gallico_book1 where we stored it:
End of explanation
"""
# Embed the browser reading view of the same passage inside the notebook.
from IPython.display import IFrame
IFrame('http://cts.dh.uni-leipzig.de/read/latinLit/phi0448/phi001/perseus-lat2/1', width=1000, height=350)
"""
Explanation: The text that we have just fetched by using a programming interface (API) can also be viewed in the browser.
Or even imported as an iframe into this notebook!
End of explanation
"""
# Rough token count: split the English text on single spaces.
len(de_bello_gallico_en_book1.split(" "))
"""
Explanation: Let's see how many words (tokens, more properly) there are in Caesar's De Bello Gallico I:
End of explanation
"""
"T".istitle()
"t".istitle()
# we need a list to store the tagged tokens
tagged_tokens = []
# tokenisation is done by using the string method `split(" ")`
# that splits a string upon white spaces
for n, token in enumerate(de_bello_gallico_en_book1.split(" ")):
if(token.istitle()):
tagged_tokens.append((token, "Entity"))
else:
tagged_tokens.append((token, "O"))
"""
Explanation: Very simple baseline
Now let's write what in NLP jargon is called a baseline, that is a method for extracting named entities that can serve as a term of comparison to evaluate the accuracy of other methods.
Baseline method:
- cycle through each token of the text
- if the token starts with a capital letter it's a named entity (only one type, i.e. Entity)
End of explanation
"""
# Inspect the first 50 (token, tag) pairs produced by the baseline loop.
tagged_tokens[:50]
"""
Explanation: Let's have a look at the first 50 tokens that we just tagged:
End of explanation
"""
def extract_baseline(input_text):
    """
    Tag capitalised tokens as named entities (baseline tagger).

    :param input_text: the text to tag (string)
    :return: a list of tuples, where tuple[0] is the token and tuple[1] is
        the named entity tag ("Entity" for title-cased tokens, "O" otherwise)
    """
    # Tokenise naively on single spaces; a token starting with a capital
    # letter (str.istitle) is assumed to be a named entity.  The comprehension
    # replaces the original append-loop; the unused enumerate index is gone.
    return [(token, "Entity" if token.istitle() else "O")
            for token in input_text.split(" ")]
"""
Explanation: For convenience we can also wrap our baseline code into a function that we call extract_baseline. Let's define it:
End of explanation
"""
# Run the baseline tagger on the Latin text and inspect the last 50 pairs.
tagged_tokens_baseline = extract_baseline(de_bello_gallico_book1)
tagged_tokens_baseline[-50:]
"""
Explanation: And now we can call it like this:
End of explanation
"""
def extract_baseline(input_text):
    """
    Tag capitalised tokens as named entities and print each entity in context.

    :param input_text: the text to tag (string)
    :return: a list of tuples, where tuple[0] is the token and tuple[1] is the named entity tag
    """
    # Tokenise once up front instead of re-splitting the whole text on every
    # iteration of the loop.
    tokens = input_text.split(" ")
    tagged_tokens = []
    for n, token in enumerate(tokens):
        if token.istitle():
            tagged_tokens.append((token, "Entity"))
            # Clamp the window start at 0: a plain n-5 goes negative for the
            # first tokens and silently yields an empty or wrong slice.
            context = tokens[max(0, n - 5):n + 5]
            print("Found entity \"%s\" in context \"%s\""%(token, " ".join(context)))
        else:
            tagged_tokens.append((token, "O"))
    return tagged_tokens
# Re-run the (verbose) baseline tagger and look at the first 50 pairs.
tagged_text_baseline = extract_baseline(de_bello_gallico_book1)
tagged_text_baseline[:50]
"""
Explanation: We can modify slightly our function so that it prints the snippet of text where an entity is found:
End of explanation
"""
%%time
# Run CLTK's lookup-based Latin NER tagger; %%time reports the cell's runtime.
tagged_text_cltk = tag_ner('latin', input_text=de_bello_gallico_book1)
"""
Explanation: NER with CLTK
The CLTK library has some basic support for the extraction of named entities from Latin and Greek texts (see CLTK's documentation).
The current implementation (as of version 0.1.47) uses a lookup-based method.
For each token in a text, the tagger checks whether that token is contained within a predefined list of possible named entities:
- list of Latin proper nouns: https://github.com/cltk/latin_proper_names_cltk
- list of Greek proper nouns: https://github.com/cltk/greek_proper_names_cltk
Let's run CLTK's tagger (it takes a moment):
End of explanation
"""
# CLTK returns 1-tuples for plain tokens and 2-tuples for entities.
tagged_text_cltk[:10]
"""
Explanation: Let's have a look at the ouput, only the first 10 tokens (by using the list slicing notation):
End of explanation
"""
def reshape_cltk_output(tagged_tokens):
    """Normalise CLTK NER output to uniform (token, tag) pairs.

    CLTK emits 1-tuples for untagged tokens and 2-tuples for entities;
    untagged tokens receive the "O" (outside) tag here so every element
    of the result is a (token, tag) pair.
    """
    return [(item[0], "O") if len(item) == 1 else (item[0], item[1])
            for item in tagged_tokens]
"""
Explanation: The output looks slightly different from the one of our baseline function (the size of the tuples in the list varies).
But we can write a function to fix this, we call it reshape_cltk_output:
End of explanation
"""
# Normalise CLTK's output to uniform (token, tag) pairs.
tagged_text_cltk_reshaped = reshape_cltk_output(tagged_text_cltk)
"""
Explanation: We apply this function to CLTK's output:
End of explanation
"""
# Inspect the first 20 elements of the (raw) CLTK output.
tagged_text_cltk[:20]
"""
Explanation: And the resulting output looks now ok:
End of explanation
"""
# Pair up the first 20 tags of the two methods for a side-by-side comparison.
list(zip(tagged_text_baseline[:20], tagged_text_cltk_reshaped[:20]))
"""
Explanation: Now let's compare the two list of tagged tokens by using a python function called zip, which allows us to read multiple lists simultaneously:
End of explanation
"""
# Pass pre-tokenised input (split on spaces) so CLTK's tokens align with ours.
tagged_text_cltk = reshape_cltk_output(tag_ner('latin', input_text=de_bello_gallico_book1.split(" ")))
list(zip(tagged_text_baseline[:20], tagged_text_cltk[:20]))
"""
Explanation: But, as you can see, the two lists are not aligned.
This is due to how the CLTK function tokenises the text. The comma after "tres" becomes a token on its own, whereas when we tokenise by white space the comma is attached to "tres" (i.e. "tres,").
A solution to this is to pass to the tag_ner function the text already tokenised by white space.
End of explanation
"""
# Paths to pre-trained Stanford NER classifier models on the local machine.
stanford_model_italian = "/opt/nlp/stanford-tools/stanford-ner-2015-12-09/classifiers/ner-ita-nogpe-noiob_gaz_wikipedia_sloppy.ser.gz"
stanford_model_english = "/opt/nlp/stanford-tools/stanford-ner-2015-12-09/classifiers/english.muc.7class.distsim.crf.ser.gz"
# NOTE(review): the Italian tagger below is immediately overwritten by the
# English one — the first assignment is dead code kept for the exercise.
ner_tagger = StanfordNERTagger(stanford_model_italian)
ner_tagger = StanfordNERTagger(stanford_model_english)
# Tag the English translation, pre-tokenised on spaces.
tagged_text_nltk = ner_tagger.tag(de_bello_gallico_en_book1.split(" "))
"""
Explanation: NER with NLTK
End of explanation
"""
# Inspect the first 100 (token, tag) pairs from the Stanford tagger.
tagged_text_nltk[:100]
"""
Explanation: Let's have a look at the output
End of explanation
"""
# Three-way comparison of baseline, CLTK and Stanford/NLTK tags.
list(zip(tagged_text_baseline[:20], tagged_text_cltk[:20], tagged_text_nltk[:20]))
for baseline_out, cltk_out, nltk_out in zip(tagged_text_baseline[:20], tagged_text_cltk[:20], tagged_text_nltk[:20]):
    print("Baseline: %s\nCLTK: %s\nNLTK: %s\n"%(baseline_out, cltk_out, nltk_out))
"""
Explanation: Wrap up
At this point we can "compare" the output of the three different methods we used, again by using the zip function.
End of explanation
"""
# Hint for the exercise: the English MUC 7-class Stanford model.
stanford_model_english = "/opt/nlp/stanford-tools/stanford-ner-2015-12-09/classifiers/english.muc.7class.distsim.crf.ser.gz"
"""
Explanation: Excercise
Extract the named entities from the English translation of the De Bello Gallico book 1.
The CTS URN for this translation is urn:cts:latinLit:phi0448.phi001.perseus-eng2:1.
Modify the code above to use the English model of the Stanford tagger instead of the italian one.
Hint:
End of explanation
"""
|
informatics-isi-edu/deriva-py | docs/derivapy-datapath-example-3.ipynb | apache-2.0 | # Import deriva modules
from deriva.core import ErmrestCatalog, get_credential
# Connect with the deriva catalog
protocol = 'https'
hostname = 'www.facebase.org'
catalog_number = 1
# If you need to authenticate, use Deriva Auth agent and get the credential
credential = get_credential(hostname)
catalog = ErmrestCatalog(protocol, hostname, catalog_number, credential)
# Get the path builder interface for this catalog
pb = catalog.getPathBuilder()
"""
Explanation: Datapath Example 3
This notebook gives an example of how to build relatively simple data paths.
It assumes that you understand the concepts presented in the example 2
notebook.
Exampe Data Model
The examples require that you understand a little bit about the example
catalog data model, which is based on the FaceBase project.
Key tables
'dataset' : represents a unit of data usually a 'study' or 'collection'
'experiment' : a bioassay (typically RNA-seq or ChIP-seq assays)
'replicate' : a record of a replicate (bio or technical) related to an experiment
Relationships
dataset <- experiment: A dataset may have one to many experiments. I.e., there
is a foreign key reference from experiment to dataset.
experiment <- replicate: An experiment may have one to many replicates. I.e., there is a
foreign key reference from replicate to experiment.
End of explanation
"""
# Shorthand references to the three tables of the 'isa' schema used below.
dataset = pb.isa.dataset
experiment = pb.isa.experiment
replicate = pb.isa.replicate
"""
Explanation: Building a DataPath
Build a data path by linking together tables that are related. To make things a little easier we will use python variables to reference the tables. This is not necessary, but simplifies the examples.
End of explanation
"""
# Start a data path rooted at the dataset table and show its catalog URI.
path = dataset.path
print(path.uri)
"""
Explanation: Initiate a path from a table object
Like the example 2 notebook, begin by initiating a path instance from a Table object. This path will be "rooted" at the table it was initiated from, in this case, the dataset table. DataPath's have URIs that identify the resource in the catalog.
End of explanation
"""
# Link experiment and replicate along their foreign-key references.
path.link(experiment).link(replicate)
print(path.uri)
"""
Explanation: Link other related tables to the path
In the catalog's model, tables are related by foreign key references. Related tables may be linked together in a DataPath. Here we link the following tables based on their foreign key references (i.e., dataset <- experiment <- replicate).
End of explanation
"""
# The context is the last linked table instance (here: replicate).
path.context.name
"""
Explanation: Path context
By default, DataPath objects return entities for the last linked entity set in the path. The path from the prior step ended in replicate which is therefore the context for this path.
End of explanation
"""
# Fetch entities for the current context (replicate) and count them.
entities = path.entities()
len(entities)
"""
Explanation: Get entities for the current context
The following DataPath will fetch replicate entities not datasets.
End of explanation
"""
# Two equivalent ways to reference the 'dataset' table instance on the path.
path.table_instances['dataset']
# or
path.dataset
"""
Explanation: Get entities for a different path context
Let's say we wanted to fetch the entities for the dataset table rather than the current context which is the replicate table. We can do that by referencing the table as a property of the path object. Note that these are known as "table instances" rather than tables when used within a path expression. We will discuss table instances later in this notebook.
End of explanation
"""
# Fetch entities for the dataset instance instead of the current context.
entities = path.dataset.entities()
len(entities)
"""
Explanation: From that table instance we can fetch entities, add a filter specific to that table instance, or even link another table. Here we will get the dataset entities from the path.
End of explanation
"""
# Filter the current context (replicate) on a column predicate, then re-fetch.
path.filter(replicate.bioreplicate_number == 1)
print(path.uri)
entities = path.entities()
len(entities)
"""
Explanation: Notice that we fetched fewer entities this time which is the number of dataset entities rather than the replicate entities that we previously fetched.
Filtering a DataPath
Building off of the path, a filter can be added. Like fetching entities, linking and filtering are performed relative to the current context. In this filter, the replicate table's attributes are referenced in the expression.
Currently, binary comparisons and logical operators are supported. Unary operators have not yet been implemented. In binary comparisons, the left operand must be an attribute (column name) while the right operand must be a literal
value.
End of explanation
"""
# Bind the dataset table instance to a variable (two equivalent spellings).
dataset_instance = path.table_instances['dataset']
# or
dataset_instance = path.dataset
"""
Explanation: Table Instances
So far we have discussed base tables. A base table is a representation of the table as it is stored in the ERMrest catalog. A table instance is a usage or reference of a table within the context of a data path. As demonstrated above, we may link together multiple tables and thus create multiple table instances within a data path.
For example, in path.link(dataset).link(experiment).link(replicate) the table instance experiment is no longer the same as the original base table experiment because within the context of this data path the experiment entities must satisfy the constraints of the data path. The experiment entities must reference a dataset entity, and they must be referenced by a replicate entity. Thus within this path, the entity set for experiment may be quite different than the entity set for the base table on its own.
Table instances are bound to the path
Whenever you initiate a data path (e.g., table.path) or link a table to a path (e.g., path.link(table)) a table instance is created and bound to the DataPath object (e.g., path). These table instances can be referenced via the DataPath's table_instances container or directly as a property of the DataPath object itself.
End of explanation
"""
# Link a second instance of the dataset table under the alias 'D'.
path.link(dataset.alias('D'))
path.D.uri
"""
Explanation: Aliases for table instances
Whenever a table instance is created and bound to a path, it is given a name. If no name is specified for it, it will be named after the name of its base table. For example, a table named "My Table" will result in a table instance also named "My Table". Tables may appear more than once in a path (as table instances), and if the table name is taken, the instance will be given the "'base name' + number" (e.g., "My Table2").
You may wish to specify the name of your table instance. In conventional database terms, an alternate name is called an "alias". Here we give the dataset table instance an alias of 'D' though longer strings are also valid as long as they do not contain special characters in them.
End of explanation
"""
# Rebuild the full path in one expression: dataset -> experiment -> replicate,
# filtered to first bio-replicates.
path = dataset.path.link(experiment).link(replicate).filter(replicate.bioreplicate_number == 1)
print(path.uri)
"""
Explanation: You'll notice that in this path we added an additional instance of the dataset table from our catalog model. In addition, we linked it to the isa.replicate table. This was possible because in this model, there is a foriegn key reference from the base table replicate to the base table dataset. The entities for the table instance named dataset and the instance name D will likely consist of different entities because the constraints for each are different.
Selecting Attributes From Linked Entities
Returning to the initial example, if we want to include additional attributes
from other table instances in the path, we need to be able to reference the
table instances at any point in the path. First, we will build our original path.
End of explanation
"""
# Project one attribute from each table instance on the path, renaming two of
# them via alias(); the projection is not part of the path URI itself.
results = path.attributes(path.dataset.accession,
                          path.experiment.experiment_type.alias('type_of_experiment'),
                          path.replicate.technical_replicate_number.alias('technical_replicate_num'))
print(results.uri)
"""
Explanation: Now let's fetch an entity set with attributes pulled from each of the table instances in the path.
End of explanation
"""
# The ResultSet URI may differ from the bare path URI (projection omitted).
path.uri != results.uri
"""
Explanation: Notice that the ResultSet also has a uri property. This URI may differ from the origin path URI because the attribute projection does not get appended to the path URI.
End of explanation
"""
# Fetch at most 5 rows from the catalog and print each projected record.
results.fetch(limit=5)
for result in results:
    print(result)
"""
Explanation: As usual, fetch(...) the entities from the catalog.
End of explanation
"""
|
muratcemkose/cy-rest-python | advanced/integratingDrugbank.ipynb | mit | import requests
import json
import pandas as pd
# Base URL of the local cyREST (Cytoscape REST) API, v1.
PORT_NUMBER = 1234
BASE = 'http://localhost:' + str(PORT_NUMBER) + '/v1/'
HEADERS = {'Content-Type': 'application/json'}
# Import the KEGG pathway eco00250 (KGML) into Cytoscape as collection "KEGG".
requests.post(BASE + 'networks?source=url&collection=KEGG', data=json.dumps(['http://rest.kegg.jp/get/eco00250/kgml']), headers=HEADERS)
"""
Explanation: Mapping Drugbank drug targets on KEGG pathway
by Kozo Nishida (Riken, Japan)
Here we show an example of data integration. We map drug targets (from Drugbank) onto a KEGG pathway. To manage several tables, we use MongoDB and PyMongo.
Loading all data into pandas dataframe
First we import a KEGG pathway: Alanine, aspartate and glutamate metabolism, eco00250
End of explanation
"""
# Fetch the SUIDs of all networks currently loaded in Cytoscape.
res = requests.get(BASE + 'networks')
# Parse the JSON response with json.loads instead of eval(): eval on server
# output executes arbitrary expressions and is unsafe; json.loads is the
# correct parser for this API's JSON list of network ids.
networkIds = json.loads(res.content)
print(networkIds)
# Download the default node table of the first network as TSV and save it;
# the with-block guarantees the file handle is closed even on error.
res = requests.get(BASE + 'networks/' + str(networkIds[0]) + '/tables/defaultnode.tsv')
with open('alanine_nodes.tsv', 'w') as f:
    f.write(res.content)
"""
Explanation: and get node attribute table as alanine_nodes.tsv
End of explanation
"""
# Load the saved node table (tab-separated) into a DataFrame and preview it.
alanine_df = pd.read_table('alanine_nodes.tsv')
alanine_df.head()
"""
Explanation: import alanine_nodes.tsv into pandas dataframe
End of explanation
"""
# Shell magics: download and unpack the Drugbank drug-target CSV archive.
!curl -O http://www.drugbank.ca/system/downloads/current/all_target_ids_all.csv.zip
!unzip all_target_ids_all.csv.zip
"""
Explanation: Next we download the Drugbank drug target and ID mapping table
End of explanation
"""
# Load the Drugbank drug-target table and preview it.
drugbank_df = pd.read_csv('all_target_ids_all.csv')
drugbank_df.head()
"""
Explanation: import Drugbank drug targets into pandas dataframe
End of explanation
"""
# Shell magics: fetch the UniProt -> KEGG (E. coli) id conversion table.
!curl -o conv_eco_uniprot.tsv http://rest.kegg.jp/conv/eco/uniprot
!head conv_eco_uniprot.tsv
"""
Explanation: Get uniprot-keggid conversion table. This takes long time.
End of explanation
"""
# Load the header-less conversion table; column 0 is up:<id>, column 1 eco:<locus>.
idconversion_df = pd.read_table('conv_eco_uniprot.tsv', header=None)
idconversion_df.head()
"""
Explanation: import uniprot-KEGG ID conversion table
End of explanation
"""
# Build, for every node row, a drug-target UniProt id and the matching Drugbank
# drug ids (None where the node is not a gene or has no match).
# NOTE(review): .iteritems() and the bare print statement below indicate this
# cell targets Python 2 / an older pandas — confirm before running on Python 3.
target_uniprot = []
target_drug = []
for i, keggtype in alanine_df['KEGG_NODE_TYPE'].iteritems():
    # Default to no match for this row; a hit below replaces these via pop/append.
    target_uniprot.append(None)
    target_drug.append(None)
    if keggtype == 'gene':
        # Map every KEGG locus of this node ('|'-separated) to its UniProt id.
        uniprotids = []
        for locus in alanine_df['KEGG_ID'][i].split('|'):
            uniprot = idconversion_df[idconversion_df[1] == locus][0]
            uniprotid = uniprot.values[0].replace('up:', '')
            uniprotids.append(uniprotid)
        # Scan Drugbank for targets hitting any of these UniProt ids; if several
        # rows match, only the LAST one is kept (each hit overwrites the previous).
        for j, unip in drugbank_df['UniProt ID'].iteritems():
            if unip in uniprotids:
                target_uniprot.pop()
                target_uniprot.append(unip)
                target_drug.pop()
                target_drug.append(drugbank_df['Drug IDs'][j])
# Append the two new columns to the node table.
s1 = pd.Series(target_uniprot, name='TARGET_UNIPROT')
s2 = pd.Series(target_drug, name='TARGET_DRUG')
merged_df = pd.concat([alanine_df, s1, s2], axis=1)
merged_df.head()
# Push the augmented table back to Cytoscape, keyed on KEGG_NODE_LABEL.
drugjson = json.loads(merged_df.to_json(orient="records"))
new_table_data = {
    "key": "KEGG_NODE_LABEL",
    "dataKey": "KEGG_NODE_LABEL",
    "data" : drugjson
}
update_table_url = BASE + "networks/" + str(networkIds[0]) + "/tables/defaultnode"
print update_table_url
requests.put(update_table_url, data=json.dumps(new_table_data), headers=HEADERS)
"""
Explanation: Merging pandas dataframes
We integrate the three table(network nodes, drug targets table, id conversion table). Here we append columns drug target and drug to Cytoscape’s node table.
End of explanation
"""
|
karlstroetmann/Formal-Languages | Python/Regexp-Tutorial.ipynb | gpl-2.0 | import re
"""
Explanation: Regular Expressions in Python (A Short Tutorial)
This is a tutorial showing how regular expressions are supported in Python.
The assumption is that the reader already has a grasp of the concept of
regular expressions as it is taught in lectures
on formal languages, for example in
Formal Languages and Their Application, but does not know how regular expressions are supported in Python.
In Python, regular expressions are not part of the core language but are rather implemented in the module re. This module is part of the Python standard library and therefore there is no need
to install this module. The full documentation of this module can be found at
https://docs.python.org/3/library/re.html.
End of explanation
"""
# Case-sensitive search: only the two lowercase 'a' characters match.
re.findall('a', 'abcabcABC')
"""
Explanation: Regular expressions are strings that describe <em style=\color:blue>languages</em>, where a
<em style="color:blue">language</em> is defined as a <em style="color:blue">set of strings</em>.
In the following, let us assume that $\Sigma$ is the set of all Unicode characters and $\Sigma^*$ is the set
of strings consisting of Unicode characters. We will define the set $\textrm{RegExp}$ of regular expressions inductively.
In order to define the meaning of a regular expression $r$ we define a function
$$ \mathcal{L}:\textrm{RegExp} \rightarrow 2^{\Sigma^*} $$
such that $\mathcal{L}(r)$ is the <em style=\color:blue>language</em> specified by the regular expression $r$.
In order to demonstrate how regular expressions work we will use the function findall from the module
re. This function is called in the following way:
$$ \texttt{re.findall}(r, s, \textrm{flags}=0) $$
Here, the arguments are interpreted as follows:
- $r$ is a string that is interpreted as a regular expression,
- $s$ is a string. The regular expression $r$ specifies substrings of $s$ that we want to find.
- $\textrm{flags}$ is an optional argument of type int which is set to $0$ by default.
This argument is useful to set flags that might be used to alter the interpretation of the regular
expression $r$. For example, if the flag re.IGNORECASE is set, then the search performed by findall is not case sensitive.
The function findall returns a list of those non-overlapping substrings of the string $s$ that
match the regular expression $r$. In the following example, the regular expression $r$ searches
for the letter a and since the string $s$ contains the character a two times, findall returns a
list with two occurrences of a:
End of explanation
"""
# With re.IGNORECASE the uppercase 'A' matches too, giving three results.
re.findall('a', 'abcabcABC', re.IGNORECASE)
"""
Explanation: In the next example, the flag re.IGNORECASE is set and hence the function call returns a list of length 3.
End of explanation
"""
# A non-meta character matches itself: three occurrences of 'a'.
re.findall('a', 'abaa')
"""
Explanation: To begin our definition of the set $\textrm{RegExp}$ of Python regular expressions, we first have to define
the set $\texttt{MetaChars}$ of all meta-characters:
MetaChars := { '.', '^', '$', '*', '+', '?', '{', '}', '[', ']', '\', '|', '(', ')' }
These characters are used as <em style="color:blue">operator symbols</em> or as
part of operator symbols inside of regular expressions.
Now we can start our inductive definition of regular expressions:
- Any Unicode character $c$ such that $c \not\in \textrm{MetaChars}$ is a regular expression.
The regular expressions $c$ matches the character $c$, i.e. we have
$$ \mathcal{L}(c) = { c }. $$
- If $c$ is a meta character, i.e. we have $c \in \textrm{MetaChars}$, then the string $\backslash c$
is a regular expression matching the meta-character $c$, i.e. we have
$$ \mathcal{L}(\backslash c) = { c }. $$
End of explanation
"""
# Escaping a meta-character: a raw string r'\+' and the equivalent
# doubled-backslash '\\+' both denote the regex \+ matching a literal '+'.
re.findall(r'\+', '+-+')
re.findall('\\+', '+-+')
"""
Explanation: In the following example we have to use <em style="color:blue">raw strings</em> in order to prevent
the backlash character to be mistaken as an <em style="color:blue">escape sequence</em>. A string is a
<em style="color:blue">raw string</em> if the opening quote character is preceded with the character
r.
End of explanation
"""
# Concatenated characters match the word 'the' (case-insensitively here).
re.findall(r'the', 'The horse, the dog, and the cat.', flags=re.IGNORECASE)
"""
Explanation: Concatenation
The next rule shows how regular expressions can be <em style="color:blue">concatenated</em>:
- If $r_1$ and $r_2$ are regular expressions, then $r_1r_2$ is a regular expression. This
regular expression matches any string $s$ that can be split into two substrings $s_1$ and $s_2$
such that $r_1$ matches $s_1$ and $r_2$ matches $s_2$. Formally, we have
$$\mathcal{L}(r_1r_2) :=
\bigl{ s_1s_2 \mid s_1 \in \mathcal{L}(r_1) \wedge s_2 \in \mathcal{L}(r_2) \bigr}.
$$
In the lecture notes we have used the notation $r_1 \cdot r_2$ instead of the Python notation $r_1r_2$.
Using concatenation of regular expressions, we can now find words.
End of explanation
"""
# The choice operator '|' matches either alternative.
re.findall(r'The|a', 'The horse, the dog, and a cat.', flags=re.IGNORECASE)
"""
Explanation: Choice
Regular expressions provide the operator | that can be used to choose between
<em style="color:blue">alternatives:</em>
- If $r_1$ and $r_2$ are regular expressions, then $r_1|r_2$ is a regular expression. This
regular expression matches any string $s$ that can is matched by either $r_1$ or $r_2$.
Formally, we have
$$\mathcal{L}(r_1|r_2) := \mathcal{L}(r_1) \cup \mathcal{L}(r_2). $$
In the lecture notes we have used the notation $r_1 + r_2$ instead of the Python notation $r_1|r_2$.
End of explanation
"""
# 'a+' matches one or more consecutive occurrences of 'a'.
re.findall(r'a+', 'abaabaAaba.', flags=re.IGNORECASE)
"""
Explanation: Quantifiers
The most interesting regular expression operators are the <em style="color:blue">quantifiers</em>.
The official documentation calls them <em style="color:blue">repetition qualifiers</em> but in this notebook
they are called quantifiers, since this is shorter. Syntactically, quantifiers are
<em style="color:blue">postfix operators</em>.
- If $r$ is a regular expressions, then $r+$ is a regular expression. This
regular expression matches any string $s$ that can be split into a list on $n$ substrings $s_1$,
$s_2$, $\cdots$, $s_n$ such that $r$ matches $s_i$ for all $i \in {1,\cdots,n}$.
Formally, we have
$$\mathcal{L}(r+) :=
\Bigl{ s \Bigm| \exists n \in \mathbb{N}: \bigl(n \geq 1 \wedge
\exists s_1,\cdots,s_n : (s_1 \cdots s_n = s \wedge
\forall i \in {1,\cdots, n}: s_i \in \mathcal{L}(r)\bigr)
\Bigr}.
$$
Informally, $r+$ matches $r$ any positive number of times.
End of explanation
"""
# 'a*' matches zero or more 'a's, so it also yields empty strings.
re.findall(r'a*', 'abaabaaaba')
"""
Explanation: If $r$ is a regular expressions, then $r$ is a regular expression. This
regular expression matches either the empty string or any string $s$ that can be split into a list on $n$ substrings $s_1$,
$s_2$, $\cdots$, $s_n$ such that $r$ matches $s_i$ for all $i \in {1,\cdots,n}$.
Formally, we have
$$\mathcal{L}(r) := \bigl{ \texttt{''} \bigr} \cup
\Bigl{ s \Bigm| \exists n \in \mathbb{N}: \bigl(n \geq 1 \wedge
\exists s_1,\cdots,s_n : (s_1 \cdots s_n = s \wedge
\forall i \in {1,\cdots, n}: s_i \in \mathcal{L}(r)\bigr)
\Bigr}.
$$
Informally, $r*$ matches $r$ any number of times, including zero times. Therefore, in the following example the result also contains various empty strings. For example, in the string 'abaabaaaba' the regular expression a* will find an empty string at the beginning of each occurrence of the character 'b'. The final occurrence of the empty string is found at the end of the string:
End of explanation
"""
# 'a?' matches zero or one 'a', hence the empty-string matches too.
re.findall(r'a?', 'abaa')
"""
Explanation: If $r$ is a regular expressions, then $r?$ is a regular expression. This
regular expression matches either the empty string or any string $s$ that is matched by $r$. Formally we have
$$\mathcal{L}(r?) := \bigl{ \texttt{''} \bigr} \cup \mathcal{L}(r). $$
Informally, $r?$ matches $r$ at most one times but also zero times. Therefore, in the following example the result also contains two empty strings. One of these is found at the beginning of the character 'b', the second is found at the end of the string.
End of explanation
"""
# 'a{2,3}' greedily takes three 'a's first, leaving only a single 'a' behind.
re.findall(r'a{2,3}', 'aaaa')
"""
Explanation: If $r$ is a regular expressions and $m,n\in\mathbb{N}$ such that $m \leq n$, then $r{m,n}$ is a
regular expression. This regular expression matches any number $k$ of repetitions of $r$ such that $m \leq k \leq n$.
Formally, we have
$$\mathcal{L}(r{m,n}) =
\Bigl{ s \mid \exists k \in \mathbb{N}: \bigl(m \leq k \leq n \wedge
\exists s_1,\cdots,s_k : (s_1 \cdots s_k = s \wedge
\forall i \in {1,\cdots, k}: s_i \in \mathcal{L}(r)\bigr)
\Bigr}.
$$
Informally, $r{m,n}$ matches $r$ at least $m$ times and at most $n$ times.
End of explanation
"""
# 'a{2}' matches exactly two repetitions of 'a'.
re.findall(r'a{2}', 'aabaaaba')
"""
Explanation: Above, the regular expression r'a{2,3}' matches the string 'aaaa' only once since the first match consumes three occurrences of a and then there is only a single a left.
If $r$ is a regular expressions and $n\in\mathbb{N}$, then $r{n}$ is a regular expression. This regular expression matches exactly $n$ repetitions of $r$. Formally, we have
$$\mathcal{L}(r{n}) = \mathcal{L}(r{n,n}).$$
End of explanation
"""
# Matches 0-2 a's: -> ['aa', '', 'aa', 'a', '', '', 'a', '']
# (empty strings are found at each 'b' and at the end of the string).
re.findall(r'a{,2}', 'aabaaabba')
"""
Explanation: If $r$ is a regular expressions and $n\in\mathbb{N}$, then $r{,n}$ is a regular expression. This regular expression matches up to $n$ repetitions of $r$. Formally, we have
$$\mathcal{L}(r{,n}) = \mathcal{L}(r{0,n}).$$
End of explanation
"""
# Two or more a's, greedy -> ['aa', 'aaa']
re.findall(r'a{2,}', 'aabaaaba')
"""
Explanation: If $r$ is a regular expressions and $n\in\mathbb{N}$, then $r{n,}$ is a regular expression. This regular expression matches $n$ or more repetitions of $r$. Formally, we have
$$\mathcal{L}(r{n,}) = \mathcal{L}(r{n}r*).$$
End of explanation
"""
# Non-greedy prefers two a's per match; greedy takes three
# -> (['aa', 'aa'], ['aaa'])
re.findall(r'a{2,3}?', 'aaaa'), re.findall(r'a{2,3}', 'aaaa')
"""
Explanation: Non-Greedy Quantifiers
The quantifiers ?, +, *, {m,n}, {n}, {,n}, and {n,} are <em style="color:blue">greedy</em>, i.e. they
match the longest possible substrings. Suffixing these operators with the character ? makes them
<em style="color:blue">non-greedy</em>. For example, the regular expression a{2,3}? matches either
two occurrences or three occurrences of the character a but will prefer to match only two characters. Hence, the regular expression a{2,3}? will find two matches in the string 'aaaa', while the regular expression a{2,3} only finds a single match.
End of explanation
"""
# Runs of characters from {a, b, c}; 'd' splits them -> ['abc', 'cba']
re.findall(r'[abc]+', 'abcdcba')
"""
Explanation: Character Classes
In order to match a set of characters we can use a <em style="color:blue">character class</em>.
If $c_1$, $\cdots$, $c_n$ are Unicode characters, then $[c_1\cdots c_n]$ is a regular expression that
matches any of the characters from the set ${c_1,\cdots,c_n}$:
$$ \mathcal{L}\bigl([c_1\cdots c_n]\bigr) := { c_1, \cdots, c_n } $$
End of explanation
"""
# -> ['11', '12', '2345', '0', '0', '7', '42', '0']: '007' is split up,
# since a number may only start with 0 when it is the single digit 0.
re.findall(r'[1-9][0-9]*|0', '11 abc 12 2345 007 42 0')
"""
Explanation: Character classes can also contain <em style="color:blue">ranges</em>. Syntactically, a range has the form
$c_1\texttt{-}c_2$, where $c_1$ and $c_2$ are Unicode characters.
For example, the regular expression [0-9] contains the range 0-9 and matches any decimal digit. To find all natural numbers embedded in a string we could use the regular expression [1-9][0-9]*|[0-9]. This regular expression matches either a single digit or a string that starts with a non-zero digit and is followed by any number of digits.
End of explanation
"""
# The first alternative [0-9] wins at every digit, so only single digits
# are returned, never whole numbers.
re.findall(r'[0-9]|[1-9][0-9]*', '11 abc 12 2345 007 42 0')
"""
Explanation: Note that the next example looks quite similar but gives a different result:
End of explanation
"""
# Runs of digits or the letters a, b, c -> ['11', 'abc12', '1a2', '2b3c4', '5']
re.findall(r'[\dabc]+', '11 abc12 1a2 2b3c4d5')
"""
Explanation: Here, the regular expression starts with the alternative [0-9], which matches any single digit.
So once a digit is found, the resulting substring is returned and the search starts again. Therefore, if this regular expression is used in findall, it will only return a list of single digits.
There are some predefined character classes:
- \d matches any digit.
- \D matches any non-digit character.
- \s matches any whitespace character.
- \S matches any non-whitespace character.
- \w matches any alphanumeric character.
If we would use only <font style="font-variant: small-caps">Ascii</font> characters this would
be equivalent to the character class [0-9a-zA-Z_].
- \W matches any non-alphanumeric character.
- \b matches at a word boundary. The string that is matched is the empty string.
- \B matches at any place that is not a word boundary.
Again, the string that is matched is the empty string.
These escape sequences can also be used inside of square brackets.
End of explanation
"""
# Runs of characters other than a, b, c -> ['xyz', 'uvw', 'hij']
re.findall(r'[^abc]+', 'axyzbuvwchij')
# \b...\b extracts whole words (alphanumeric runs) from the sentence.
re.findall(r'\b\w+\b', 'This is some text where we want to extract the words.')
"""
Explanation: Character classes can be negated if the first character after the opening [ is the character ^.
For example, [^abc] matches any character that is different from a, b, or c.
End of explanation
"""
# findall returns the captured group -> ['11', '12', '2345', '42', '0'];
# '007' yields nothing because no \b boundary exists inside a digit run.
re.findall(r'\b(0|[1-9][0-9]*)\b', '11 abc 12 2345 007 42 0')
"""
Explanation: The following regular expression uses the character class \b to isolate numbers. Note that we had to use parentheses since concatenation of regular expressions binds stronger than the choice operator |.
End of explanation
"""
# The back-reference \1 requires the same digits to repeat after the
# whitespace; findall returns the captured group -> ['12', '23']
re.findall(r'(\d+)\s+\1', '12 12 23 23 17 18')
"""
Explanation: Grouping
If $r$ is a regular expression, then $(r)$ is a regular expression describing the same language as
$r$. There are two reasons for using parentheses:
- Parentheses can be used to override the precedence of an operator.
This concept is the same as in programming languages. For example, the regular expression ab+
matches the character a followed by any positive number of occurrences of the character b because
the precedence of a quantifiers is higher than the precedence of concatenation of regular expressions.
However, (ab)+ matches the strings ab, abab, ababab, and so on.
- Parentheses can be used for <em style="color:blue">back-references</em> because inside
a regular expression we can refer to the substring matched by a regular expression enclosed in a pair of
parentheses using the syntax $\backslash n$ where $n \in {1,\cdots,9}$.
Here, $\backslash n$ refers to the $n^{\mathrm{th}}$ parenthesized <em style="color:blue">group</em> in the regular
expression, where a group is defined as any part of the regular expression enclosed in parentheses.
Counting starts at the leftmost opening parenthesis. For example, the regular expression
(a(b|c)*d)?ef(gh)+
has three groups:
1. (a(b|c)*d) is the first group,
2. (b|c) is the second group, and
3. (gh) is the third group.
For example, if we want to recognize a string that starts with a number followed by some white space and then
followed by the <b>same</b> number we can use the regular expression (\d+)\s+\1, where \s+ matches the intervening whitespace.
End of explanation
"""
# Non-greedy: each match stops at the first 't' after its 'c'
# -> ['ct', 'cat', 'caat', 'could we look at']
re.findall(r'c.*?t', 'ct cat caat could we look at that!')
"""
Explanation: In general, given a digit $n$, the expression $\backslash n$ refers to the string matched in the $n$-th group of the regular expression.
The Dot
The regular expression . matches any character except the newline. For example, c.*?t matches any string that starts with the character c and ends with the character t and does not contain any newline. If we are using the non-greedy version of the quantifier *, we can find all such words in the string below.
End of explanation
"""
# Two groups per match, so findall returns (quoted string, quote char) tuples:
# [('"uvw"', '"'), ("'xyz'", "'")]
re.findall(r'((?P<quote>[\'"])\w*(?P=quote))', 'abc "uvw" and \'xyz\'')
"""
Explanation: The dot . does not have any special meaning when used inside a character range. Hence, the regular expression
[.] matches only the character ..
Named Groups
Referencing a group via the syntax \n where n is a natural number is both cumbersome and error-prone. Instead, we can use named groups.
The syntax to define a named group is
(?P<name>r)
where name is the name of the group and r is the regular expression. To refer to the string matched by this group we use the following syntax:
(?P=name)
For example, below we try to find a string of alphanumeric characters that is either contained in single quotes or in double quotes. The regular
expression [\'"] matches either a single or a double quote. By referring to the regular expression that has been named
quote we ensure that an opening single quote is matched by a closing single quote and an opening double quote is matched by a
closing double quote.
End of explanation
"""
# A small multi-line sample; the literal starts and ends with a newline,
# so the first and last matched lines are empty strings.
data = \
'''
This is a text containing five lines, two of which are empty.
This is the second non-empty line,
and this is the third non-empty line.
'''
# With re.MULTILINE, ^ and $ anchor at every line -> one list entry per line.
re.findall(r'^.*$', data, flags=re.MULTILINE)
"""
Explanation: Start and End of a Line
The regular expression ^ matches at the start of a string. If we set the flag re.MULTILINE, which we
will usually do when working with this regular expression containing the expression ^,
then ^ also matches at the beginning of each line,
i.e. it matches after every newline character.
Similarly, the regular expression $ matches at the end of a string. If we set the flag re.MULTILINE, then $ also matches at the end of each line,
i.e. it matches before every newline character.
End of explanation
"""
text = 'Here is 1$, here are 21€, and there are 42 $.'
# Keep only numbers followed (possibly after whitespace) by a dollar sign:
# L == ['1', '42'], so the sum is 43.  The 21€ amount is skipped.
L = re.findall(r'([0-9]+)(?=\s*\$)', text)
print(f'L = {L}')
sum(int(x) for x in L)
"""
Explanation: Lookahead Assertions
Sometimes we need to look ahead in order to know whether we have found what we are looking for. Consider the case that you want to add up all numbers followed by a dollar symbol but you are not interested in any other numbers. In this case a
lookahead assertion comes in handy. The syntax of a lookahead assertion is:
$$ r_1 (\texttt{?=}r_2) $$
Here $r_1$ and $r_2$ are regular expressions and ?= is the <em style="color:blue">lookahead operator</em>. $r_1$ is the regular expression you are searching for while $r_2$ is the regular expression describing the lookahead. Note that this lookahead is not matched. It is only checked whether $r_1$ is followed by $r_2$ but only the text matching $r_1$ is matched. Syntactically, the
lookahead $r_2$ has to be preceded by the lookahead operator and both have to be surrounded by parentheses.
In the following example we are looking for all numbers that are followed by dollar symbols and we sum these numbers up.
End of explanation
"""
text = 'Here is 1$, here are 21 €, and there are 42 $.'
# Keep only numbers NOT followed by a dollar sign: L == ['21'], sum 21.
# The [0-9]* inside the lookahead prevents backtracking from matching a
# suffix of a dollar amount (e.g. just the '2' of '42').
L = re.findall(r'[0-9]+(?![0-9]*\s*\$)', text)
print(f'L = {L}')
sum(int(x) for x in L)
"""
Explanation: There are also <em style="color:blue">negative lookahead assertion</em>. The syntax is:
$$ r_1 (\texttt{?!}r_2) $$
Here $r_1$ and $r_2$ are regular expressions and ?! is the <em style="color:blue">negative lookahead operator</em>.
The expression above checks for all occurrences of $r_1$ that are <b>not</b> followed by $r_2$.
In the following examples we sum up all numbers that are <u>not</u> followed by a dollar symbol.
Note that the lookahead expression has to ensure that there are no additional digits. In general, negative lookahead is very tricky and I recommend against using it.
End of explanation
"""
# Load the full text of the book; the slice previews the opening passage.
with open('alice.txt', 'r') as f:
    text = f.read()
print(text[:1020])
"""
Explanation: Examples
In order to have some strings to play with, let us read the file alice.txt, which contains the book
Alice's Adventures in Wonderland written by
Lewis Carroll.
End of explanation
"""
# Count lines that contain at least one non-whitespace character.
len(re.findall(r'^.*\S.*?$', text, flags=re.MULTILINE))
"""
Explanation: How many non-empty lines does this story have?
End of explanation
"""
# Four-letter words starting with d, f or s and ending in k or t,
# case-insensitively; the set removes duplicates.
set(re.findall(r'\b[dfs]\w{2}[kt]\b', text, flags=re.IGNORECASE))
"""
Explanation: Next, let us check, whether this text is suitable for minors. In order to do so we search for all four
letter words that start with either d, f or s and end with k or t.
End of explanation
"""
# L: every word occurrence (lower-cased); S: the distinct words.
L = re.findall(r'\b\w+\b', text.lower())
S = set(L)
print(f'There are {len(L)} words in this book and {len(S)} different words.')
"""
Explanation: How many words are in this text and how many different words are used?
End of explanation
"""
|
simpeg/simpegmt | notebooks/Derivative test MT1D.ipynb | mit | import SimPEG as simpeg
import simpegEM as simpegem, simpegMT as simpegmt
from SimPEG.Utils import meshTensor
import numpy as np
# Bare attribute access: in a notebook this just displays the class.
simpegmt.FieldsMT.FieldsMT_1D
# Setup the problem: a 1D MT halfspace model.
sigmaHalf = 1e-2
# Frequency
nFreq = 33
# freqs = np.logspace(3,-3,nFreq)
freqs = np.array([100])
# Make the mesh: graded air / core / bottom-padding tensor cells.
ct = 5
air = meshTensor([(ct,25,1.3)])
# coreT0 = meshTensor([(ct,15,1.2)])
# coreT1 = np.kron(meshTensor([(coreT0[-1],15,1.3)]),np.ones((7,)))
core = np.concatenate( ( np.kron(meshTensor([(ct,15,-1.2)]),np.ones((10,))) , meshTensor([(ct,20)]) ) )
bot = meshTensor([(core[0],10,-1.3)])
# x0 places the top of the mesh at z = 0.
x0 = -np.array([np.sum(np.concatenate((core,bot)))])
# Change to use no air
m1d = simpeg.Mesh.TensorMesh([np.concatenate((bot,core))], x0=x0)
# Make the model: halfspace conductivity below z = 0, near-insulator above.
sigma = np.zeros(m1d.nC) + sigmaHalf
sigma[ m1d.gridCC > 0 ] = 1e-8
# Receivers: real and imaginary parts of the 1D impedance at the surface.
rxList = []
for rxType in ['z1dr','z1di']:
    rxList.append(simpegmt.SurveyMT.RxMT(simpeg.mkvc(np.array([0.0]),2).T,rxType))
# Source list (tD toggles the total-field vs primary/secondary source).
srcList =[]
tD = False
if tD:
    for freq in freqs:
        srcList.append(simpegmt.SurveyMT.srcMT_polxy_1DhomotD(rxList,freq))
else:
    # NOTE(review): the loop variable `freq` leaks out of this loop and is
    # reused by the adjoint-test cells further down -- verify intent.
    for freq in freqs:
        srcList.append(simpegmt.SurveyMT.srcMT_polxy_1Dprimary(rxList,freq))
# Make the survey
survey = simpegmt.SurveyMT.SurveyMT(srcList)
# Set the problem
problem = simpegmt.ProblemMT1D.eForm_psField(m1d)
problem.sigmaPrimary = sigma
problem.pair(survey)
# Get the fields
fields = problem.fields(sigma)
# Project the data
data = survey.projectFields(fields)
%debug
"""
Explanation: Testing derivaties for 1D MT problem.
Especially the rx.projectFieldsDeriv
End of explanation
"""
# Unused code &= \frac{ P_{ex} P_{bx} \frac{1}{\mu_0} \left( f_b(src,m) - f_e(src,m) \right) } { \left(P_{bx}f_b(src,m) \frac{1}{\mu_0} \right)^2 }
"""
Explanation: We need calculate this derivative.
\begin{align}
\underbrace{\frac{\partial P(f(u(m)),m^{fix})}{\partial f}}_{Rx}
\end{align}
Use the rule
\begin{align}
\frac{d}{dx}\left( \frac{a(x)}{b(x)} \right) = \frac{\frac{d }{dx} a(x) b(x) - a(x)\frac{d }{dx} b(x) }{ b(x)^2 }
\end{align}
In the case of the 1D MT problem the data is calculated as
\begin{align}
MT1Ddata = P(f(m)) &= \frac{P_{ex} f_e(src,m)}{P_{bx} f_b(src,m) \frac{1}{\mu_0}} = \frac{P_e u}{P_b f_b(u)} \
\frac{\partial P(f(m))}{\partial u} v &= \frac{P_e}{P_b \frac{1}{mu_0} f_b(u)}v - \frac{P_e u}{\left(P_b \frac{1}{mu_0} f_b(u)\right)^2} P_b \frac{1}{mu_0} \frac{d f_b}{du} v
\end{align}
where u is the fields that we solve for.
\begin{align}
\frac{d f_b}{du} = - \frac{1}{i \omega} \nabla
\end{align}
End of explanation
"""
# Reference copy of the receiver's projectFields being differentiated below:
# def projectFields(self, src, mesh, u):
#     '''
#     Project the fields and return the
#     '''
#     if self.projType is 'Z1D':
#         Pex = mesh.getInterpolationMat(self.locs,'Fx')
#         Pbx = mesh.getInterpolationMat(self.locs,'Ex')
#         ex = Pex*mkvc(u[src,'e_1d'],2)
#         bx = Pbx*mkvc(u[src,'b_1d'],2)/mu_0
#         f_part_complex = ex/bx
#         real_or_imag = self.projComp
#         f_part = getattr(f_part_complex, real_or_imag)
#         return f_part
# Initiate quantities for the derivative tests: a random (complex) field
# solution u0 stored in a fields object f0 used as the linearization point.
src = survey.srcList[0]
rx = src.rxList[0]
v = np.random.randn(m1d.nN)
# v0 appears unused by the cells below -- scratch vector, verify.
v0 = np.random.randn(m1d.nF+m1d.nE)
u0 = np.random.randn(m1d.nN)+np.random.randn(m1d.nN)*1j
f0 = problem.fieldsPair(m1d,survey)
f0[src,'e_1dSolution'] = u0
# f0[src,'b_1d'] = -1/(1j*simpegem.Utils.EMUtils.omega(src.freq))*m1d.nodalGrad*u0
# Run a derivative (Taylor) test of rx.projectFields w.r.t. the field u.
def fun(u):
    """Return (P(u), u -> dP/du at f0) for simpeg.Tests.checkDerivative."""
    f = problem.fieldsPair(m1d,survey)
    f[src,'e_1dSolution'] = u
    # The derivative closure is evaluated at the fixed base fields f0,
    # which checkDerivative linearizes about u0 (f0 holds u0).
    return rx.projectFields(src,m1d,f), lambda t: rx.projectFieldsDeriv(src,m1d,f0,t)
simpeg.Tests.checkDerivative(fun,u0,num=5,plotIt=False)
# Spot-evaluate the derivative and the projection at the base point.
rx.projectFieldsDeriv(src,m1d,f0,u0)
rx.projectFields(src,m1d,f0)
# Test the Jvec derivative: checkDerivative compares dpred(m0 + h*dm)
# against dpred(m0) + h*Jvec(m0, dm) for shrinking h.
# print '%s formulation - %s' % (fdemType, comp)
CONDUCTIVITY = 0.01
m0 = np.log(np.ones(problem.mesh.nC)*CONDUCTIVITY)
# mu = np.log(np.ones(problem.mesh.nC)*MU)
if True:
    # Perturb the model slightly so the test point is not degenerate.
    m0 = m0 + np.random.randn(problem.mesh.nC)*CONDUCTIVITY*1e-1
# mu = mu + np.random.randn(prb.mesh.nC)*MU*1e-1
# prb.mu = mu
# survey = prb.survey
def fun(x):
    """Return (dpred(x), v -> Jvec at m0) for checkDerivative."""
    return survey.dpred(x), lambda x: problem.Jvec(m0, x)
simpeg.Tests.checkDerivative(fun, m0, num=4, plotIt=False)
%debug
### Adjoint test
"""
Explanation: As matrices the formulas above can be written as
\begin{align}
\left[ \frac{\partial P(f(m))}{\partial u} v \right] = \left[ diag \left[ \frac{1}{\left(P_b \frac{1}{mu_0} f_b(u)\right)} \right] [P_e v] , diag[P_e u] diag \left[ \frac{1}{\left(P_b \frac{1}{mu_0} f_b(u)\right)} \right]^T diag \left[ \frac{1}{\left(P_b \frac{1}{mu_0} f_b(u)\right)} \right] \left[ P_b \frac{1}{mu_0} \frac{d f_b}{du}(v) \right] \right]
\end{align}
The adjoint problem is done simliarly
\begin{align}
\left[ \frac{\partial P(f(m))}{\partial u} v \right]^T = [P_e]^T diag \left[ \frac{1}{\left(P_b \frac{1}{mu_0} f_b(u)\right)} \right]^T v - \left[ P_b \frac{d f_b}{du} \frac{1}{mu_0} \right]^T diag \left[ \frac{1}{\left(P_b \frac{1}{mu_0} f_b(u)\right)} \right] diag \left[ \frac{1}{\left(P_b \frac{1}{mu_0} f_b(u)\right)} \right]^T diag \left[ P_e u \right]^T v
\end{align}
End of explanation
"""
# Run an adjoint (dot-product) test: v.(J w) should equal w.(J^T v).
TOL = 1e-4
FLR = 1e-20
def projectFieldsAdjointTest(fdemType, comp):
    """Dot-product test for rx.projectFieldsDeriv; returns True on pass.

    fdemType/comp are only used in the printed banner.  The derivative is
    evaluated at the notebook-global base fields f0 (NOTE(review): the
    locally computed u from problem.fields(m) is unused -- verify intent).
    """
    print 'Adjoint %s formulation - %s' % (fdemType, comp)
    m = np.log(np.ones(problem.mesh.nC)*0.01)
    if True:
        m = m + np.random.randn(problem.mesh.nC)*0.01*1e-1
    u = problem.fields(m)
    v = np.random.randn(1)#+np.random.randn(1)*1j
    # print prb.PropMap.PropModel.nP
    w = np.random.randn(m1d.nN)+np.random.randn(m1d.nN)*1j
    vJw = v.dot(rx.projectFieldsDeriv(src,m1d,f0,w))
    wJtv = w.dot(rx.projectFieldsDeriv(src,m1d,f0,v,adjoint=True)).real
    # Relative tolerance scaled to the magnitude of vJw, floored at FLR.
    tol = np.max([TOL*(10**int(np.log10(np.abs(vJw)))),FLR])
    print vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol
    return np.abs(vJw - wJtv) < tol
projectFieldsAdjointTest('e','projectFieldsDeriv')
# Run an adjoint (dot-product) test for the system-matrix derivative.
TOL = 1e-4
FLR = 1e-20
def getADeriv_mAdjointTest():
    """Dot-product test v.(dA/dm w) == w.((dA/dm)^T v); True on pass.

    NOTE(review): `freq` is the notebook-global loop variable left over
    from the setup cell, and the base fields f0 (not the freshly computed
    u) are used -- verify both are intended.
    """
    print 'Adjoint test e formulation - getADeriv_m'
    m = np.log(np.ones(problem.mesh.nC)*0.01)
    if True:
        m = m + np.random.randn(problem.mesh.nC)*0.01*1e-1
    u = problem.fields(m)
    v = np.random.randn(m1d.nN)#+np.random.randn(1)*1j
    # print prb.PropMap.PropModel.nP
    w = np.random.randn(m1d.nC)#+np.random.randn(m1d.nN)*1j
    vJw = v.dot(problem.getADeriv_m(freq,f0,w))
    wJtv = w.dot(problem.getADeriv_m(freq,f0,v,adjoint=True))
    # Relative tolerance scaled to the magnitude of vJw, floored at FLR.
    tol = np.max([TOL*(10**int(np.log10(np.abs(vJw)))),FLR])
    print vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol
    return np.abs(vJw - wJtv) < tol
getADeriv_mAdjointTest()
%debug
# Run an adjoint (dot-product) test for the right-hand-side derivative.
TOL = 1e-4
FLR = 1e-20
def getRHSDeriv_mAdjointTest():
    """Dot-product test v.(dRHS/dm w) == w.((dRHS/dm)^T v); True on pass.

    NOTE(review): `freq` is the notebook-global loop variable from the
    setup cell, and the computed fields u are unused -- verify intent.
    """
    print 'Adjoint test e formulation - getRHSDeriv_m'
    m = np.log(np.ones(problem.mesh.nC)*0.01)
    if True:
        m = m + np.random.randn(problem.mesh.nC)*0.01*1e-1
    u = problem.fields(m)
    v = np.random.randn(m1d.nN)#+np.random.randn(1)*1j
    # print prb.PropMap.PropModel.nP
    w = np.random.randn(m1d.nC)#+np.random.randn(m1d.nN)*1j
    vJw = v.dot(problem.getRHSDeriv_m(freq,w))
    wJtv = w.dot(problem.getRHSDeriv_m(freq,v,adjoint=True))
    # Relative tolerance scaled to the magnitude of vJw, floored at FLR.
    tol = np.max([TOL*(10**int(np.log10(np.abs(vJw)))),FLR])
    print vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol
    return np.abs(vJw - wJtv) < tol
getRHSDeriv_mAdjointTest( )
# Scratch: a random complex column vector of data size, and the data count.
simpeg.mkvc(np.random.randn(survey.nD)+np.random.randn(survey.nD)*1j,2)
print survey.nD
TOL = 1e-4
FLR = 1e-20
def JvecAdjointTest():
    """Adjoint (dot-product) test for the full sensitivity:
    v.(Jvec(m, w, u)) should equal w.(Jtvec(m, v, u)); True on pass.
    """
    print 'Adjoint e formulation - Jvec'
    m = np.log(np.ones(problem.mesh.nC)*0.01)
    if True:
        m = m + np.random.randn(problem.mesh.nC)*0.01*1e-1
    # Fields at the test model; passed to Jvec/Jtvec so they are not re-solved.
    u = problem.fields(m)
    v = np.random.rand(survey.nD)
    # print prb.PropMap.PropModel.nP
    w = np.random.rand(problem.mesh.nC)
    vJw = v.dot(problem.Jvec(m, w, u))
    wJtv = w.dot(problem.Jtvec(m, v, u))
    # Relative tolerance scaled to the magnitude of vJw, floored at FLR.
    tol = np.max([TOL*(10**int(np.log10(np.abs(vJw)))),FLR])
    print vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol
    return np.abs(vJw - wJtv) < tol
JvecAdjointTest()
"""
Explanation: We have
\begin{align}
Jvec =&
End of explanation
"""
|
aschaffn/phys202-2015-work | assignments/assignment03/NumpyEx01.ipynb | mit | import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import antipackage
import github.ellisonbg.misc.vizarray as va
"""
Explanation: Numpy Exercise 1
Imports
End of explanation
"""
def checkerboard(size):
    """Return a (size, size) float array of alternating 1.0 / 0.0 values.

    The pattern starts with 1.0 at element (0, 0); every element whose
    row + column index is odd is 0.0.  Works for odd and even size.

    Parameters
    ----------
    size : int
        Edge length of the square board.

    Returns
    -------
    numpy.ndarray of shape (size, size), dtype float.
    """
    cb = np.ones((size, size), dtype=float)
    # Vectorized striding instead of the double Python loop: zero the odd
    # columns of even rows and the even columns of odd rows.
    cb[::2, 1::2] = 0.0
    cb[1::2, ::2] = 0.0
    return cb
# Sanity checks: corner value, count of ones, dtype, and the alternation
# pattern for an even (4) and an odd (5) board size.
checkerboard(4)
a = checkerboard(4)
assert a[0,0]==1.0
assert a.sum()==8.0
assert a.dtype==np.dtype(float)
assert np.all(a[0,0:5:2]==1.0)
assert np.all(a[1,0:5:2]==0.0)
b = checkerboard(5)
assert b[0,0]==1.0
assert b.sum()==13.0
assert np.all(b.ravel()[0:26:2]==1.0)
assert np.all(b.ravel()[1:25:2]==0.0)
"""
Explanation: Checkerboard
Write a Python function that creates a square (size,size) 2d Numpy array with the values 0.0 and 1.0:
Your function should work for both odd and even size.
The 0,0 element should be 1.0.
The dtype should be float.
End of explanation
"""
# Render a 20x20 board with 10px blocks via vizarray (notebook display only).
va.set_block_size(10)
va.enable()
checkerboard(20)
assert True
"""
Explanation: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.
End of explanation
"""
# Render a 27x27 board with 5px blocks via vizarray (notebook display only).
va.set_block_size(5)
va.enable()
checkerboard(27)
assert True
"""
Explanation: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.
End of explanation
"""
|
spencer2211/deep-learning | sentiment-rnn/Sentiment_RNN_Solution.ipynb | mit | import numpy as np
import tensorflow as tf
# Load the raw reviews and their sentiment labels; both files hold one
# record per line.  The trailing slice just previews the first characters.
with open('../sentiment-network/reviews.txt', 'r') as f:
    reviews = f.read()
with open('../sentiment-network/labels.txt', 'r') as f:
    labels = f.read()
reviews[:2000]
"""
Explanation: Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.
The architecture for this network is shown below.
<img src="assets/network_diagram.png" width=400px>
Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own.
From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.
We don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.
End of explanation
"""
from string import punctuation

# Drop every punctuation character, then rebuild the corpus: one review
# per newline-separated entry, plus a flat list of all words.
all_text = ''.join(c for c in reviews if c not in punctuation)
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()

all_text[:2000]
words[:100]
"""
Explanation: Data preprocessing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combined all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
End of explanation
"""
from collections import Counter

# Map each word to an integer id.  Ids start at 1 so that 0 remains free
# for padding later; more frequent words receive smaller ids.
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}

# Encode every review as a list of integer word ids.
reviews_ints = [[vocab_to_int[word] for word in review.split()]
                for review in reviews]
"""
Explanation: Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.
Also, convert the reviews to integers and store the reviews in a new list called reviews_ints.
End of explanation
"""
# Encode sentiment as integers: 'positive' -> 1, anything else -> 0.
labels = labels.split('\n')
labels = np.array([1 if each == 'positive' else 0 for each in labels])
# Distribution of review lengths; used to spot empty and overlong reviews.
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
"""
Explanation: Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
Exercise: Convert labels from positive and negative to 1 and 0, respectively.
End of explanation
"""
# Indices of the reviews that are non-empty after cleaning.
non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0]
len(non_zero_idx)
reviews_ints[-1]
"""
Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.
Exercise: First, remove the review with zero length from the reviews_ints list.
End of explanation
"""
# Keep only the non-empty reviews and their matching labels.
reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]
labels = np.array([labels[ii] for ii in non_zero_idx])
"""
Explanation: Turns out its the final review that has zero length. But that might not always be the case, so let's make it more general.
End of explanation
"""
seq_len = 200

# Encode each review into a fixed-width row: left-padded with zeros when
# shorter than seq_len, truncated to the first seq_len ids when longer
# (numpy clips the negative slice start for over-length rows).
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
for idx, review in enumerate(reviews_ints):
    features[idx, -len(review):] = np.array(review)[:seq_len]
features[:10, :100]
"""
Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from reviews_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use only the first 200 words as the feature vector.
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
End of explanation
"""
split_frac = 0.8

# Use split_frac (instead of a second hard-coded 0.8) so changing the
# fraction in one place actually changes the split.
split_idx = int(len(features)*split_frac)
train_x, val_x = features[:split_idx], features[split_idx:]
train_y, val_y = labels[:split_idx], labels[split_idx:]

# Split the held-out remainder in half: validation and test sets.
test_idx = int(len(val_x)*0.5)
val_x, test_x = val_x[:test_idx], val_x[test_idx:]
val_y, test_y = val_y[:test_idx], val_y[test_idx:]

print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
      "\nValidation set: \t{}".format(val_x.shape),
      "\nTest set: \t\t{}".format(test_x.shape))
"""
Explanation: Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.
End of explanation
"""
lstm_size = 256        # hidden units per LSTM cell
lstm_layers = 1        # number of stacked LSTM layers
batch_size = 500       # reviews fed to the network per training step
learning_rate = 0.001  # Adam step size
"""
Explanation: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like:
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2500, 200)
Build the graph
Here, we'll build the graph. First up, defining the hyperparameters.
lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.
batch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.
learning_rate: Learning rate
End of explanation
"""
# Word ids run from 1..len(vocab_to_int) and 0 is reserved for padding,
# so the embedding table needs len(vocab_to_int) + 1 rows; without the +1
# the largest id would index one past the end of the embedding matrix.
n_words = len(vocab_to_int) + 1

# Create the graph object
graph = tf.Graph()
# Add nodes to the graph: integer word-id inputs, 0/1 labels, and the
# dropout keep probability (a scalar, hence no shape).
with graph.as_default():
    inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')
    labels_ = tf.placeholder(tf.int32, [None, None], name='labels')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
"""
Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
Exercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.
End of explanation
"""
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300 
with graph.as_default():
    # Trainable lookup table mapping each word id to an embed_size vector,
    # initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
    # embed holds the embedded vectors for the input word ids.
    embed = tf.nn.embedding_lookup(embedding, inputs_)
"""
Explanation: Embedding
Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.
Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer as 200 units, the function will return a tensor with size [batch_size, 200].
End of explanation
"""
with graph.as_default():
    # Your basic LSTM cell
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    # Add dropout to the cell (applied to the cell outputs only).
    drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    # Stack up multiple LSTM layers, for deep learning.
    # NOTE(review): [drop] * lstm_layers repeats the *same* cell object in
    # every layer; newer TF 1.x releases require one fresh cell per layer
    # when lstm_layers > 1 -- confirm against the installed TF version.
    cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
    # Getting an initial state of all zeros
    initial_state = cell.zero_state(batch_size, tf.float32)
"""
Explanation: LSTM cell
<img src="assets/network_diagram.png" width=400px>
Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.
To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:
tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=<function tanh at 0x109f1ef28>)
you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like
drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
Most of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.
So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an architectural viewpoint, just a more complicated graph in the cell.
Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.
Here is a tutorial on building RNNs that will help you out.
End of explanation
"""
with graph.as_default():
    # Unroll the RNN over the embedded sequence; outputs holds every time
    # step's output, final_state the hidden state after the last step.
    outputs, final_state = tf.nn.dynamic_rnn(cell, embed,
                                             initial_state=initial_state)
"""
Explanation: RNN forward pass
<img src="assets/network_diagram.png" width=400px>
Now we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)
Above I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.
Exercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed.
End of explanation
"""
with graph.as_default():
    # Sigmoid prediction from the last time step only; mean-squared error
    # against the 0/1 labels, minimized with Adam.
    predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
    cost = tf.losses.mean_squared_error(labels_, predictions)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
"""
Explanation: Output
We only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_.
End of explanation
"""
with graph.as_default():
    # A prediction counts as correct when it rounds to the integer label.
    correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
Explanation: Validation accuracy
Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass.
End of explanation
"""
def get_batches(x, y, batch_size=100):
    """Yield consecutive (x, y) slice pairs of length ``batch_size``.

    Trailing elements that do not fill a complete batch are dropped,
    so every yielded pair contains exactly ``batch_size`` items.
    """
    usable = (len(x) // batch_size) * batch_size
    for start in range(0, usable, batch_size):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
"""
Explanation: Batching
This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].
End of explanation
"""
# Train for `epochs` full passes over the training set, printing the
# training loss every 5 iterations and a validation accuracy every 25.
epochs = 10
with graph.as_default():
    saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    iteration = 1
    for e in range(epochs):
        # Reset the RNN state at the start of each epoch.
        state = sess.run(initial_state)
        for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
            feed = {inputs_: x,
                    labels_: y[:, None],
                    keep_prob: 0.5,
                    initial_state: state}
            # Carry the final RNN state over into the next batch.
            loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
            if iteration%5==0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss))
            if iteration%25==0:
                val_acc = []
                # Fresh zero state for validation; keep_prob=1 disables dropout.
                val_state = sess.run(cell.zero_state(batch_size, tf.float32))
                for x, y in get_batches(val_x, val_y, batch_size):
                    feed = {inputs_: x,
                            labels_: y[:, None],
                            keep_prob: 1,
                            initial_state: val_state}
                    batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
                    val_acc.append(batch_acc)
                print("Val acc: {:.3f}".format(np.mean(val_acc)))
            iteration +=1
    saver.save(sess, "checkpoints/sentiment.ckpt")
"""
Explanation: Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.
End of explanation
"""
# Evaluate the trained model on the held-out test set, restoring the
# latest checkpoint saved during training.
test_acc = []
with tf.Session(graph=graph) as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    test_state = sess.run(cell.zero_state(batch_size, tf.float32))
    for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
        feed = {inputs_: x,
                labels_: y[:, None],
                keep_prob: 1,          # no dropout at test time
                initial_state: test_state}
        batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
        test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
"""
Explanation: Testing
End of explanation
"""
|
phoebe-project/phoebe2-docs | 2.2/tutorials/ETV.ipynb | gpl-3.0 | !pip install -I "phoebe>=2.1,<2.2"
"""
Explanation: ETV Datasets and Options
Setup
Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
End of explanation
"""
ps, constraints = phoebe.dataset.etv(component='mycomponent')
print ps
"""
Explanation: Dataset Parameters
Let's create the ParameterSet which would be added to the Bundle when calling add_dataset. Later we'll call add_dataset, which will create and attach this ParameterSet for us.
End of explanation
"""
print ps['Ns']
"""
Explanation: Currently, none of the available etv methods actually compute fluxes. But if one is added that computes a light-curve and actually finds the time of mid-eclipse, then the passband-dependend parameters will be added here.
For information on these passband-dependent parameters, see the section on the lc dataset
Ns
End of explanation
"""
print ps['time_ephems']
"""
Explanation: time_ephems
NOTE: this parameter will be constrained when added through add_dataset
End of explanation
"""
print ps['time_ecls']
"""
Explanation: time_ecls
End of explanation
"""
print ps['etvs']
"""
Explanation: etvs
NOTE: this parameter will be constrained when added through add_dataset
End of explanation
"""
print ps['sigmas']
"""
Explanation: sigmas
End of explanation
"""
ps_compute = phoebe.compute.phoebe()
print ps_compute
"""
Explanation: Compute Options
Let's look at the compute options (for the default PHOEBE 2 backend) that relate to the ETV dataset.
Other compute options are covered elsewhere:
* parameters related to dynamics are explained in the section on the orb dataset
End of explanation
"""
print ps_compute['etv_method']
"""
Explanation: etv_method
End of explanation
"""
print ps_compute['etv_tol']
"""
Explanation: etv_tol
End of explanation
"""
# Attach an ETV dataset covering eclipse cycle numbers 0..10, run the
# default PHOEBE backend, and inspect the synthetic ETV quantities.
b.add_dataset('etv', Ns=np.linspace(0,10,11), dataset='etv01')
b.add_compute()
b.run_compute()
b['etv@model'].twigs
print b['time_ephems@primary@etv@model']
print b['time_ecls@primary@etv@model']
print b['etvs@primary@etv@model']
"""
Explanation: Synthetics
End of explanation
"""
axs, artists = b['etv@model'].plot()
"""
Explanation: Plotting
By default, ETV datasets plot as etv vs time_ephem. Of course, a simple binary with no companion or apsidal motion won't show much of a signal (this is essentially flat with some noise). To see more ETV examples see:
Apsidal Motion
Minimial Hierarchical Triple
LTTE ETVs in a Hierarchical Triple
End of explanation
"""
axs, artists = b['etv@model'].plot(x='time_ecls', y=2)
"""
Explanation: Alternatively, especially when overplotting with a light curve, it's sometimes handy to just plot ticks at each of the eclipse times. This can easily be done by passing a single value for 'y'.
For other examples with light curves as well see:
* Apsidal Motion
* LTTE ETVs in a Hierarchical Triple
End of explanation
"""
|
teuben/astr288p | notebooks/05-images.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# import pyfits as fits # deprecated
from astropy.io import fits
"""
Explanation: Images: rows, columns and all that jazzy mess....
Two dimensional data arrays are normally stored in column-major or row-major order. In row-major order adjacent elements in a row are stored next to each other in memory. In column-major order adjacent elements in a column are stored next to each other in memory. See also https://en.wikipedia.org/wiki/Matrix_representation
For the usual mathematical matrix notation $A_{ij}$, where $i$ is the row, and $j$ the column, we have in the case of a $3x4$ matrix:
$$
A = \begin{bmatrix}
a_{11} & a_{12} & a_{13} & a_{14}\
a_{21} & a_{22} & a_{23} & a_{24}\
a_{31} & a_{32} & a_{33} & a_{34}\
\end{bmatrix}
$$
Classic languages such as Fortran store their arrays in so-called column-major order. FDATA(NR,NC), and indices started at 1 with the first versions.
More modern language, such a C, store their arrays in row-major order, CDATA[NR][NC], with indices starting at 0.
col major: fdata(1,1), fdata(2,1), ... first index runs fastest
row major: cdata[0][0], cdata[0][1], ... last index runs fastest
Examples of column major are: Fortran, [FITS], MatLab, IDL, R, Julia
Examples of row major are: C, Python, (java)
Images are often referred to in X and Y coordinates, like a mathematical system. The origin would be at (0,0) in the lower left corner. Image processing software normally puts the (0,0) origin at the top left corner, which corresponds a bit to how the matrix above is printed. This, together with row-major and column-major, can make it challenging to interchange data and plot them on the screen.
Add to this that for very large data, re-ordering axes can be a very expensive operation.
See also https://en.wikipedia.org/wiki/Iliffe_vector for another view on storing data in multi-dimensional arrays.
End of explanation
"""
# Open the FITS file; this particular file contains a single HDU.
hdu = fits.open('../data/cube432.fits')
print(len(hdu))
h = hdu[0].header  # FITS header (metadata)
d = hdu[0].data    # numpy array holding the cube data
print(d.shape)
print(d)
"""
Explanation: Get the Header-Data-Units (hdu's) from a fits file. This particular one only has 1.
End of explanation
"""
# Build the same 2x3x4 cube from scratch: each element's value encodes
# its own index as z*100 + y*10 + x.
d1 = np.zeros(2*3*4).reshape(2,3,4)
for z in range(2):
    for y in range(3):
        for x in range(4):
            d1[z,y,x] = x + 10*y + 100*z
print(d1)
print(d1.flatten())
# are two arrays the same (or close enough?)
np.allclose(d,d1)
"""
Explanation: This 4x3x2 matrix can actually also be generated from scratch using basic numpy:
End of explanation
"""
p0 = d[0,:,:]
p1 = d[1,:,:]
print(np.flipud(p0))
plt.imshow(p0)
plt.colorbar()
plt.matshow(p0,origin='lower')
"""
Explanation: We now want to take a plane from this cube, and plot this in a heatmap or contour map. We are now faced deciding how columns and rows translate to X and Y on a plot. Math, Astronomy, Geography and Image Processing groups all differ a bit how they prefer to see this, so numpy comes with a number of function to help you with this:
np.reshape
np.transpose (or T)
np.flipud
np.fliplr
np.rot90
np.swapaxes
np.moveaxis
the important thing to realize is that they all give a new view of the array, which often is more efficient than moving the actual values.
End of explanation
"""
plt.imshow(p0,interpolation='none')
plt.colorbar()
"""
Explanation: Note that for a small 4x3 matrix this image has been artificially made smooth by interpolating in imshow(); however you can already see that the integer coordinates are at the center of a cell: (0.0) is the center of the lower left cell. This is a little more obvious when you turn off interpolation:
End of explanation
"""
print(np.flipud(p0))
plt.imshow(np.flipud(p0),interpolation='none')
plt.colorbar()
"""
Explanation: if you want to print the array values on the terminal with 0 at the bottom left, use the np.flipup() function:
End of explanation
"""
d2 = np.arange(3*4).reshape(3,4,order='C')
d3 = np.arange(3*4).reshape(3,4,order='F')
print('C\n',d2)
print('F\n',d3)
d3.transpose()
"""
Explanation: Arrays in numpy are in C-order (row-major) by default, but you can actually change it to Fortran-order (column-major):
End of explanation
"""
try:
import casacore.images.image as image
print("we have casacore")
im = image('../data/cube432.fits')
print(im.shape()) # -> [2, 3, 4]
print(im.datatype()) # -> 'float'
d=im.getdata()
m=im.getmask()
print(d.shape) # -> (2,3,4)
print(d[0,0,0],m[0,0,0])
"""
[[[[ 0. 1. 2. 3.]
[ 10. 11. 12. 13.]
[ 20. 21. 22. 23.]]
[[ 100. 101. 102. 103.]
[ 110. 111. 112. 113.]
[ 120. 121. 122. 123.]]
"""
except:
print("no casacore")
import numpy.ma as ma
a = np.arange(4)
am = ma.masked_equal(a,2)          # mask out the element equal to 2
print(a.sum(),am.sum())            # masked value excluded: 6 vs 4
print(am.data,am.mask)             # numpy convention: True marks a BAD point
"""
Explanation: CASA
CASA is a python package used in radio astronomy (ALMA, VLA etc.), but is peculiar in the sense that it caters to astronomers with a fortran background, or mathematicians with a DATA(x,y) expectation: CASA uses column-major arrays with an index starting at 0. CASA images can also store a mask alongside the data, but the logic is the reverse from the masking used in numpy.ma: in CASA a True means a good data point, in numpy it means a bad point!
Notebooks don't work within casa (yet), but if you install casacore in your local python, the examples below should work. The kernsuite software should give you one easy option to install casacore, another way is to compile the code directly from https://github.com/casacore/casacore
Hence the example here is shown inline, and not in the notebook form yet. (note CASA currently uses python2)
```
casa
ia.open('../data/cube432.fits')
d1 = ia.getchunk()
d1.shape
(4,3,2)
d1[3,2,1]
123.0
print d1
[[[ 0. 100.]
[ 10. 110.]
[ 20. 120.]]
[[ 1. 101.]
[ 11. 111.]
[ 21. 121.]]
[[ 2. 102.]
[ 12. 112.]
[ 22. 122.]]
[[ 3. 103.]
[ 13. 113.]
[ 23. 123.]]]
p0 = d1[:,:,0]
print p0
[[ 0. 10. 20.]
[ 1. 11. 21.]
[ 2. 12. 22.]
[ 3. 13. 23.]]
print np.flipud(np.rot90(p0))
[[ 0. 1. 2. 3.]
[ 10. 11. 12. 13.]
[ 20. 21. 22. 23.]]
print np.flipud(np.rot90(p0)).flatten()
[ 0. 1. 2. 3. 10. 11. 12. 13. 20. 21. 22. 23.]
mask boolean in CASA is the opposite of the one in numpy.ma
d1m = ia.getchunk(getmask=True)
print d1[0,0,0],d1m[0,0,0]
0.0 True
or create the array from scratch
ia.fromshape(shape=[4,3,2])
p2 = ia.getchunk()
p2.shape
(4,3,2)
etc.etc.
```
casacore and casacore-python
Using just casacore, you will find the equivalent getchunk() is now called getdata() and converts to a proper numpy array without the need for np.rot90() and np.flipud(). The casacore-python version is able to work in python3 as well.
End of explanation
"""
%%time
n = 100
n1 = n
n2 = n+1
n3 = n+2
np.random.seed(123)
a = np.random.normal(size=n1*n2*n3).reshape(n1,n2,n3)
print(len(a.flatten()))
print(a[0,0,0])
# flatten() returns a *copy*, so assigning into the copy leaves `a` untouched.
a.flatten()[0]=-1
print(a[0,0,0]) # how come?
%%time
b = a.transpose()
# note B is another view of A
"""
Explanation: Array Transposing
End of explanation
"""
%%time
n = 2
m = n+1
np.random.seed(123)
a = np.random.normal(size=m*n).reshape(m,n)
x = np.random.normal(size=n)
print(x[0])
#
#a = np.arange(n*n).reshape(n,n)
#x = np.arange(n)
%%time
# b: built-in matrix-vector product (vectorized, runs in C)
b = np.matmul(a,x)
print(a.shape,x.shape,b.shape)
%%time
# b1: explicit double loop, row index i outermost
b1 = np.zeros(m)
for i in range(m):
    for j in range(n):
        b1[i] = b1[i] + a[i,j]*x[j]
%%time
# b2: one inner product per row of `a`
b2 = np.zeros(m)
for i in range(m):
    ai = a[i,:]
    b2[i] = np.inner(ai,x)
%%time
# b3: same double loop but column index j outermost (different memory
# access order over the row-major array `a`)
b3 = np.zeros(m)
for j in range(n):
    for i in range(m):
        b3[i] = b3[i] + a[i,j]*x[j]
if n < 3:
    print('a',a,'\nx',x)
    print('b',b,'\nb1',b1,'\nb2',b2,'\nb3',b3)
else:
    print(n)
"""
Explanation: Inner and Outer loop order of execution
Set up a (random) square matrix and vector. Multiply the matrix with a vector and measure the performance difference if you order the loops differently.
End of explanation
"""
from numpy.linalg import inv
n = 2
a1 = np.random.normal(size=n*n).reshape(n,n)
%%time
ainv = inv(a1)
print(a1)
print(ainv)
# sanity check: a1 @ inv(a1) should equal the identity (to round-off)
i1=np.matmul(a1,ainv)
i0=np.eye(n)
print(np.allclose(i0,i1,atol=1e-10))
print(i1)
"""
Explanation: Matrix Inversion
End of explanation
"""
|
tuanavu/coursera-university-of-washington | machine_learning/4_clustering_and_retrieval/assigment/week6/.ipynb_checkpoints/6_hierarchical_clustering_graphlab-checkpoint.ipynb | mit | import graphlab
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import time
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances
%matplotlib inline
'''Check GraphLab Create version'''
from distutils.version import StrictVersion
assert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'
"""
Explanation: Hierarchical Clustering
Hierarchical clustering refers to a class of clustering methods that seek to build a hierarchy of clusters, in which some clusters contain others. In this assignment, we will explore a top-down approach, recursively bipartitioning the data using k-means.
Note to Amazon EC2 users: To conserve memory, make sure to stop all the other notebooks before running this notebook.
Import packages
The following code block will check if you have the correct version of GraphLab Create. Any version later than 1.8.5 will do. To upgrade, read this page.
End of explanation
"""
wiki = graphlab.SFrame('people_wiki.gl/')
"""
Explanation: Load the Wikipedia dataset
End of explanation
"""
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
"""
Explanation: As we did in previous assignments, let's extract the TF-IDF features:
End of explanation
"""
from em_utilities import sframe_to_scipy # converter
# This will take about a minute or two.
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
"""
Explanation: To run k-means on this dataset, we should convert the data matrix into a sparse matrix.
End of explanation
"""
from sklearn.preprocessing import normalize
tf_idf = normalize(tf_idf)
"""
Explanation: To be consistent with the k-means assignment, let's normalize all vectors to have unit norm.
End of explanation
"""
def bipartition(cluster, maxiter=400, num_runs=4, seed=None):
    '''Split one cluster into two children by running 2-means on its data.

    cluster: should be a dictionary containing the following keys
        * dataframe: original dataframe
        * matrix: same data, in matrix format
        * centroid: centroid for this particular cluster

    Returns a (left_child, right_child) pair of dictionaries with the
    same keys, one per k-means label.'''
    matrix = cluster['matrix']
    frame = cluster['dataframe']

    # Run k-means with k=2 on the data matrix (scikit-learn keeps this simple).
    model = KMeans(n_clusters=2, max_iter=maxiter, n_init=num_runs,
                   random_state=seed, n_jobs=-1)
    model.fit(matrix)
    centroids = model.cluster_centers_
    assignment = model.labels_

    # Minor format conversion so the SFrame can be filtered by label.
    assignment_sa = graphlab.SArray(assignment)

    # Package one child cluster per k-means label (0 = left, 1 = right).
    children = []
    for label in (0, 1):
        children.append({'matrix': matrix[assignment == label],
                         'dataframe': frame[assignment_sa == label],
                         'centroid': centroids[label]})
    return tuple(children)
"""
Explanation: Bipartition the Wikipedia dataset using k-means
Recall our workflow for clustering text data with k-means:
Load the dataframe containing a dataset, such as the Wikipedia text dataset.
Extract the data matrix from the dataframe.
Run k-means on the data matrix with some value of k.
Visualize the clustering results using the centroids, cluster assignments, and the original dataframe. We keep the original dataframe around because the data matrix does not keep auxiliary information (in the case of the text dataset, the title of each article).
Let us modify the workflow to perform bipartitioning:
Load the dataframe containing a dataset, such as the Wikipedia text dataset.
Extract the data matrix from the dataframe.
Run k-means on the data matrix with k=2.
Divide the data matrix into two parts using the cluster assignments.
Divide the dataframe into two parts, again using the cluster assignments. This step is necessary to allow for visualization.
Visualize the bipartition of data.
We'd like to be able to repeat Steps 3-6 multiple times to produce a hierarchy of clusters such as the following:
(root)
|
+------------+-------------+
| |
Cluster Cluster
+------+-----+ +------+-----+
| | | |
Cluster Cluster Cluster Cluster
Each parent cluster is bipartitioned to produce two child clusters. At the very top is the root cluster, which consists of the entire dataset.
Now we write a wrapper function to bipartition a given cluster using k-means. There are three variables that together comprise the cluster:
dataframe: a subset of the original dataframe that correspond to member rows of the cluster
matrix: same set of rows, stored in sparse matrix format
centroid: the centroid of the cluster (not applicable for the root cluster)
Rather than passing around the three variables separately, we package them into a Python dictionary. The wrapper function takes a single dictionary (representing a parent cluster) and returns two dictionaries (representing the child clusters).
End of explanation
"""
wiki_data = {'matrix': tf_idf, 'dataframe': wiki} # no 'centroid' for the root cluster
left_child, right_child = bipartition(wiki_data, maxiter=100, num_runs=8, seed=1)
"""
Explanation: The following cell performs bipartitioning of the Wikipedia dataset. Allow 20-60 seconds to finish.
Note. For the purpose of the assignment, we set an explicit seed (seed=1) to produce identical outputs for every run. In pratical applications, you might want to use different random seeds for all runs.
End of explanation
"""
left_child
"""
Explanation: Let's examine the contents of one of the two clusters, which we call the left_child, referring to the tree visualization above.
End of explanation
"""
right_child
"""
Explanation: And here is the content of the other cluster we named right_child.
End of explanation
"""
def display_single_tf_idf_cluster(cluster, map_index_to_word):
    '''Print a summary of one cluster: its top TF-IDF words and the
    documents nearest to its centroid.

    cluster: dict with keys 'dataframe', 'matrix' and 'centroid',
        as produced by bipartition().
    map_index_to_word: SFrame specifying the mapping between words and
        column indices of the TF-IDF matrix.'''
    wiki_subset = cluster['dataframe']
    tf_idf_subset = cluster['matrix']
    centroid = cluster['centroid']
    # Print top 5 words with largest TF-IDF weights in the cluster
    idx = centroid.argsort()[::-1]
    for i in xrange(5):
        # Python 2 print statement; the trailing comma suppresses the newline.
        print('{0:s}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroid[idx[i]])),
    print('')
    # Compute distances from the centroid to all data points in the cluster.
    distances = pairwise_distances(tf_idf_subset, [centroid], metric='euclidean').flatten()
    # compute nearest neighbors of the centroid within the cluster.
    nearest_neighbors = distances.argsort()
    # For 8 nearest neighbors, print the title as well as first 180 characters of text.
    # Wrap the text at 80-character mark.
    for i in xrange(8):
        text = ' '.join(wiki_subset[nearest_neighbors[i]]['text'].split(None, 25)[0:25])
        print('* {0:50s} {1:.5f}\n  {2:s}\n  {3:s}'.format(wiki_subset[nearest_neighbors[i]]['name'],
              distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else ''))
    print('')
"""
Explanation: Visualize the bipartition
We provide you with a modified version of the visualization function from the k-means assignment. For each cluster, we print the top 5 words with highest TF-IDF weights in the centroid and display excerpts for the 8 nearest neighbors of the centroid.
End of explanation
"""
display_single_tf_idf_cluster(left_child, map_index_to_word)
display_single_tf_idf_cluster(right_child, map_index_to_word)
"""
Explanation: Let's visualize the two child clusters:
End of explanation
"""
athletes = left_child
non_athletes = right_child
"""
Explanation: The left cluster consists of athletes, whereas the right cluster consists of non-athletes. So far, we have a single-level hierarchy consisting of two clusters, as follows:
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Athletes Non-athletes
Is this hierarchy good enough? When building a hierarchy of clusters, we must keep our particular application in mind. For instance, we might want to build a directory for Wikipedia articles. A good directory would let you quickly narrow down your search to a small set of related articles. The categories of athletes and non-athletes are too general to facilitate efficient search. For this reason, we decide to build another level into our hierarchy of clusters with the goal of getting more specific cluster structure at the lower level. To that end, we subdivide both the athletes and non-athletes clusters.
Perform recursive bipartitioning
Cluster of athletes
To help identify the clusters we've built so far, let's give them easy-to-read aliases:
End of explanation
"""
# Bipartition the cluster of athletes
left_child_athletes, right_child_athletes = bipartition(athletes, maxiter=100, num_runs=8, seed=1)
"""
Explanation: Using the bipartition function, we produce two child clusters of the athlete cluster:
End of explanation
"""
display_single_tf_idf_cluster(left_child_athletes, map_index_to_word)
"""
Explanation: The left child cluster mainly consists of baseball players:
End of explanation
"""
display_single_tf_idf_cluster(right_child_athletes, map_index_to_word)
"""
Explanation: On the other hand, the right child cluster is a mix of football players and ice hockey players:
End of explanation
"""
baseball = left_child_athletes
ice_hockey_football = right_child_athletes
"""
Explanation: Note. Concerning use of "football"
The occurrences of the word "football" above refer to association football. This sport is also known as "soccer" in the United States (to avoid confusion with American football). We will use "football" throughout when discussing topic representation.
Our hierarchy of clusters now looks like this:
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Athletes Non-athletes
+
|
+-----------+--------+
| |
| +
+ football/
baseball ice hockey
Should we keep subdividing the clusters? If so, which cluster should we subdivide? To answer this question, we again think about our application. Since we organize our directory by topics, it would be nice to have topics that are about as coarse as each other. For instance, if one cluster is about baseball, we expect some other clusters about football, basketball, volleyball, and so forth. That is, we would like to achieve similar level of granularity for all clusters.
Notice that the right child cluster is more coarse than the left child cluster. The right cluster possesses a greater variety of topics than the left (ice hockey/football vs. baseball). So the right child cluster should be subdivided further to produce finer child clusters.
Let's give the clusters aliases as well:
End of explanation
"""
# Bipartition the cluster of non-athletes
left_child_non_athletes, right_child_non_athletes = bipartition(non_athletes, maxiter=100, num_runs=8, seed=1)
display_single_tf_idf_cluster(left_child_non_athletes, map_index_to_word)
display_single_tf_idf_cluster(right_child_non_athletes, map_index_to_word)
"""
Explanation: Cluster of ice hockey players and football players
In answering the following quiz question, take a look at the topics represented in the top documents (those closest to the centroid), as well as the list of words with highest TF-IDF weights.
Quiz Question. Bipartition the cluster of ice hockey and football players. Which of the two child clusters should be futher subdivided?
Note. To achieve consistent results, use the arguments maxiter=100, num_runs=8, seed=1 when calling the bipartition function.
The left child cluster
The right child cluster
Caution. The granularity criteria is an imperfect heuristic and must be taken with a grain of salt. It takes a lot of manual intervention to obtain a good hierarchy of clusters.
If a cluster is highly mixed, the top articles and words may not convey the full picture of the cluster. Thus, we may be misled if we judge the purity of clusters solely by their top documents and words.
Many interesting topics are hidden somewhere inside the clusters but do not appear in the visualization. We may need to subdivide further to discover new topics. For instance, subdividing the ice_hockey_football cluster led to the appearance of golf.
Quiz Question. Which diagram best describes the hierarchy right after splitting the ice_hockey_football cluster? Refer to the quiz form for the diagrams.
Cluster of non-athletes
Now let us subdivide the cluster of non-athletes.
End of explanation
"""
scholars_politicians_etc = left_child_non_athletes
musicians_artists_etc = right_child_non_athletes
"""
Explanation: The first cluster consists of scholars, politicians, and government officials whereas the second consists of musicians, artists, and actors. Run the following code cell to make convenient aliases for the clusters.
End of explanation
"""
|
ryan-leung/PHYS4650_Python_Tutorial | notebooks/02-Python-Data-Structures.ipynb | bsd-3-clause | a = 1 # integer
b = 1.1 #floating point numbers
c = True; d = False # Boolean (logical expression)
e = "Hello" # Strings
"""
Explanation: Python Data Structures
Data structure in computing
Data structures are how computer programs store information. Theses information can be processed, analyzed
and visualized easily from the programme. Scientific data can be large and complex and may require data structures appropriate for scientific programming. In Astronomy, the fits file is one of the most widely used data-storing medium, it can store a lot of information including the coordinates, the precious time, a very large cataelog table, multi-dimension data cube, etc.. These data, when it is opened by the programme, shall be recognised and easily managed by the programme.
In Python, there are pre-defined advanced data structure depending on the kind of data you wish to store.
You will have to choose data structures that best meet your requirements for the problem you are trying to solve. In this section, I will go through specifically examine three Python data structures: datetime, lists, tuples, sets, and dictionaries.
<a href="https://colab.research.google.com/github/ryan-leung/PHYS4650_Python_Tutorial/blob/master/notebooks/02-Python-Data-Structures.ipynb"><img align="right" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory">
</a>
Built-In Types
Python's simple types are summarized in the following table:
| Type | Example | Description |
|-------------|----------------|--------------------------------------------------------------|
| int | x = 1 | integers (i.e., whole numbers) |
| float | x = 1.0 | floating-point numbers (i.e., real numbers) |
| complex | x = 1 + 2j | Complex numbers (i.e., numbers with real and imaginary part) |
| bool | x = True | Boolean: True/False values |
| str | x = 'abc' | String: characters or text |
| NoneType| x = None | Special object indicating nulls |
We'll take a quick look at each of these in turn.
End of explanation
"""
# addition, subtraction, multiplication
(4 + 8) * (6.5 - 3)
"""
Explanation: Arithmetic Operations
Python implements seven basic binary arithmetic operators, two of which can double as unary operators.
They are summarized in the following table:
| Operator | Name | Description |
|--------------|----------------|--------------------------------------------------------|
| a + b | Addition | Sum of a and b |
| a - b | Subtraction | Difference of a and b |
| a * b | Multiplication | Product of a and b |
| a / b | True division | Quotient of a and b |
| a // b | Floor division | Quotient of a and b, removing fractional parts |
| a % b | Modulus | Integer remainder after division of a by b |
| a ** b | Exponentiation | a raised to the power of b |
| -a | Negation | The negative of a |
| +a | Unary plus | a unchanged (rarely used) |
These operators can be used and combined in intuitive ways, using standard parentheses to group operations.
For example:
End of explanation
"""
x = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
"""
Explanation: Strings in Python 2 and 3
```Python
Python 2
print type("Hello World!")
<type 'str'>
this is a byte string
print type(u"Hello World!")
<type 'unicode'>
this is a Unicode string
```
```Python
Python 3
print(type("Hello World!"))
<class 'str'>
this is a Unicode string
print(type(b"Hello World!"))
<class 'bytes'>
this is a byte string
```
Built-In Data Structures
| Type Name | Example | Add Element | Get Element | Set Element | Description |
|-----------|---------------------------|--------------------------------------------|-------------|-------------|---------------------------------------|
| list | [1, 2, 3] | x.append(1) | x[0] | x[0]=2 | Ordered collection |
| tuple | (1, 2, 3) | no altering | x[0] | no altering | Immutable ordered collection |
| dict | {'a':1, 'b':2, 'c':3} | x['new_key'] = 4 or x.update({'new_key'=4} | x['a'] | x['a']=2 | Unordered (key,value) mapping |
| set | {1, 2, 3} | x.add(4) | no indexing | no indexing | Unordered collection of unique values |
list
A Python list is a sequence of values (elements) that are usually the same kind of item. They are in order and mutable. Mutable means they can be changed after they are created, of course, this implies you can exchange the order of the elements inside it. This is a Python list of prime numbers smaller than 50:
End of explanation
"""
print(x)
print(x[0])
"""
Explanation: Definition
It is defined with parentheses : [xx,xx,xx].
Get Element
The elements are called using a square bracket with an index starting from zero : x[y], 0..N.
Slice (sub-array)
You can slice the array using colon, in this case a[start:end] means items start up to end-1.
End of explanation
"""
print(x[1:2])
print(x[:])
print(x[:2])
print(x[1:])
"""
Explanation: A single colon a[:] means a copy of the whole array.
a[start:] return tuple of items start through the rest of the array.
a[:end]return tuple of items from the beginning through end-1.
End of explanation
"""
print(x[-1])
print(x[-2])
print(x[-2:])
print(x[:-2])
"""
Explanation: more interestingly, they have negative index
a[-1] means last item in the array
a[-2:] means last two items in the array
a[:-2] means everything except the last two items
End of explanation
"""
print(x[::-1])
"""
Explanation: You may reverse a list with xxx[::-1].
End of explanation
"""
print(x + [0,1])
print([0,1] + x)
print([0,1] * 5)
"""
Explanation: Concatenate
You may add up two list or we say concatenate, and multiply to duplicate the items.
End of explanation
"""
print(x[::-1])
y = sorted(x[::-1])
print(y)
"""
Explanation: Sorting
You may sort a list with sorted(x). Note that it returns a new list.
End of explanation
"""
print(x)
x.append('A')
print(x)
print(x)
x.insert(5,'B') # insert 'B' between x[4] and x[5], results in x[5] = 'B'
print(x)
print(x);
x.pop(5); # Removed the x[5] item and return it
print(x);
x.pop(-1); # Removed the last item and return it
print(x)
"""
Explanation: Add element (append); Remove element (pop); Insert element (insert)
These functions are modified in-place, i.e. the original list will be changed
End of explanation
"""
corr = (22.28552, 114.15769)
print(corr)
corr[0] = 10
"""
Explanation: Tuple
A Python tuple is similar to a list. The elements are in order but fixed once they are created. In other words, tuples are immutable. A tuple can store elements of different types.
Definition
It is defined with parentheses : (xx,xx,xx).
Get Element
The elements are called using a square bracket with an index starting from zero : x[y], 0..N.
Slice (sub-array)
You can slice the array using colon, in this case a[start:end] means items start up to end-1.
End of explanation
"""
# Creating an empty dictionary
location = {}
print(location)
# Defined with a curly bracket
location = {
'Berlin': (52.5170365, 13.3888599),
'London': (51.5073219, -0.1276474),
'Sydney': (-33.8548157, 151.2164539),
'Tokyo': (34.2255804, 139.294774527387),
'Paris': (48.8566101, 2.3514992),
'Moscow': (46.7323875, -117.0001651)
}
print(location)
# Update
location.update({'Hong Kong': (22.2793278, 114.1628131)})
print(location)
# Call element
location['Tokyo']
# Delete element
del location['Hong Kong']
location
for key, value in location.items():
print(key, value)
"""
Explanation: Dictionary
Dictionary is more flexible than list and its index is a string, it is defined with curly bracket:
data = {'k1' : y1 , 'k2' : y2 , 'k3' : y3 }
k1, k2, k3 are called keys while y1,y2 and y3 are elements.
Creating an empty dictionary
It is defined with a pair of curly bracket or the dict() fuction: data = {} or data = dict()
Creating a dictionary with initial values
It could be defined with a curly bracket with index:element pairs : data = {'k1' : y1 , 'k2' : y2 , 'k3' : y3 }.
It could also be defined with the dict() function : data = dict(k1=y1, k2=y2, k3=y3).
It could also be defined with tuples : data = {k: v for k, v in (('k1', y1),('k2',y2),('k3',y3))}.
Get Element
The elements are called using a square bracket with an index string : data[key].
Inserting/Updating a single value / multiple values
data['k1']=1 # Updates if 'k1' exists, else adds the element with index 'k1'
data.update({'k1':1})
data.update(dict(k1=1))
data.update(k1=1)
Multiple values : data.update({'k3':3,'k4':4}) # Updates 'k3' and adds 'k4'
Merged dictionary without modifying originals
data3 = {}
data3.update(data) # Modifies data3, not data
data3.update(data2) # Modifies data3, not data2
Delete an item
del data[key] # Removes specific element in a dictionary
data.pop(key) # Removes the key & returns the value
data.clear() # Clears entire dictionary
Check if a key is existed
key in data # Return a boolean
Iterate through pairs
for key in data: # Iterates just through the keys, ignoring the values
for key, value in d.items(): # Iterates through the pairs
End of explanation
"""
### More on slicing in list and tuple
# Demonstrate every common slice form on the list x defined earlier.
start=2
end=5
step=2
print("Original:", x)
print("items start through end-1 :", x[start:end]) # items start through end-1
print("items start through the rest of the array :", x[start:]) # items start through the rest of the array
print("items from the beginning through end-1 :", x[:end]) # items from the beginning through end-1
print("whole array :", x[:]) # whole array (a shallow copy)
print("last item in the array :", x[-1]) # last item in the array
print("last two items in the array :", x[-2:]) # last two items in the array
print("everything except the last two items :", x[:-2]) # everything except the last two items
print("start through not past end, by step", x[start:end:step]) # start through not past end, by step
"""
Explanation: Extra reading:
End of explanation
"""
|
mathemage/h2o-3 | examples/deeplearning/notebooks/deeplearning_image_reconstruction_and_clustering.ipynb | apache-2.0 | %matplotlib inline
import matplotlib
import numpy as np
import pandas as pd
import scipy.io
import matplotlib.pyplot as plt
from IPython.display import Image, display
import h2o
from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
h2o.init()
"""
Explanation: Image Space Projection using Autoencoders
In this example we are going to autoencode the faces of the olivetti dataset and try to reconstruct them back.
End of explanation
"""
!wget -c http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.tar.Z
!tar xzvf att_faces.tar.Z;rm att_faces.tar.Z;
"""
Explanation: http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html
End of explanation
"""
import re
def read_pgm(filename, byteorder='>'):
    """Return image data from a raw (binary) PGM file as a numpy array.

    Format specification: http://netpbm.sourceforge.net/doc/pgm.html

    Parameters
    ----------
    filename : str
        Path to a binary ("P5") PGM file.
    byteorder : str, optional
        Byte order for 16-bit samples ('>' big-endian by default);
        ignored when maxval < 256 (8-bit samples).

    Returns
    -------
    numpy.ndarray of shape (height, width)
        Pixel values; dtype uint8 for 8-bit files, uint16 otherwise.

    Raises
    ------
    ValueError
        If the file does not start with a valid raw PGM header.
    """
    with open(filename, 'rb') as f:
        raw = f.read()
    # Raw byte strings (rb"...") keep \s and \d as regex escapes; plain
    # bytes literals would emit invalid-escape warnings on Python 3.6+.
    try:
        header, width, height, maxval = re.search(
            rb"(^P5\s(?:\s*#.*[\r\n])*"
            rb"(\d+)\s(?:\s*#.*[\r\n])*"
            rb"(\d+)\s(?:\s*#.*[\r\n])*"
            rb"(\d+)\s(?:\s*#.*[\r\n]\s)*)", raw).groups()
    except AttributeError:
        raise ValueError("Not a raw PGM file: '%s'" % filename)
    # Pixel data starts immediately after the header: one unsigned byte
    # per sample for 8-bit images, two bytes (in `byteorder`) otherwise.
    return np.frombuffer(raw,
                         dtype='u1' if int(maxval) < 256 else byteorder+'u2',
                         count=int(width)*int(height),
                         offset=len(header)
                         ).reshape((int(height), int(width)))
# Quick sanity check: load one face (little-endian samples) and display it.
image = read_pgm("orl_faces/s12/6.pgm", byteorder='<')
image.shape
plt.imshow(image, plt.cm.gray)
plt.show()
import glob
import os
from collections import defaultdict
# Collect every face image; the directory component (e.g. "s12") is the
# subject label.
images = glob.glob("orl_faces/**/*.pgm")
data = defaultdict(list)  # subject label -> list of flattened images
image_data = []           # every flattened image, in glob order
for img in images:
    # Paths look like orl_faces/<label>/<n>.pgm, so the middle part is the label.
    _,label,_ = img.split(os.path.sep)
    # Flatten each image into a single row of pixel values.
    imgdata = read_pgm(img, byteorder='<').flatten().tolist()
    data[label].append(imgdata)
    image_data.append(imgdata)
"""
Explanation: We now need some code to read pgm files.
Thanks to StackOverflow we have some code to leverage:
End of explanation
"""
faces = h2o.H2OFrame(image_data)
faces.shape
from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
model = H2OAutoEncoderEstimator(
activation="Tanh",
hidden=[50],
l1=1e-4,
epochs=10
)
model.train(x=faces.names, training_frame=faces)
model
"""
Explanation: Let's import it to H2O
End of explanation
"""
import pandas as pd
gaussian_noise = np.random.randn(10304)
plt.imshow(gaussian_noise.reshape(112, 92), plt.cm.gray);
"""
Explanation: Reconstructing the hidden space
Now that we have our model trained, we would like to understand better what is the internal representation of this model? What makes a face a .. face?
We will provide to the model some gaussian noise and see what is the results.
We start by creating some Gaussian noise:
End of explanation
"""
gaussian_noise_pre = dict(zip(faces.names,gaussian_noise))
gaussian_noise_hf = h2o.H2OFrame.from_python(gaussian_noise_pre)
result = model.predict(gaussian_noise_hf)
result.shape
img = result.as_data_frame()
img_data = img.T.values.reshape(112, 92)
plt.imshow(img_data, plt.cm.gray);
"""
Explanation: Then we import this data inside H2O. We have to first map the columns to the gaussian data.
End of explanation
"""
|
minesh1291/Practicing-Kaggle | MNIST_2017/dump_/men_2018_0ld_logistic_script.ipynb | gpl-3.0 | #the seed information
#df_seeds = pd.read_csv('../input/NCAATourneySeeds.csv')
#print(df_seeds.shape)
#print(df_seeds.head())
#print(df_seeds.Season.value_counts())
#the seed information
df_seeds = pd.read_csv('../input/NCAATourneySeeds_SampleTourney2018.csv')
print(df_seeds.shape)
print(df_seeds.head())
#print(df_seeds.Season.value_counts())
#tour information
df_tour = pd.read_csv('../input/RegularSeasonCompactResults_Prelim2018.csv')
print(df_tour.shape)
print(df_tour.head())
"""
Explanation: First we import some datasets of interest
End of explanation
"""
# Extract the numeric seed: int(x[1:3]) keeps the two digits after the
# region letter.  NOTE(review): assumes seeds look like "W01" -- play-in
# seeds such as "W11a" would also parse as 11 here; confirm against data.
df_seeds['seed_int'] = df_seeds['Seed'].apply( lambda x : int(x[1:3]) )
# One seed lookup keyed on the winning team, one keyed on the losing team.
df_winseeds = df_seeds.loc[:, ['TeamID', 'Season', 'seed_int']].rename(columns={'TeamID':'WTeamID', 'seed_int':'WSeed'})
df_lossseeds = df_seeds.loc[:, ['TeamID', 'Season', 'seed_int']].rename(columns={'TeamID':'LTeamID', 'seed_int':'LSeed'})
# Attach the winner's seed, then the loser's seed, to every game record.
df_dummy = pd.merge(left=df_tour, right=df_winseeds, how='left', on=['Season', 'WTeamID'])
df_concat = pd.merge(left=df_dummy, right=df_lossseeds, on=['Season', 'LTeamID'])
print(df_concat.shape)
print(df_concat.head())
"""
Explanation: Now we separate the winners from the losers and organize our dataset
End of explanation
"""
df_concat['DiffSeed'] = df_concat[['LSeed', 'WSeed']].apply(lambda x : 0 if x[0] == x[1] else 1, axis = 1)
print(df_concat.shape)
print(df_concat.head())
print(df_concat.Season.value_counts())
"""
Explanation: Now we match the detailed results to the merge dataset above
End of explanation
"""
df_sample_sub1 = pd.read_csv('../input/SampleSubmissionStage1.csv')
#prepares sample submission
df_sample_sub2 = pd.read_csv('../input/SampleSubmissionStage2.csv')
df_sample_sub=pd.concat([df_sample_sub1, df_sample_sub2])
print(df_sample_sub.shape)
print(df_sample_sub.head())
df_sample_sub['Season'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[0]) )
df_sample_sub['TeamID1'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[1]) )
df_sample_sub['TeamID2'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[2]) )
print(df_sample_sub.shape)
print(df_sample_sub.head())
"""
Explanation: Here we get our submission info
End of explanation
"""
# Build the training set as two views of the same games: one from the
# winner's perspective ("team 1" won, Result = 1) and one from the
# loser's ("team 1" lost, Result = 0) -- perfectly balanced by construction.
winners = df_concat.rename( columns = { 'WTeamID' : 'TeamID1',
                                       'LTeamID' : 'TeamID2',
                                       'WScore' : 'Team1_Score',
                                       'LScore' : 'Team2_Score'}).drop(['WSeed', 'LSeed', 'WLoc'], axis = 1)
winners['Result'] = 1.0
losers = df_concat.rename( columns = { 'WTeamID' : 'TeamID2',
                                      'LTeamID' : 'TeamID1',
                                      'WScore' : 'Team2_Score',
                                      'LScore' : 'Team1_Score'}).drop(['WSeed', 'LSeed', 'WLoc'], axis = 1)
losers['Result'] = 0.0
train = pd.concat( [winners, losers], axis = 0).reset_index(drop = True)
# Score-derived features, always from team 1's point of view.
train['Score_Ratio'] = train['Team1_Score'] / train['Team2_Score']
train['Score_Total'] = train['Team1_Score'] + train['Team2_Score']
train['Score_Pct'] = train['Team1_Score'] / train['Score_Total']
print(train.shape)
print(train.head())
"""
Explanation: Training Data Creation
End of explanation
"""
years = [2014, 2015, 2016, 2017,2018]
"""
Explanation: We will only consider years relevant to our test submission
End of explanation
"""
train_test_inner = pd.merge( train.loc[ train['Season'].isin(years), : ].reset_index(drop = True),
df_sample_sub.drop(['ID', 'Pred'], axis = 1),
on = ['Season', 'TeamID1', 'TeamID2'], how = 'inner' )
train_test_inner.head()
train_test_inner.shape
"""
Explanation: Now lets just look at TeamID2, or just the second team info.
End of explanation
"""
# Median number of overtime periods per (season, team), computed once with
# the team appearing as TeamID1 and once as TeamID2.
team1d_num_ot = train_test_inner.groupby(['Season', 'TeamID1'])['NumOT'].median().reset_index()\
                                .set_index('Season').rename(columns = {'NumOT' : 'NumOT1'})
team2d_num_ot = train_test_inner.groupby(['Season', 'TeamID2'])['NumOT'].median().reset_index()\
                                .set_index('Season').rename(columns = {'NumOT' : 'NumOT2'})
num_ot = team1d_num_ot.join(team2d_num_ot).reset_index()
# Combine the two per-team medians into one whole-number OT count.
# NOTE(review): an earlier comment mentioned "subtract by one to prevent
# overcounting", but the code only rounds the sum -- no subtraction happens.
num_ot['NumOT'] = num_ot[['NumOT1', 'NumOT2']].apply(lambda x : round( x.sum() ), axis = 1 )
num_ot.head()
"""
Explanation: From the inner join, we will create data per team id to estimate the parameters we are missing that are independent of the year. Essentially, we are trying to estimate the average behavior of the team across the year.
End of explanation
"""
def geo_mean(x):
    """Geometric mean of `x`: exponentiate the average log value."""
    logs = np.log(x)
    return np.exp(np.mean(logs))
def harm_mean(x):
    """Harmonic mean of `x`: the reciprocal of the mean reciprocal."""
    mean_reciprocal = np.mean(x ** -1.0)
    return mean_reciprocal ** -1.0
team1d_score_spread = train_test_inner.groupby(['Season', 'TeamID1'])[['Score_Ratio', 'Score_Pct']]\
.agg({ 'Score_Ratio': geo_mean, 'Score_Pct' : harm_mean}).reset_index()\
.set_index('Season').rename(columns = {'Score_Ratio' : 'Score_Ratio1', 'Score_Pct' : 'Score_Pct1'})
team2d_score_spread = train_test_inner.groupby(['Season', 'TeamID2'])[['Score_Ratio', 'Score_Pct']]\
.agg({ 'Score_Ratio': geo_mean, 'Score_Pct' : harm_mean}).reset_index()\
.set_index('Season').rename(columns = {'Score_Ratio' : 'Score_Ratio2', 'Score_Pct' : 'Score_Pct2'})
score_spread = team1d_score_spread.join(team2d_score_spread).reset_index()
#geometric mean of score ratio of team 1 and inverse of team 2
score_spread['Score_Ratio'] = score_spread[['Score_Ratio1', 'Score_Ratio2']].apply(lambda x : ( x[0] * ( x[1] ** -1.0) ), axis = 1 ) ** 0.5
#harmonic mean of score pct
score_spread['Score_Pct'] = score_spread[['Score_Pct1', 'Score_Pct2']].apply(lambda x : 0.5*( x[0] ** -1.0 ) + 0.5*( 1.0 - x[1] ) ** -1.0, axis = 1 ) ** -1.0
score_spread.head()
"""
Explanation: Here we look at the comparable statistics. For the TeamID2 column, we would consider the inverse of the ratio, and 1 minus the score attempt percentage.
End of explanation
"""
X_train = train_test_inner.loc[:, ['Season', 'NumOT', 'Score_Ratio', 'Score_Pct']]
train_labels = train_test_inner['Result']
train_test_outer = pd.merge( train.loc[ train['Season'].isin(years), : ].reset_index(drop = True),
df_sample_sub.drop(['ID', 'Pred'], axis = 1),
on = ['Season', 'TeamID1', 'TeamID2'], how = 'outer' )
train_test_outer = train_test_outer.loc[ train_test_outer['Result'].isnull(),
['TeamID1', 'TeamID2', 'Season']]
train_test_missing = pd.merge( pd.merge( score_spread.loc[:, ['TeamID1', 'TeamID2', 'Season', 'Score_Ratio', 'Score_Pct']],
train_test_outer, on = ['TeamID1', 'TeamID2', 'Season']),
num_ot.loc[:, ['TeamID1', 'TeamID2', 'Season', 'NumOT']],
on = ['TeamID1', 'TeamID2', 'Season'])
"""
Explanation: Now lets create a model just solely based on the inner group and predict those probabilities.
We will get the teams with the missing result.
End of explanation
"""
# Feature matrix for the games that still need a prediction.
X_test = train_test_missing.loc[:, ['Season', 'NumOT', 'Score_Ratio', 'Score_Pct']]
n = X_train.shape[0]
# Stack train and test before one-hot encoding so both frames end up with
# exactly the same dummy columns.
train_test_merge = pd.concat( [X_train, X_test], axis = 0 ).reset_index(drop = True)
train_test_merge = pd.concat( [pd.get_dummies( train_test_merge['Season'].astype(object) ),
                              train_test_merge.drop('Season', axis = 1) ], axis = 1 )
train_test_merge = pd.concat( [pd.get_dummies( train_test_merge['NumOT'].astype(object) ),
                              train_test_merge.drop('NumOT', axis = 1) ], axis = 1 )
# Split back: the first n rows are the training set.
X_train = train_test_merge.loc[:(n - 1), :].reset_index(drop = True)
X_test = train_test_merge.loc[n:, :].reset_index(drop = True)
# Min-max scale both sets using the *training* statistics only; the tiny
# epsilon guards against division by zero for constant columns.
x_max = X_train.max()
x_min = X_train.min()
X_train = ( X_train - x_min ) / ( x_max - x_min + 1e-14)
X_test = ( X_test - x_min ) / ( x_max - x_min + 1e-14)
print(X_train.shape)
print(X_train.head())
print(train_labels.shape)
print(train_labels.head())
print(train_labels.value_counts())
print(X_test.shape)
print(X_test.head())
from sklearn.linear_model import LogisticRegressionCV
# 5-fold cross-validated logistic regression over 8 candidate C values,
# scored with (negative) log loss.
log_clf = LogisticRegressionCV(cv = 5,Cs=8,n_jobs=4,scoring="neg_log_loss")
log_clf.fit( X_train, train_labels )
import matplotlib.pyplot as plt
# CV scores for the positive class, one curve per fold.
plt.plot(log_clf.scores_[1])
# plt.ylabel('some numbers')
plt.show()
"""
Explanation: We scale our data for our logistic regression, and make sure our categorical variables are properly processed.
End of explanation
"""
train_test_inner['Pred1'] = log_clf.predict_proba(X_train)[:,1]
train_test_missing['Pred1'] = log_clf.predict_proba(X_test)[:,1]
"""
Explanation: Here we store our probabilities
End of explanation
"""
sub = pd.merge(df_sample_sub,
pd.concat( [train_test_missing.loc[:, ['Season', 'TeamID1', 'TeamID2', 'Pred1']],
train_test_inner.loc[:, ['Season', 'TeamID1', 'TeamID2', 'Pred1']] ],
axis = 0).reset_index(drop = True),
on = ['Season', 'TeamID1', 'TeamID2'], how = 'outer')
print(sub.shape)
print(sub.head())
"""
Explanation: We merge our predictions
End of explanation
"""
team1_probs = sub.groupby('TeamID1')['Pred1'].apply(lambda x : (x ** -1.0).mean() ** -1.0 ).fillna(0.5).to_dict()
team2_probs = sub.groupby('TeamID2')['Pred1'].apply(lambda x : (x ** -1.0).mean() ** -1.0 ).fillna(0.5).to_dict()
"""
Explanation: We get the 'average' probability of success for each team
End of explanation
"""
# Fill matchups the model never saw: assume independence and use
# P(team1 wins on average) * P(team2 loses on average); otherwise keep the
# fitted probability Pred1.
sub['Pred'] = sub[['TeamID1', 'TeamID2','Pred1']]\
        .apply(lambda x : team1_probs.get(x[0]) * ( 1 - team2_probs.get(x[1]) ) if np.isnan(x[2]) else x[2],
               axis = 1)
print(sub.shape)
print(sub.head())
sub.ID.value_counts()
# Collapse duplicate IDs by averaging their predictions.
sub=sub.groupby('ID', as_index=False).agg({"Pred": "mean"})
sub.ID.value_counts()
# Keep only the stage-2 (2018) matchups for the final submission file.
sub2018=sub.loc[sub['ID'].isin(df_sample_sub2.ID)]
print(sub2018.shape)
sub2018[['ID', 'Pred']].to_csv('sub_2018_all_only18.csv', index = False)
sub2018[['ID', 'Pred']].head(20)
"""
Explanation: Any missing value for the prediciton will be imputed with the product of the probabilities calculated above. We assume these are independent events.
End of explanation
"""
|
guyhoffman/hri-statistics | notebooks/Binary_HMM_Filtering.ipynb | mit | import numpy as np
from matplotlib import pyplot as plt
class BinaryHMM:
    """Hidden Markov model with a binary hidden state and binary evidence.

    startprob: np.array(shape(2,)) - Initial belief [P(x), P(~x)]
    transmat: np.array(shape(2,2)) - First column is P(X|x), second is P(X|~x)
    emissionprob: np.array(shape(2,2)) - First column is P(E|x), second is P(E|~x)
    """
    def __init__(self, startprob, transmat, emissionprob):
        # Store every distribution as a flat (2,) vector so timestep(),
        # evidence() and print_belief() all operate on the same shape.
        # (The original reshaped startprob to (2,1) but left _belief flat,
        # so reset() silently switched the belief to a column vector.)
        self._startprob = np.asarray(startprob, dtype=float).ravel()
        self._transmat = np.asarray(transmat, dtype=float)
        self._emissionprob = np.asarray(emissionprob, dtype=float)
        self._belief = self._startprob.copy()

    def timestep(self):
        """Advance the belief one step: B*(X_{t+1}) = T . B(X_t)."""
        self._belief = np.dot(self._transmat, self._belief)

    def evidence(self, e):
        """Condition the current belief on evidence e (True / False)."""
        row = 1 - e  # True selects row 0, False selects row 1
        weighted = self._emissionprob[row] * self._belief  # likelihood re-weighting
        self._belief = weighted / weighted.sum()  # normalize to sum up to 1

    def filter(self, e):
        """One full filtering step: time update followed by evidence update."""
        self.timestep()
        self.evidence(e)

    def reset(self, startprob=None):
        """Restore the belief to `startprob` (or the stored initial belief)."""
        if startprob is not None:
            self._startprob = np.asarray(startprob, dtype=float).ravel()
        if self._startprob is not None:
            self._belief = self._startprob.copy()

    def print_belief(self):
        """Print the current belief and draw it as a two-bar chart."""
        print ("Current belief: ", self._belief)
        plt.bar([0, 1], self._belief, width=.5)
        ax = plt.gca()
        ax.set_xlim(-.5, 1.5)
        ax.set_ylim(0, 1)
        labels = ["Hates", "Hates Not"]
        ax.set_xticks([0, 1])
        ax.set_xticklabels(labels)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
"""
Explanation: Binary Hidden Markov Model Filtering
This notebook implements filtering on a simple HMM with binary states and evidence variables, walking you through an example step-by-step.
You can also see how the HMM model can be represented as two matrices and subsequently both stages of filtering reduce to the efficient operation of matrix multiplication.
End of explanation
"""
# These are all the vectors / matrices representing the above model.
# startprob:    P(X_1) = [P(hates), P(not hates)] -- uniform prior.
# transmat:     column j holds P(X_{t+1} | X_t = state j): a coin flip
#               after a hateful day, only 0.1 chance of new hate otherwise.
# emissionprob: column j holds P(E | X = state j): a curt email is near
#               certain under hate (0.99) and still common without (0.7).
startprob = np.array([.5, .5])
transmat = np.array([[0.5, 0.1], [0.5, 0.9]])
emissionprob = np.array([[0.99, 0.7], [0.01, 0.3]])
"""
Explanation: Set up the Model
Let's take an example of whether your advisor hates you, which is the hidden variable. The evidence is whether they sent you a curt email. We will assume that their mood doesn't change throughout the day and that they send you one email per day.
Initial Probabilities - $P(X_1)$
Initially, we do not know anything about whether they hate you on Day 1 of the semester.
$$
P(X_1=hates) = 0.5
$$
And therefore
$$
P(X_1=\neg hates) = 0.5
$$
In other words, we start with a uniform distribution about our Beliefs at time $t=1$. We will simplify by just writing $P(X_i=h)$ and $P(X_i=\neg h)$.
Transition Model - $P(X_{i+1}|X_i)$
The good thing about this advisor is that they don't hold a grudge. Or, more likely, they don't remember stuff from one day to the next. This is lucky, because if they hate you at time $t=i$, you can't say much about $t=i+1$. It's a complete toss-up what will happen the next day.
Formally
$$
P(X_{i+1}=h | X_i=h) = 0.5 \quad \text{and therefore} \quad P(X_{i+1}=\neg h | X_i=h) = 0.5 \
$$
If they don't hate you at $t=i$, your odds are actually better the next day. The chance of them hating you all of a sudden are small, a mere 10%.
$$
P(X_{i+1}=h | X_i=\neg h) = 0.1 \quad \text{and therefore} \quad P(X_{i+1}=\neg h | X_i=\neg h) = 0.9
$$
Evidence (Sensor) Model - $P(e_i|X_i)$
The only way for us to guess if they hate us or not is based on whether they sent us a curt email. From talking to other students, we discover that there's a pattern to their email sending conditioned on their feelings for us. If they hate us, they are almost certain to sent us a curt email:
$$
P(e_i=curt | X_i=h) = 0.99 \quad \text{and therefore} \quad P(e_i=\neg curt | X_i=h) = 0.01 \
$$
Again, we will shorten to $P(e_i=c)$.
It turns out that if they don't hate us, they will still usually send a curt email, just because they're insanely busy. In fact that will happen 70% of the time they don't hate us.
$$
P(e_i=c | X_i=\neg h) = 0.7 \quad \text{and therefore} \quad P(e_i=\neg c | \ X_i=\neg h) = 0.3 \
$$
End of explanation
"""
model = BinaryHMM (startprob, transmat, emissionprob)
model.print_belief()
"""
Explanation: Let's initialize the HMM with this model. As you can see, the current belief $B(X_1)$ is 50%-50%.
End of explanation
"""
model.timestep()
model.print_belief()
"""
Explanation: Time Passage / Dynamics
You wait a day, and now have a new belief based on your Transition Model. This is $B^*(X_2)$, the intermediate belief after time has passed, but *before* you got today's email.
This time step belief update is given by
$$
B^*(X_{t+1}) = \sum_{x_t} B(x_t)P(X_{t+1} | x_t)
$$
Or in our case
$$
B^*(X_{t+1}=h) = 0.5 \times 0.5 + 0.5 \times 0.1 = 0.25 + 0.05 = 0.3
$$
Read this as: What's the probability of them hating us today knowing only the belief from the previous day?
Either they hated us yesterday (0.5 chance), then it's a toss-up (0.5), or they didn't (also 0.5 chance), but then it's unlikely that they hate us today (0.1).
The probability of them not hating us is the 1-complement. You could equally calculate from the second vector component.
Also notice that this can be compactly represented as a dot product of the transition matrix and the previous day's belief. In our code it looks like this:
python
def timestep(self):
self._belief = np.dot(self._transmat, self._belief)
End of explanation
"""
model.evidence(True)
model.print_belief()
"""
Explanation: Evidence Observation
Now you got your daily email, and what do you know? It's curt.
You want to update your belief based on the evidence and get $B(X_2)$
This evidence-based belief update is done by re-weighing each of your belief values by the likelihood (the Bayesian "flip") of the evidence given the belief value. In other words, you take your intermediate belief $B^*(X_{t+1})$ and for each value you multiply it by the probability to have seen the specific evidence given that value, normalized by the posterior to sum up to 1.
Formally:
$$
B(X_{t+1}) = \frac{B^*(x_{t+1})P(e_{t+1} | x_{t+1})}{P(e_{t+1})}
$$
Remember that the denominator is just the sum of all the values for the numerator.
Let's calculate the numerators:
$$
B(X_{t+1}=h) \propto 0.3 \times 0.99 = 0.297
$$
Read this as: The product of the current belief (probability) of being hated times the likelihood of the curt email evidence if the advisor actually hated you.
And conversely:
$$
B(X_{t+1}=\neg h) \propto 0.7 \times 0.7 = 0.49
$$
Read this as: The product of the current belief of not being hated times the likelihood of the curt email evidence if there is no hatred involved. Due to being busy, that is still quite likely (70%).
To get the actual probability for $B(X_{t+1}=h)$ we normalize by the sum of all probability values:
$$
B(X_{t+1}=h) = \frac{0.297}{0.297+.49} = 0.377
$$
And conversely:
$$
B(X_{t+1}=\neg h) = \frac{0.49}{0.297+.49} = 0.623
$$
In our code it looks like this:
python
def evidence(self, e):
row = 1-e # True is row 0, False is row 1
p_e = (self._emissionprob[row] * self._belief.T).T # Re-weigh belief probability by likelihood
self._belief = p_e / sum(p_e) # Normalize to sum up to 1
End of explanation
"""
# Ten days of filtering with a curt email observed every day; record the
# belief P(hates) after each transition and again after each evidence update.
model.reset()
belief_seq = []
for i in range(10):
    model.timestep()
    print ("After Transition")
    print (model._belief)
    belief_seq.append(model._belief[0])
    model.evidence(True)
    print ("After Curt Email Evidence")
    print (model._belief)
    belief_seq.append(model._belief[0])
    print ("------------")
# The sawtooth shows time passing pulling P(hates) down and each curt email
# pushing it back up.
plt.plot(belief_seq, 'r-')
plt.gca().set_ylim(0, .4)
Explanation: Looking at these numbers, it is interesting to note that even though the email was curt, our model infers that the probability of the advisor hating us is still low, and close to the pre-evidence value of 0.3. It is slightly higher because curt emails are more likely when they hate us, but the "prior" of 0.3 and the high likelihood of the curt emails without hate held the increase back.
The transition model made our original estimate go from 0.5 to 0.3 because, let's face it, the advisor most likely does not hate you. They are just busy. The transition model also indicates that, if they did hate you, they generally forgot about it the next day, which also contributes to the decline in $P(X_t=h)$.
Let's see what happens when we run this for ten more days, with you receiving a curt email every day.
End of explanation
"""
model.timestep()
print ("After Transition")
print (model._belief)
belief_seq.append(model._belief[0])
model.evidence(False)
print ("After Long Email Evidence")
print (model._belief)
belief_seq.append(model._belief[0])
plt.plot(belief_seq, '-r')
plt.gca().set_ylim(0, .4)
"""
Explanation: Every day that passes the probability of hatred goes down ($B^*$ before evidence), and every curt email, it goes up. Eventually, our belief near-settles on an oscillation between 0.21 and 0.27. This convergence is due to the fact that our evidence is constant.
What would happen if we get a long email all of a sudden? Try to guess before running the code. Then try to calculate it by hand.
End of explanation
"""
|
phobson/statsmodels | examples/notebooks/tsa_dates.ipynb | bsd-3-clause | from __future__ import print_function
import statsmodels.api as sm
import numpy as np
import pandas as pd
"""
Explanation: Dates in timeseries models
End of explanation
"""
data = sm.datasets.sunspots.load()
"""
Explanation: Getting started
End of explanation
"""
from datetime import datetime
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
"""
Explanation: Right now an annual date series must be datetimes at the end of the year.
End of explanation
"""
# pandas removed the old pd.TimeSeries alias; a plain Series carrying a
# DatetimeIndex is the equivalent time-series container.
endog = pd.Series(data.endog, index=dates)
"""
Explanation: Using Pandas
Make a pandas TimeSeries or DataFrame
End of explanation
"""
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
"""
Explanation: Instantiate the model
End of explanation
"""
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
"""
Explanation: Out-of-sample prediction
End of explanation
"""
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
"""
Explanation: Using explicit dates
End of explanation
"""
print(ar_res.data.predict_dates)
"""
Explanation: This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
End of explanation
"""
|
BjornFJohansson/pydna-examples | notebooks/strawberry_aat/strawberry.ipynb | bsd-3-clause | # Import the pydna package functions
from pydna.all import *
# Give your email address to Genbank, so they can contact you.
# This is a requirement for using their services
gb=Genbank("bjornjobb@gmail.com")
# download the SAAT CDS from Genbank
# We know from inspecting the
saat = gb.nucleotide("AF193791 REGION: 78..1895")
# The representation of the saat Dseqrecord object contains a link to Genbank
saat
# design two new primers for SAAT
saat_amplicon = primer_design(saat)
fw="aa"+saat_amplicon.forward_primer
rv=saat_amplicon.reverse_primer
# We can set the primer identities to something descriptive
fw.id, rv.id = "fw_saat_cds", "rv_saat_cds"
saat_pcr_prod = pcr(fw,rv, saat)
# The result is an object of the Amplicon class
saat_pcr_prod
# The object has several useful methods like .figure()
# which shows how the primers anneal
saat_pcr_prod.figure()
# read the cloning vector from a local file
pYPKa=read("pYPKa.gb")
# This is a GenbankFile object, its representation include a link to the local file:
pYPKa
# import the restriction enzyme AjiI from Biopython
from Bio.Restriction import AjiI
# cut the vector with the .linearize method. This will give an error is more than one
# fragment is formed
pYPKa_AjiI = pYPKa.linearize(AjiI)
# The result from the digestion is a linear Dseqrecord object
pYPKa_AjiI
# clone the PCR product by adding the linearized vector to the insert
# and close it using the .looped() method.
pYPKa_A_saat = ( pYPKa_AjiI + saat_pcr_prod ).looped()
pYPKa_A_saat
# read promoter vector from a local file
pYPKa_Z_prom = read("pYPKa_Z_TEF1.gb")
# read terminator vector from a local file
pYPKa_E_term = read("pYPKa_E_TPI1.gb")
pYPKa_Z_prom
pYPKa_E_term
[pYPKa_Z_prom,pYPKa_Z_prom]
"""
Explanation: A strawberry flavour gene vector for Saccharomyces cerevisiae
This Jupyter notebook describes the simulated cloning of the strawberry Fragaria × ananassa alcohol acyltransferase SAAT gene and the construction of a S. cerevisiae expression vector for this gene.
The SAAT gene is involved in the production of the strawberry fragrance. It is necessary to first produce cDNA, a process which is not decribed in this notebook. Here is a recent protocol for the extraction of nucleic acids from Strawberry.
End of explanation
"""
# Standard primers
p567,p577,p468,p467,p568,p578 = parse_primers('''
>567_pCAPsAjiIF (23-mer)
GTcggctgcaggtcactagtgag
>577_crp585-557 (29-mer)
gttctgatcctcgagcatcttaagaattc
>468_pCAPs_release_fw (25-mer)
gtcgaggaacgccaggttgcccact
>467_pCAPs_release_re (31-mer)
ATTTAAatcctgatgcgtttgtctgcacaga
>568_pCAPsAjiIR (22-mer)
GTGCcatctgtgcagacaaacg
>578_crp42-70 (29-mer)
gttcttgtctcattgccacattcataagt''')
p567
# Promoter amplified using p577 and p567
p = pcr(p577, p567, pYPKa_Z_prom)
# Gene amplified using p468 and p467
g = pcr(p468, p467, pYPKa_A_saat)
# Terminator amplified using p568 and p578
t = pcr(p568, p578, pYPKa_E_term)
# Yeast backbone vector read from a local file
pYPKpw = read("pYPKpw.gb")
from Bio.Restriction import ZraI
# Vector linearized with ZraI
pYPKpw_lin = pYPKpw.linearize(ZraI)
# Assembly simulation between four linear DNA fragments:
# plasmid, promoter, gene and terminator
# Only one circular product is formed (8769 bp)
asm = Assembly( (pYPKpw_lin, p, g, t) )
asm
# Inspect the only circular product
candidate = asm.assemble_circular()[0]
candidate.figure()
# Synchronize vectors
pYPK0_TDH3_FaPDC_TEF1 = candidate.synced(pYPKa)
# Write new vector to local file
pYPK0_TDH3_FaPDC_TEF1.write("pYPK0_TDH3_FaPDC_TPI1.gb")
"""
Explanation: In the cell below, primers relevant to the Yeast Pathway Kit are read into six sequence objects. These are similar to the ones created in cell [3]
End of explanation
"""
from Bio.Restriction import PvuI
#PYTEST_VALIDATE_IGNORE_OUTPUT
%matplotlib inline
from pydna.gel import Gel, weight_standard_sample
standard = weight_standard_sample('1kb+_GeneRuler')
Gel( [ standard,
pYPKpw.cut(PvuI),
pYPK0_TDH3_FaPDC_TEF1.cut(PvuI) ] ).run()
"""
Explanation: The final vector pYPKa_TDH3_FaPDC_TEF1 has 8769 bp.
The sequence can be inspected by the hyperlink above.
The restriction enzyme PvuI cuts twice in the plasmid backbone and once in the SAAT gene.
End of explanation
"""
|
anandha2017/udacity | nd101 Deep Learning Nanodegree Foundation/DockerImages/27_seq2seq/notebooks/seq2seq/sequence_to_sequence_implementation.ipynb | mit | import numpy as np
import time
import helper
source_path = 'data/letters_source.txt'
target_path = 'data/letters_target.txt'
source_sentences = helper.load_data(source_path)
target_sentences = helper.load_data(target_path)
"""
Explanation: Character Sequence to Sequence
In this notebook, we'll build a model that takes in a sequence of letters, and outputs a sorted version of that sequence. We'll do that using what we've learned so far about Sequence to Sequence models. This notebook was updated to work with TensorFlow 1.1 and builds on the work of Dave Currie. Check out Dave's post Text Summarization with Amazon Reviews.
<img src="images/sequence-to-sequence.jpg"/>
Dataset
The dataset lives in the /data/ folder. At the moment, it is made up of the following files:
* letters_source.txt: The list of input letter sequences. Each sequence is its own line.
* letters_target.txt: The list of target sequences we'll use in the training process. Each sequence here is a response to the input sequence in letters_source.txt with the same line number.
End of explanation
"""
source_sentences[:50].split('\n')
"""
Explanation: Let's start by examining the current state of the dataset. source_sentences contains the entire input sequence file as text delimited by newline symbols.
End of explanation
"""
target_sentences[:50].split('\n')
"""
Explanation: target_sentences contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line from source_sentences. target_sentences contains a sorted characters of the line.
End of explanation
"""
def extract_character_vocab(data):
    """Build lookup tables mapping characters to integer ids and back.

    The four special tokens always occupy ids 0-3; every distinct
    character found in *data* (one sequence per line) follows them.
    Returns the pair ``(int_to_vocab, vocab_to_int)``.
    """
    special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']

    unique_chars = {ch for line in data.split('\n') for ch in line}
    vocab = special_words + list(unique_chars)

    int_to_vocab = dict(enumerate(vocab))
    vocab_to_int = {ch: idx for idx, ch in int_to_vocab.items()}

    return int_to_vocab, vocab_to_int
# Build int2letter and letter2int dicts
source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences)
target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences)
# Convert characters to ids
source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<UNK>']) for letter in line] for line in source_sentences.split('\n')]
target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<UNK>']) for letter in line] + [target_letter_to_int['<EOS>']] for line in target_sentences.split('\n')]
print("Example source sequence")
print(source_letter_ids[:3])
print("\n")
print("Example target sequence")
print(target_letter_ids[:3])
"""
Explanation: Preprocess
To do anything useful with it, we'll need to turn each string into a list of characters:
<img src="images/source_and_target_arrays.png"/>
Then convert the characters to their int values as declared in our vocabulary:
End of explanation
"""
from distutils.version import LooseVersion
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
"""
Explanation: This is the final shape we need them to be in. We can now proceed to building the model.
Model
Check the Version of TensorFlow
This will check to make sure you have the correct version of TensorFlow
End of explanation
"""
# Number of Epochs
epochs = 60
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 50
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 15
decoding_embedding_size = 15
# Learning Rate
learning_rate = 0.001
"""
Explanation: Hyperparameters
End of explanation
"""
def get_model_inputs():
    """Declare the graph's input placeholders.

    Returns placeholders for the source batch, the target batch, the
    learning rate and the per-sequence lengths, plus a derived scalar
    tensor holding the longest target length in the batch.
    """
    input_data = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    lr = tf.placeholder(tf.float32, name='learning_rate')

    source_sequence_length = tf.placeholder(
        tf.int32, (None,), name='source_sequence_length')
    target_sequence_length = tf.placeholder(
        tf.int32, (None,), name='target_sequence_length')
    # Longest target sequence in the batch; the decoder unrolls this far.
    max_target_sequence_length = tf.reduce_max(
        target_sequence_length, name='max_target_len')

    return (input_data, targets, lr, target_sequence_length,
            max_target_sequence_length, source_sequence_length)
"""
Explanation: Input
End of explanation
"""
def encoding_layer(input_data, rnn_size, num_layers,
                   source_sequence_length, source_vocab_size,
                   encoding_embedding_size):
    """Embed the source ids and run them through a stacked LSTM encoder.

    Returns the per-step encoder outputs together with the final hidden
    state, which is used to initialise the decoder.
    """
    # Look up a dense embedding vector for every source-character id.
    embedded_input = tf.contrib.layers.embed_sequence(
        input_data, source_vocab_size, encoding_embedding_size)

    def build_lstm(num_units):
        # Fixed seed keeps the weight initialisation reproducible.
        return tf.contrib.rnn.LSTMCell(
            num_units,
            initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))

    stacked_cells = tf.contrib.rnn.MultiRNNCell(
        [build_lstm(rnn_size) for _ in range(num_layers)])

    enc_output, enc_state = tf.nn.dynamic_rnn(
        stacked_cells, embedded_input,
        sequence_length=source_sequence_length, dtype=tf.float32)

    return enc_output, enc_state
"""
Explanation: Sequence to Sequence Model
We can now start defining the functions that will build the seq2seq model. We are building it from the bottom up with the following components:
2.1 Encoder
- Embedding
- Encoder cell
2.2 Decoder
1- Process decoder inputs
2- Set up the decoder
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
2.3 Seq2seq model connecting the encoder and decoder
2.4 Build the training graph hooking up the model with the
optimizer
2.1 Encoder
The first bit of the model we'll build is the encoder. Here, we'll embed the input data, construct our encoder, then pass the embedded data to the encoder.
Embed the input data using tf.contrib.layers.embed_sequence
<img src="images/embed_sequence.png" />
Pass the embedded input into a stack of RNNs. Save the RNN state and ignore the output.
<img src="images/encoder.png" />
End of explanation
"""
# Process the input we'll feed to the decoder
def process_decoder_input(target_data, vocab_to_int, batch_size):
    """Drop the final token of every target sequence and prepend <GO>."""
    # Slice away the last column; the decoder never consumes it.
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # A column of <GO> ids marks the start of decoding for every sequence.
    go_column = tf.fill([batch_size, 1], vocab_to_int['<GO>'])
    return tf.concat([go_column, trimmed], 1)
"""
Explanation: 2.2 Decoder
The decoder is probably the most involved part of this model. The following steps are needed to create it:
1- Process decoder inputs
2- Set up the decoder components
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
Process Decoder Input
In the training process, the target sequences will be used in two different places:
Using them to calculate the loss
Feeding them to the decoder during training to make the model more robust.
Now we need to address the second point. Let's assume our targets look like this in their letter/word form (we're doing this for readability. At this point in the code, these sequences would be in int form):
<img src="images/targets_1.png"/>
We need to do a simple transformation on the tensor before feeding it to the decoder:
1- We will feed an item of the sequence to the decoder at each time step. Think about the last timestep -- where the decoder outputs the final word in its output. The input to that step is the item before last from the target sequence. The decoder has no use for the last item in the target sequence in this scenario. So we'll need to remove the last item.
We do that using tensorflow's tf.strided_slice() method. We hand it the tensor, and the index of where to start and where to end the cutting.
<img src="images/strided_slice_1.png"/>
2- The first item in each sequence we feed to the decoder has to be GO symbol. So We'll add that to the beginning.
<img src="images/targets_add_go.png"/>
Now the tensor is ready to be fed to the decoder. It looks like this (if we convert from ints to letters/symbols):
<img src="images/targets_after_processing_1.png"/>
End of explanation
"""
def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size,
                   target_sequence_length, max_target_sequence_length, enc_state, dec_input):
    """Build the training and inference decoders, which share parameters.

    Parameters
    ----------
    target_letter_to_int : dict
        Mapping from target characters to integer ids.
    decoding_embedding_size : int
        Dimensionality of the decoder embedding vectors.
    num_layers : int
        Number of stacked LSTM layers.
    rnn_size : int
        Number of units per LSTM layer.
    target_sequence_length : Tensor
        1-D int32 tensor of per-sequence target lengths.
    max_target_sequence_length : Tensor
        Scalar tensor, longest target sequence in the batch.
    enc_state : encoder state
        Final encoder state used to initialise both decoders.
    dec_input : Tensor
        Target ids preprocessed by process_decoder_input().

    Returns
    -------
    (training_decoder_output, inference_decoder_output)
        Decoder outputs whose .rnn_output / .sample_id feed the loss and
        the predictions, respectively.

    NOTE(review): ``batch_size`` is read from module scope below, not a
    parameter of this function.
    """
    # 1. Decoder Embedding
    target_vocab_size = len(target_letter_to_int)
    dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)

    # 2. Construct the decoder cell
    def make_cell(rnn_size):
        # Fixed seed so weight initialisation is reproducible across runs.
        dec_cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                           initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return dec_cell

    dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])

    # 3. Dense layer to translate the decoder's output at each time
    # step into a choice from the target vocabulary
    output_layer = Dense(target_vocab_size,
                         kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))

    # 4. Set up a training decoder and an inference decoder
    # Training Decoder: fed the ground-truth targets at every time step.
    with tf.variable_scope("decode"):
        # Helper for the training process. Used by BasicDecoder to read inputs.
        training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
                                                            sequence_length=target_sequence_length,
                                                            time_major=False)
        # Basic decoder
        training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                           training_helper,
                                                           enc_state,
                                                           output_layer)
        # Perform dynamic decoding using the decoder
        training_decoder_output = tf.contrib.seq2seq.dynamic_decode(training_decoder,
                                                                    impute_finished=True,
                                                                    maximum_iterations=max_target_sequence_length)[0]

    # 5. Inference Decoder: feeds its own greedy predictions back in.
    # Reuses the same parameters trained by the training process
    with tf.variable_scope("decode", reuse=True):
        # One <GO> id per sequence in the batch (batch_size is a module-level global).
        start_tokens = tf.tile(tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_tokens')

        # Helper for the inference process.
        inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
                                                                    start_tokens,
                                                                    target_letter_to_int['<EOS>'])
        # Basic decoder
        inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                            inference_helper,
                                                            enc_state,
                                                            output_layer)
        # Perform dynamic decoding using the decoder
        inference_decoder_output = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
                                                                     impute_finished=True,
                                                                     maximum_iterations=max_target_sequence_length)[0]

    return training_decoder_output, inference_decoder_output
"""
Explanation: Set up the decoder components
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
1- Embedding
Now that we have prepared the inputs to the training decoder, we need to embed them so they can be ready to be passed to the decoder.
We'll create an embedding matrix like the following then have tf.nn.embedding_lookup convert our input to its embedded equivalent:
<img src="images/embeddings.png" />
2- Decoder Cell
Then we declare our decoder cell. Just like the encoder, we'll use an tf.contrib.rnn.LSTMCell here as well.
We need to declare a decoder for the training process, and a decoder for the inference/prediction process. These two decoders will share their parameters (so that all the weights and biases that are set during the training phase can be used when we deploy the model).
First, we'll need to define the type of cell we'll be using for our decoder RNNs. We opted for LSTM.
3- Dense output layer
Before we move to declaring our decoders, we'll need to create the output layer, which will be a tensorflow.python.layers.core.Dense layer that translates the outputs of the decoder to logits that tell us which element of the decoder vocabulary the decoder is choosing to output at each time step.
4- Training decoder
Essentially, we'll be creating two decoders which share their parameters. One for training and one for inference. The two are similar in that both are created using tf.contrib.seq2seq.BasicDecoder and tf.contrib.seq2seq.dynamic_decode. They differ, however, in that we feed the target sequences as inputs to the training decoder at each time step to make it more robust.
We can think of the training decoder as looking like this (except that it works with sequences in batches):
<img src="images/sequence-to-sequence-training-decoder.png"/>
The training decoder does not feed the output of each time step to the next. Rather, the inputs to the decoder time steps are the target sequence from the training dataset (the orange letters).
5- Inference decoder
The inference decoder is the one we'll use when we deploy our model to the wild.
<img src="images/sequence-to-sequence-inference-decoder.png"/>
We'll hand our encoder hidden state to both the training and inference decoders and have it process its output. TensorFlow handles most of the logic for us. We just have to use the appropriate methods from tf.contrib.seq2seq and supply them with the appropriate inputs.
End of explanation
"""
def seq2seq_model(input_data, targets, lr, target_sequence_length,
                  max_target_sequence_length, source_sequence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers):
    """Wire the encoder and decoder together into the full seq2seq graph.

    Returns the pair ``(training_decoder_output, inference_decoder_output)``
    produced by decoding_layer().

    BUGFIX: the original body ignored the ``enc_embedding_size`` and
    ``dec_embedding_size`` parameters and silently read the module-level
    ``encoding_embedding_size`` / ``decoding_embedding_size`` globals
    instead; the parameters are now used as intended.

    NOTE(review): ``target_letter_to_int`` and ``batch_size`` are still
    read from module scope below; they are not parameters.
    """
    # Pass the input data through the encoder. We'll ignore the encoder
    # output, but use the final state to seed the decoder.
    _, enc_state = encoding_layer(input_data,
                                  rnn_size,
                                  num_layers,
                                  source_sequence_length,
                                  source_vocab_size,
                                  enc_embedding_size)

    # Prepare the target sequences we'll feed to the decoder in training mode
    dec_input = process_decoder_input(targets, target_letter_to_int, batch_size)

    # Pass encoder state and decoder inputs to the decoders
    training_decoder_output, inference_decoder_output = decoding_layer(target_letter_to_int,
                                                                       dec_embedding_size,
                                                                       num_layers,
                                                                       rnn_size,
                                                                       target_sequence_length,
                                                                       max_target_sequence_length,
                                                                       enc_state,
                                                                       dec_input)

    return training_decoder_output, inference_decoder_output
"""
Explanation: 2.3 Seq2seq model
Let's now go a step above, and hook up the encoder and decoder using the methods we just declared
End of explanation
"""
# Build the graph
train_graph = tf.Graph()
# Set the graph to default to ensure that it is ready for training
with train_graph.as_default():
    # Load the model inputs
    input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_model_inputs()

    # Create the training and inference logits
    training_decoder_output, inference_decoder_output = seq2seq_model(input_data,
                                                                      targets,
                                                                      lr,
                                                                      target_sequence_length,
                                                                      max_target_sequence_length,
                                                                      source_sequence_length,
                                                                      len(source_letter_to_int),
                                                                      len(target_letter_to_int),
                                                                      encoding_embedding_size,
                                                                      decoding_embedding_size,
                                                                      rnn_size,
                                                                      num_layers)

    # Create tensors for the training logits and inference logits
    training_logits = tf.identity(training_decoder_output.rnn_output, 'logits')
    inference_logits = tf.identity(inference_decoder_output.sample_id, name='predictions')

    # Create the weights for sequence_loss
    # Mask out <PAD> positions so padding does not contribute to the loss.
    masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')

    with tf.name_scope("optimization"):
        # Loss function: per-timestep softmax cross-entropy, masked and averaged.
        cost = tf.contrib.seq2seq.sequence_loss(
            training_logits,
            targets,
            masks)

        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)

        # Gradient Clipping: clamp each gradient to [-5, 5] to guard
        # against exploding gradients in the recurrent layers.
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Model outputs training_decoder_output and inference_decoder_output both contain a 'rnn_output' logits tensor that looks like this:
<img src="images/logits.png"/>
The logits we get from the training tensor we'll pass to tf.contrib.seq2seq.sequence_loss() to calculate the loss and ultimately the gradient.
End of explanation
"""
def pad_sentence_batch(sentence_batch, pad_int):
    """Right-pad every sentence with *pad_int* up to the batch's longest one."""
    longest = max(len(sentence) for sentence in sentence_batch)
    padded = []
    for sentence in sentence_batch:
        padded.append(sentence + [pad_int] * (longest - len(sentence)))
    return padded
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """Yield (targets, sources, target lengths, source lengths) batches.

    Sequences in each batch are padded to the batch's longest sequence;
    any trailing partial batch is dropped.
    """
    n_batches = len(sources) // batch_size
    for batch_i in range(n_batches):
        start = batch_i * batch_size
        stop = start + batch_size

        padded_sources = np.array(
            pad_sentence_batch(sources[start:stop], source_pad_int))
        padded_targets = np.array(
            pad_sentence_batch(targets[start:stop], target_pad_int))

        # After padding, every sequence in a batch shares the same length;
        # the *_sequence_length placeholders still expect one entry each.
        target_lengths = [len(t) for t in padded_targets]
        source_lengths = [len(s) for s in padded_sources]

        yield padded_targets, padded_sources, target_lengths, source_lengths
"""
Explanation: Get Batches
There's little processing involved when we retrieve the batches. This is a simple example assuming batch_size = 2
Source sequences (it's actually in int form, we're showing the characters for clarity):
<img src="images/source_batch.png" />
Target sequences (also in int, but showing letters for clarity):
<img src="images/target_batch.png" />
End of explanation
"""
# Split data to training and validation sets
train_source = source_letter_ids[batch_size:]
train_target = target_letter_ids[batch_size:]
valid_source = source_letter_ids[:batch_size]
valid_target = target_letter_ids[:batch_size]
(valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size,
source_letter_to_int['<PAD>'],
target_letter_to_int['<PAD>']))
display_step = 20 # Check training loss after every 20 batches
checkpoint = "best_model.ckpt"
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(1, epochs+1):
for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(
get_batches(train_target, train_source, batch_size,
source_letter_to_int['<PAD>'],
target_letter_to_int['<PAD>'])):
# Training step
_, loss = sess.run(
[train_op, cost],
{input_data: sources_batch,
targets: targets_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths})
# Debug message updating us on the status of the training
if batch_i % display_step == 0 and batch_i > 0:
# Calculate validation cost
validation_loss = sess.run(
[cost],
{input_data: valid_sources_batch,
targets: valid_targets_batch,
lr: learning_rate,
target_sequence_length: valid_targets_lengths,
source_sequence_length: valid_sources_lengths})
print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f} - Validation loss: {:>6.3f}'
.format(epoch_i,
epochs,
batch_i,
len(train_source) // batch_size,
loss,
validation_loss[0]))
# Save Model
saver = tf.train.Saver()
saver.save(sess, checkpoint)
print('Model Trained and Saved')
"""
Explanation: Train
We're now ready to train our model. If you run into OOM (out of memory) issues during training, try to decrease the batch_size.
End of explanation
"""
def source_to_seq(text):
    '''Prepare the text for the model: map characters to ids and pad to 7.'''
    sequence_length = 7
    # Unknown characters fall back to the <UNK> id; source_letter_to_int
    # is the module-level lookup table built during preprocessing.
    ids = [source_letter_to_int.get(ch, source_letter_to_int['<UNK>'])
           for ch in text]
    padding = [source_letter_to_int['<PAD>']] * (sequence_length - len(text))
    return ids + padding
input_sentence = 'hello'
text = source_to_seq(input_sentence)
checkpoint = "./best_model.ckpt"
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(checkpoint + '.meta')
loader.restore(sess, checkpoint)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
#Multiply by batch_size to match the model's input parameters
answer_logits = sess.run(logits, {input_data: [text]*batch_size,
target_sequence_length: [len(text)]*batch_size,
source_sequence_length: [len(text)]*batch_size})[0]
pad = source_letter_to_int["<PAD>"]
print('Original Text:', input_sentence)
print('\nSource')
print(' Word Ids: {}'.format([i for i in text]))
print(' Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text])))
print('\nTarget')
print(' Word Ids: {}'.format([i for i in answer_logits if i != pad]))
print(' Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad])))
"""
Explanation: Prediction
End of explanation
"""
|
telecombcn-dl/2017-cfis | sessions/convnets.ipynb | mit | import matplotlib.pyplot as plt
%matplotlib inline
from utils import plot_samples, plot_curves
import time
import numpy as np
# force random seed for results to be reproducible
SEED = 4242
np.random.seed(SEED)
"""
Explanation: Convolutional Neural Networks
So far we have been treating images as flattened arrays of data. One might argue that such representation is not the best, since by flattening 2D images we are losing all spatial information. Let's now use a different network that exploits spatial information by using convolutional layers.
End of explanation
"""
from keras.datasets import mnist
from keras.utils import np_utils
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_rows = X_train.shape[1]
img_cols = X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1) # add third axis with 1
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
nb_classes = 10
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
"""
Explanation: MNIST
In the first part of this session we will use MNIST data to train the network. Using fully connected layers we achieved an accuracy of nearly 0.98 on the test set. Let's see if we can improve this with convolutional layers.
End of explanation
"""
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import Convolution2D, MaxPooling2D, Flatten
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3,3)
h_dim = 28
dr_ratio = 0.2
# something of the form:
# conv - relu - conv - relu - maxpool - dense - relu - dropout - classifier - softmax
model = Sequential()
# ...
model.summary()
"""
Explanation: Exercise: Design a model with convolutional layers for MNIST classification.
You may also need to use other layer types:
python
keras.layers.pooling.MaxPooling2D(pool_size=(2, 2), border_mode='valid')
keras.layers.core.Flatten()
keras.layers.core.Activation()
keras.layers.core.Dropout()
Keep things simple: remember that training these models is computationally expensive. Limit the amount of layers and neurons to reduce the number of parameters. More than 100K parameters is not recommended for training in CPU.
End of explanation
"""
from keras.optimizers import SGD
lr = 0.01
# For now we will not decrease the learning rate
decay = 0
optim = SGD(lr=lr, decay=decay, momentum=0.9, nesterov=True)
batch_size = 32
nb_epoch = 10
model.compile(loss='categorical_crossentropy',
optimizer=optim,
metrics=['accuracy'])
t = time.time()
# GeForce GTX 980 - 161 seconds 30 epochs bs 128
# GeForce GTX Titan Black - 200 seconds 30 epochs bs 128
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=2,validation_data=(X_test, Y_test))
print (time.time() - t, "seconds.")
score = model.evaluate(X_test, Y_test, verbose=0)
print ("-"*10)
print ("Loss: %f"%(score[0]))
print ("Accuracy: %f"%(score[1]))
plot_curves(history,nb_epoch)
model.save('../models/mnist_conv.h5')
"""
Explanation: Exercise: The convolutional layer in keras has a parameter border_mode, which can take values of 'valid' (no padding) or 'same' (+padding). What is the impact in the parameters when setting it to 'valid' or 'same', respectively? Why does the number of parameters change?
Let's train the model. Notice that this procedure is going to take a lot longer than the ones in the previous session. Convolutions are computationally expensive, and even the simplest model takes a long time to train without a GPU.
End of explanation
"""
from keras.datasets import cifar10
import numpy as np
np.random.seed(4242)
"""
Explanation: While you are waiting...
Exercise: Calculate the number of parameters of the new network you just defined.
We could probably train this model for a lot longer and results would still improve. Since our time is limited, let's move on for now.
Transitioning to RGB images: CIFAR 10
At this point we already know how to train a convnet for classification. Let's now switch to a more challenging dataset of colour images: CIFAR 10.
End of explanation
"""
# The data, shuffled and split between train and test sets:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
plot_samples(X_train,5)
"""
Explanation: Let's load the dataset and display some samples:
End of explanation
"""
nb_classes = 10
# Convert class vectors to binary class matrices.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train.shape
img_rows = X_train.shape[1]
img_cols = X_train.shape[2]
input_shape = (img_rows, img_cols,3)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
input_shape
"""
Explanation: We format the data before training:
End of explanation
"""
nb_epoch=10
lr = 0.01
decay = 1e-6
optim = SGD(lr=lr, decay=decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=optim,
metrics=['accuracy'])
t = time.time()
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=2,validation_data=(X_test, Y_test))
print (time.time() - t, 'seconds.')
score = model.evaluate(X_test, Y_test, verbose=0)
print ("Loss: %f"%(score[0]))
print ("Accuracy: %f"%(score[1]))
plot_curves(history,nb_epoch)
"""
Explanation: Exercise: Design and train a convnet to classify cifar 10 images. Hint: Images now have 3 channels instead of 1 !
End of explanation
"""
model.save('../models/cifar10.h5')
"""
Explanation: While you are waiting...
Exercise: Calculate the number of parameters of the model.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_channel_epochs_image.ipynb | bsd-3-clause | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
"""
Explanation: Visualize channel over epochs as an image
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
2 images are produced. One with a good channel and one with a channel
that does not see any evoked field.
It is also demonstrated how to reorder the epochs using a 1d spectral
embedding as described in:
Graph-based variability estimation in single-trial event-related neural
responses A. Gramfort, R. Keriven, M. Clerc, 2010,
Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
https://hal.inria.fr/inria-00497023
End of explanation
"""
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
"""
Explanation: Set parameters
End of explanation
"""
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.cluster.spectral import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
    """Order epochs by a 1-D spectral embedding of the evoked window."""
    # Restrict to the 0-350 ms window and L2-normalise each epoch.
    window = data[:, (times > 0.0) & (times < 0.350)]
    window /= np.sqrt(np.sum(window ** 2, axis=1))[:, np.newaxis]
    embedding = spectral_embedding(rbf_kernel(window, gamma=1.),
                                   n_components=1, random_state=0)
    return np.argsort(embedding.ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=0.5, vmin=-100,
vmax=250, colorbar=True, order=order_func, show=True)
"""
Explanation: Show event related fields images
End of explanation
"""
|
whitead/numerical_stats | unit_12/hw_2017/problem_set_1.ipynb | gpl-3.0 | import scipy.stats as ss
ss.shapiro([-26.3,-24.2, -20.9, -25.8, -24.3, -22.6, -23.0, -26.8, -26.5, -23.1, -20.0, -23.1, -22.4, -22.8])
"""
Explanation: Problem 1 Instructions
Answer the following short-answer questions using Markdown cells
Problem 1.1
A $t$-test and $zM$ test rely on the assumption of normality. How could you test that assumption?
Shapiro Wilks hypothesis test
Problem 1.2
What is $\hat{\beta}$ in OLS?
The best-fit slope
Problem 1.3
What is $S_{\epsilon}$ in OLS?
The standard error in residuals.
Problem 1.4
What is the difference between SSR and TSS? Is one always greater than the other?
SSR is the sum of squared distance between fit y and data y. TTS is the sum of squared distance between average y and all y data. $TTS \geq SSR$
Problem 1.5
We learned three ways to do regression. One way was with algebraic equations (OLS-ND). What were the other two ways?
OLS-1D, NLS-ND
Problem 1.6
Aside from a plot, what are the steps to complete for a good regression analysis?
(1) Justify with Spearmann test (2) Check normality of residuals (3) hypothesis tests/confidence intervals as needed
Problem 1.7
Is a goodness of fit applicable to a multidimensional regression? If so, what are the x/y axes for this plot?
yes, $y$ vs $\hat{y}$
Problem 1.8
When is it valid to linearize a non-linear problem?
When it doesn't change the noise in the model from normal to some other distribution
Problem 1.9
Sometimes expressions for a model have $\hat{y}=\ldots$ on the left-hand side and other times $y=\ldots$. What is the difference between these two quantities and what changes on the right-hand side when adding/removing the $\hat{}$?
$\hat{y}$ is the best fit and $y$ is the data. When we write $y$, to achieve equality with our model we have to add $\epsilon$, some noise to describe the discrepancy between our model and the data.
Problem Set 2
Problem 2.1
Are these numbers normally distributed? [-26.3,-24.2, -20.9, -25.8, -24.3, -22.6, -23.0, -26.8, -26.5, -23.1, -20.0, -23.1, -22.4, -22.8]
End of explanation
"""
import numpy as np
# T statistic for the intercept: alpha-hat / S_alpha = 0.2 / sqrt(0.4).
T = (0.2 - 0) / np.sqrt(0.4)
# Two-sided p-value with N - 1 = 10 degrees of freedom, because the null
# hypothesis is that there is no intercept (only the slope is deducted).
1 - (ss.t.cdf(T, 11 - 1) - ss.t.cdf(-T, 11- 1))
"""
Explanation: The $p$-value is 0.43, so it could be normal
Problem 2.2
Given $\hat{\alpha} = 0.2$, $\hat{\beta} = 1.6$, $N = 11$, $S^2_\alpha = 0.4$, $S^2_\epsilon = 0.5$, $S^2_\beta = 4$, give a justification for or against their being an intercept
End of explanation
"""
T = 1.6 / np.sqrt(4)
ss.t.cdf(T, 11 - 1) - ss.t.cdf(0,11 - 1)
"""
Explanation: The $p$-value is 0.76, so we cannot reject the null hypothesis of no intercept
Problem 2.3
Conduct a hypothesis test for the slope being positive using the above data. This is a one-sided hypothesis test. Hint: a good null hypothesis would be that the slope is negative. Describe your test in Markdown first, then complete it in Python, and finally write an explanation of the p-value in the final cell.
Let's make the null hypothesis that the slope is negative as suggested. We will create a T statistic, which should correspond to some interval/$p$-value that gets smaller (closer to our significance threshold) as we get more positive in our slope. This will work:
$$ p = 1 - \int_{0}^{T} p(t)\,dt$$
where $T$ is our positive value reflecting how positive the slope is.
You can use 1 or 2 deducted degrees of freedom. 1 is correct, since there is no degree of freedom for the intercept here, but it's a little bit tricky to see that.
End of explanation
"""
def ssr(beta, x_data=None, y_data=None):
    """Sum of squared residuals for the model y-hat = b0*x + b1*exp(-b2*x).

    Parameters
    ----------
    beta : sequence of 3 floats
        Model coefficients (b0, b1, b2).
    x_data, y_data : array-like, optional
        Data to evaluate the model against.  When omitted, the function
        falls back to the module-level ``x`` and ``y`` arrays, preserving
        the original single-argument call ``ssr(beta)``.

    Returns
    -------
    float
        The sum of squared residuals.
    """
    xv = x if x_data is None else np.asarray(x_data)
    yv = y if y_data is None else np.asarray(y_data)
    yhat = beta[0] * xv + beta[1] * np.exp(-beta[2] * xv)
    return np.sum((yv - yhat) ** 2)
"""
Explanation: The $p$-value is 0.28, so it's not guaranteed that the slope is positive. This is due to the large uncertainty in the intercept
Problem 2.4
Write a function which computes the SSR for $\hat{y} = \beta_0 x + \beta_1 \exp\left( -\beta_2 x\right) $. Your function should take in one argument. You may assume $x$ and $y$ are defined elsewhere in the code.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/ai-for-finance/solution/aapl_regression_scikit_learn.ipynb | apache-2.0 | !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install --user google-cloud-bigquery==1.25.0
"""
Explanation: Building a Regression Model for a Financial Dataset
In this notebook, you will build a simple linear regression model to predict the closing AAPL stock price. The lab objectives are:
* Pull data from BigQuery into a Pandas dataframe
* Use Matplotlib to visualize data
* Use Scikit-Learn to build a regression model
End of explanation
"""
%%bash
bq mk -d ai4f
bq load --autodetect --source_format=CSV ai4f.AAPL10Y gs://cloud-training/ai4f/AAPL10Y.csv
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
plt.rc('figure', figsize=(12, 8.0))
"""
Explanation: Note: Restart your kernel to use updated packages.
Kindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage.
End of explanation
"""
%%bigquery df
-- Build the training table for the AAPL close-price regression and store
-- the result in the Pandas DataFrame `df`.
WITH
-- raw: each day's close plus the closes of the previous four trading days
-- (LAG over date); the earliest rows get NULL lagged values.
  raw AS (
  SELECT
    date,
    close,
    LAG(close, 1) OVER(ORDER BY date) AS min_1_close,
    LAG(close, 2) OVER(ORDER BY date) AS min_2_close,
    LAG(close, 3) OVER(ORDER BY date) AS min_3_close,
    LAG(close, 4) OVER(ORDER BY date) AS min_4_close
  FROM
    `ai4f.AAPL10Y`
  ORDER BY
    date DESC ),
-- raw_plus_trend: per-day direction flags (+1 up, -1 down) for each of the
-- three most recent day-over-day moves.
  raw_plus_trend AS (
  SELECT
    date,
    close,
    min_1_close,
    IF (min_1_close - min_2_close > 0, 1, -1) AS min_1_trend,
    IF (min_2_close - min_3_close > 0, 1, -1) AS min_2_trend,
    IF (min_3_close - min_4_close > 0, 1, -1) AS min_3_trend
  FROM
    raw ),
-- train_data: final features -- previous day's close, and trend_3_day = +1
-- when at least two of the last three moves were up, else -1.
  train_data AS (
  SELECT
    date,
    close,
    min_1_close AS day_prev_close,
    IF (min_1_trend + min_2_trend + min_3_trend > 0, 1, -1) AS trend_3_day
  FROM
    raw_plus_trend
  ORDER BY
    date ASC )
SELECT
  *
FROM
  train_data
"""
Explanation: Pull Data from BigQuery
In this section we'll use a magic function to query a BigQuery table and then store the output in a Pandas dataframe. A magic function is just an alias to perform a system command. To see documentation on the "bigquery" magic function execute the following cell:
The query below selects everything you'll need to build a regression model to predict the closing price of AAPL stock. The model will be very simple for the purposes of demonstrating BQML functionality. The only features you'll use as input into the model are the previous day's closing price and a three day trend value. The trend value can only take on two values, either -1 or +1. If the AAPL stock price has increased over any two of the previous three days then the trend will be +1. Otherwise, the trend value will be -1.
Note, the features you'll need can be generated from the raw table ai4f.AAPL10Y using Pandas functions. However, it's better to take advantage of the serverless-ness of BigQuery to do the data pre-processing rather than applying the necessary transformations locally.
End of explanation
"""
# 'df' was populated by the %%bigquery cell above; confirm it is a DataFrame.
print(type(df))
# Drop rows with missing values (the earliest days have NULL lagged closes).
df.dropna(inplace=True)
df.head()
"""
Explanation: View the first five rows of the query's output. Note that the object df containing the query output is a Pandas Dataframe.
End of explanation
"""
# Time series of the AAPL closing price over the full dataset.
df.plot(x='date', y='close');
"""
Explanation: Visualize data
The simplest plot you can make is to show the closing stock price as a time series. Pandas DataFrames have built-in plotting functionality based on Matplotlib.
End of explanation
"""
start_date = '2018-06-01'
end_date = '2018-07-31'

# Boolean mask for the two-month window, computed once and reused below.
in_window = pd.to_datetime(df.date).between(start_date, end_date)

# Dashed black line: the closing price over the window.
plt.plot(
    'date', 'close', 'k--',
    data=df.loc[in_window]
)

# Blue dots: days whose 3-day trend is positive.
# BUG FIX: the original wrote `df.trend_3_day == 1 & mask`, but `&` binds
# more tightly than `==`, so the comparison was made against `1 & mask`
# instead of selecting trend == 1 inside the window (the -1 branch below
# already had the parentheses).
plt.scatter(
    'date', 'close', color='b', label='pos trend',
    data=df.loc[(df.trend_3_day == 1) & in_window]
)

# Red dots: days whose 3-day trend is negative.
plt.scatter(
    'date', 'close', color='r', label='neg trend',
    data=df.loc[(df.trend_3_day == -1) & in_window]
)

plt.legend()
plt.xticks(rotation=90);

# (rows, columns) of the full feature table.
df.shape
"""
Explanation: You can also embed the trend_3_day variable into the time series above.
End of explanation
"""
features = ['day_prev_close', 'trend_3_day']
target = 'close'

# Sequential (time-ordered) train/test split after 2000 rows.
# BUG FIX: the original label-based split df.loc[:2000] / df.loc[2000:] is
# inclusive of label 2000 on BOTH sides, leaking one row into both sets.
# A positional iloc split is exact and non-overlapping.
split = 2000
X_train, X_test = df[features].iloc[:split], df[features].iloc[split:]
y_train, y_test = df[target].iloc[:split], df[target].iloc[split:]

# Create linear regression object (no intercept term, as in the original lab)
regr = linear_model.LinearRegression(fit_intercept=False)

# Train the model using the training set
regr.fit(X_train, y_train)

# Make predictions using the testing set
y_pred = regr.predict(X_test)

# The root mean squared error of the predictions
print('Root Mean Squared Error: {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred))))

# Explained variance score: 1 is perfect prediction
print('Variance Score: {0:.2f}'.format(r2_score(y_test, y_pred)))

# Predicted vs. actual close, with the perfect-fit diagonal for reference.
plt.scatter(y_test, y_pred)
plt.plot([140, 240], [140, 240], 'r--', label='perfect fit')
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.legend();
"""
Explanation: Build a Regression Model in Scikit-Learn
In this section you'll train a linear regression model to predict AAPL closing prices when given the previous day's closing price day_prev_close and the three day trend trend_3_day. A training set and test set are created by sequentially splitting the data after 2000 rows.
End of explanation
"""
# Baseline for comparison: RMSE of the naive persistence forecast, i.e.
# predicting today's close with yesterday's close (day_prev_close).
print('Root Mean Squared Error: {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, X_test.day_prev_close))))
"""
Explanation: The model's predictions are more or less in line with the truth. However, the utility of the model depends on the business context (i.e. you won't be making any money with this model). It's fair to question whether the variable trend_3_day even adds to the performance of the model:
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.19/_downloads/162648d33d7b9ea4f5ce1e8bb494a02d/plot_mne_inverse_label_connectivity.ipynb | bsd-3-clause | # Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
print(__doc__)
"""
Explanation: Compute source space connectivity and visualize it using a circular graph
This example computes the all-to-all connectivity between 68 regions in
source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions in the axial plane.
End of explanation
"""
# Paths into the MNE 'sample' dataset.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'

# Load data: precomputed inverse operator, raw recording, and event list.
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)

# Add a bad channel so it is excluded by the channel picking below.
raw.info['bads'] += ['MEG 2443']

# Pick MEG channels (plus EOG for artifact rejection), skipping bads.
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                       exclude='bads')

# Define epochs for left-auditory condition: event_id=1, window from
# -0.2 s to 0.5 s around each event, baseline-corrected over the
# pre-stimulus interval, with amplitude-based rejection thresholds.
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
                                                    eog=150e-6))
"""
Explanation: Load our data
First we'll load the data we'll use in connectivity estimation. We'll use
the sample MEG data provided with MNE.
End of explanation
"""
# Compute inverse solution and for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list, so epochs are processed
# one at a time and memory use stays independent of the number of epochs.
snr = 1.0  # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
                            pick_ori="normal", return_generator=True)

# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
                                    subjects_dir=subjects_dir)
label_colors = [label.color for label in labels]

# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
                                         return_generator=True)

# Restrict the connectivity estimate to the alpha band (8-13 Hz).
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq']  # the sampling frequency
# Two connectivity measures computed in a single pass over the epochs.
con_methods = ['pli', 'wpli2_debiased']
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
    label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
    fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)

# con is a 3D array, get the connectivity for the first (and only) freq. band
# for each method
con_res = dict()
for method, c in zip(con_methods, con):
    con_res[method] = c[:, :, 0]
"""
Explanation: Compute inverse solutions and their connectivity
Next, we need to compute the inverse solution for this data. This will return
the sources / source activity that we'll use in computing connectivity. We'll
compute the connectivity in the alpha band of these sources. We can specify
particular frequencies to include in the connectivity with the fmin and
fmax flags. Notice from the status messages how mne-python:
reads an epoch from the raw file
applies SSP and baseline correction
computes the inverse to obtain a source estimate
averages the source estimate to obtain a time series for each label
includes the label time series in the connectivity computation
moves to the next epoch.
This behaviour is because we are using generators. Since we only need to
operate on the data one epoch at a time, using a generator allows us to
compute connectivity in a computationally efficient manner where the amount
of memory (RAM) needed is independent from the number of epochs.
End of explanation
"""
# Order the nodes for the circular plot by each left-hemisphere label's
# y-position, then mirror that ordering onto the right hemisphere.
label_names = [lbl.name for lbl in labels]

lh_labels = [nm for nm in label_names if nm.endswith('lh')]

# Mean y-coordinate of every left-hemisphere label.
label_ypos = [np.mean(labels[label_names.index(nm)].pos[:, 1])
              for nm in lh_labels]

# Sort the left-hemisphere labels by that y-coordinate.
lh_labels = [nm for _, nm in sorted(zip(label_ypos, lh_labels))]

# Mirror onto the right hemisphere by swapping the hemisphere suffix.
rh_labels = [nm[:-2] + 'rh' for nm in lh_labels]

# Left hemisphere reversed, then right: gives a symmetric circular layout.
node_order = lh_labels[::-1] + rh_labels

node_angles = circular_layout(label_names, node_order, start_pos=90,
                              group_boundaries=[0, len(label_names) / 2])

# Plot the graph using node colors from the FreeSurfer parcellation,
# showing only the 300 strongest connections.
plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
                         node_angles=node_angles, node_colors=label_colors,
                         title='All-to-All Connectivity left-Auditory '
                               'Condition (PLI)')
"""
Explanation: Make a connectivity plot
Now, we visualize this connectivity using a circular graph layout.
End of explanation
"""
# One figure with a black background, one subplot per connectivity method.
fig = plt.figure(num=None, figsize=(8, 4), facecolor='black')
# Use empty strings in place of node labels for the small side-by-side plots.
no_names = [''] * len(label_names)
for ii, method in enumerate(con_methods):
    plot_connectivity_circle(con_res[method], no_names, n_lines=300,
                             node_angles=node_angles, node_colors=label_colors,
                             title=method, padding=0, fontsize_colorbar=6,
                             fig=fig, subplot=(1, 2, ii + 1))

plt.show()
"""
Explanation: Make two connectivity plots in the same figure
We can also assign these connectivity plots to axes in a figure. Below we'll
show the connectivity plot using two different connectivity methods.
End of explanation
"""
# fname_fig = data_path + '/MEG/sample/plot_inverse_connect.png'
# fig.savefig(fname_fig, facecolor='black')
"""
Explanation: Save the figure (optional)
By default matplotlib does not save using the facecolor, even though this was
set when the figure was generated. If not set via savefig, the labels, title,
and legend will be cut off from the output png file.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ncc/cmip6/models/noresm2-lm/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-lm', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: NCC
Source ID: NORESM2-LM
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:24
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection schemes name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# NOTE: ODS = ozone depleting substances (see explanation 16.2 below).
# Optional property (cardinality 0.N) — set one value per ODS whose shortwave
# radiative effect is explicitly represented, or leave unset.
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# NOTE(review): 'physical_reprenstation' is misspelled inside the property id
# itself. The id line is marked DO NOT EDIT, so the spelling must be kept
# as-is (presumably it has to match the generator's id registry — do not "fix").
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# NOTE: INTEGER property — pass an unquoted number, i.e. DOC.set_value(value),
# not DOC.set_value("value") as used by the ENUM/STRING properties in this file.
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# NOTE: BOOLEAN property — pass the literal True or False (unquoted).
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
phuongxuanpham/SelfDrivingCar | CarND-Behavioral-Cloning-Project3/model.ipynb | gpl-3.0 | import os
import csv
import cv2
import numpy as np
import sklearn
"""
Explanation: Behavioral Cloning
This is the Project 3 in Self Driving Car Nano degree from Udacity
The purpose of this project is using deep learning to train a deep neural network to drive a car automously in a simulator.
Behavioral Cloning Project
The goals / steps of this project are the following:
Use the simulator to collect data of good driving behavior
Build a convolution neural network in Keras that predicts steering angles from images
Train and validate the model with a training and validation set
Test that the model successfully drives around track one without leaving the road
Summarize the results with a written report
Code Scripts
End of explanation
"""
def flip_image(img, angle):
    """With probability 0.5, mirror the image horizontally and negate the angle.

    Horizontal flipping balances the left/right-turn bias of the training
    track: a mirrored frame corresponds to steering in the opposite direction.

    Parameters
    ----------
    img : ndarray
        BGR camera frame.
    angle : float
        Steering angle recorded for *img*.

    Returns
    -------
    tuple of (ndarray, float)
        Either the untouched ``(img, angle)`` pair, or the horizontally
        flipped image together with the sign-reversed angle.
    """
    do_flip = np.random.rand() < 0.5
    if do_flip:
        return cv2.flip(img, 1), -angle
    return img, angle
"""
Explanation: Image Augmentation
End of explanation
"""
# Read the driving_log.csv and get paths of images as samples
# Each CSV row holds: center/left/right image paths, steering angle,
# throttle, brake, speed (only columns 0-3 are used downstream).
samples = []
with open('../../P3_Data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        samples.append(line)
from sklearn.model_selection import train_test_split
# Hold out 20% of the log rows for validation; fixed random_state keeps
# the split reproducible across runs.
train_samples, validation_samples = train_test_split(samples, test_size=0.2, random_state=0)
"""
Explanation: Reading and spliting data
End of explanation
"""
def select_image(batch_sample, is_training=False):
    """Load one camera frame for a driving-log row and return it with its angle.

    During training, one of the three cameras (center/left/right) is picked
    uniformly at random; the side cameras get a steering correction so the
    model learns how to steer back if the car drifts off to the left or the
    right.  Outside training the center camera is always used, keeping
    validation deterministic.

    Parameters
    ----------
    batch_sample : sequence
        One row of driving_log.csv: columns 0-2 are center/left/right image
        paths and column 3 the recorded steering angle.
    is_training : bool, optional
        When True, sample one of the three cameras; otherwise use the center.

    Returns
    -------
    tuple of (ndarray, float)
        The BGR image read by OpenCV and its (possibly corrected) angle.
    """
    # Idiomatic boolean test (was `is_training == True`); same RNG consumption.
    choice = np.random.choice(3) if is_training else 0
    name = '../../P3_Data/IMG/' + batch_sample[choice].split('/')[-1]
    image = cv2.imread(name)
    steering_center = float(batch_sample[3])
    # create adjusted steering measurements for the side camera images
    correction = 0.2  # this is a parameter to tune
    # index 0 -> center (no adjustment), 1 -> left (+), 2 -> right (-);
    # only the chosen camera's adjustment is applied (was computed for all).
    adjustments = (0.0, correction, -correction)
    return image, steering_center + adjustments[choice]
def generator(samples, batch_size=32, is_training=False):
    """Yield (images, steering_angles) batches forever, for Keras fit_generator.

    Parameters
    ----------
    samples : list
        Rows of driving_log.csv (image paths plus telemetry columns).
    batch_size : int, optional
        Number of log rows per yielded batch.
    is_training : bool, optional
        When True, sample among the three cameras and randomly flip images
        (data augmentation); when False, use only the center camera.

    Yields
    ------
    tuple of (ndarray, ndarray)
        A shuffled batch of images and their steering angles.
    """
    num_samples = len(samples)
    while 1:  # Loop forever so the generator never terminates
        # Reshuffle once per pass over the data.  BUGFIX: in the original
        # code the shuffle(samples) call had been swallowed into the
        # comment above and never executed, so every epoch saw the batches
        # in the same order.
        samples = sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                image, angle = select_image(batch_sample, is_training=is_training)
                images.append(image)
                angles.append(angle)
            # Get training data
            X_train = np.array(images)
            y_train = np.array(angles)
            # Randomly flip image if in training mode
            if is_training:
                X_train_augmented, y_train_augmented = [], []
                for x, y in zip(X_train, y_train):
                    x_augmented, y_augmented = flip_image(x, y)
                    X_train_augmented.append(x_augmented)
                    y_train_augmented.append(y_augmented)
                X_train_augmented = np.array(X_train_augmented)
                y_train_augmented = np.array(y_train_augmented)
                yield sklearn.utils.shuffle(X_train_augmented, y_train_augmented)
            else:
                yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
# The training generator draws from all three cameras and applies random
# horizontal flips; the validation generator uses only the unaugmented
# center camera so validation loss is comparable across epochs.
train_generator = generator(train_samples, batch_size=32, is_training=True)
validation_generator = generator(validation_samples, batch_size=32, is_training=False)
"""
Explanation: Using generator function to compile and train the model
End of explanation
"""
# Build network architecture
# for a regression network (need only 1 neuron at output)
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, Cropping2D
from keras.optimizers import Adam
import matplotlib.pyplot as plt
from keras.backend import tf as ktf
from keras.callbacks import ModelCheckpoint
# Simulator camera frames are 160x320 with 3 (BGR) channels.
row, col, ch = 160, 320, 3 # image format
INPUT_SHAPE = (row,col,ch)
def resize(image):
    """Resize a batch of images to 66x200 via TensorFlow.

    NOTE(review): currently unused -- the Lambda layer that would call it
    is commented out in the model definition below.
    """
    # Local import so a model saved with this Lambda can be deserialized
    # without depending on module-level state.
    from keras.backend import tf as ktf
    resized = ktf.image.resize_images(image, (66, 200))
    return resized
# Create the Sequential model
model = Sequential()
## Set up lambda layers for data preprocessing:
# Set up cropping2D layer: cropping (top, bottom) (left, right) pixels
# (removes sky/trees at the top and the car hood at the bottom)
model.add(Cropping2D(cropping=((60,25), (0,0)), input_shape=INPUT_SHAPE))
# Add Lambda layer for resizing image (image, height, width, data_format)
#model.add(Lambda(resize, input_shape=(75, 320, 3), output_shape=(66, 200, 3)))
# Add Lambda layer for normalization (pixels scaled from [0, 255] to [-1, 1])
model.add(Lambda(lambda x: (x / 127.5) - 1.0))
## Build a Multi-layer feedforward neural network with Keras here.
## 5 convolution layers followed by 3 hidden fully connected layers and a
## single-neuron regression output (steering angle).
# 1st Layer - Add a convolution layer (24 filters, 5x5, stride 2)
model.add(Convolution2D(24, 5, 5, subsample=(2,2), activation='relu'))
# 2nd Layer - Add a convolution layer (36 filters, 5x5, stride 2)
model.add(Convolution2D(36, 5, 5, subsample=(2,2), activation='relu'))
# 3rd Layer - Add a convolution layer (48 filters, 5x5, stride 2)
model.add(Convolution2D(48, 5, 5, subsample=(2,2), activation='relu'))
# 4th Layer - Add a convolution layer (64 filters, 3x3)
model.add(Convolution2D(64, 3, 3, activation='relu'))
# 5th Layer - Add a convolution layer (64 filters, 3x3)
model.add(Convolution2D(64, 3, 3, activation='relu'))
# 6th Layer - Add a dropout layer (rate 0.5) to reduce overfitting
#             (original comment mislabeled this as a convolution layer)
model.add(Dropout(0.5))
# 7th Layer - Add a flatten layer
model.add(Flatten())
# 8th Layer - Add a fully connected layer (100 units)
model.add(Dense(100, activation='relu'))
# 9th Layer - Add a fully connected layer (50 units)
model.add(Dense(50, activation='relu'))
# 10th Layer - Add a fully connected layer (10 units)
model.add(Dense(10, activation='relu'))
# 11th Layer - Add a fully connected layer: single output = steering angle
model.add(Dense(1))
model.summary()
# Save the full model after every epoch.  NOTE: with save_best_only=False a
# checkpoint is written each epoch regardless of val_loss (the original
# comment claiming "if the validation loss decreased" was inaccurate).
checkpointer = ModelCheckpoint('model-{epoch:02d}.h5',
                               monitor='val_loss',
                               verbose=0,
                               save_best_only=False,
                               mode='auto')
# Compile and train the model: Adam optimizer, mean-squared-error loss
# (regression on the steering angle).
# NOTE(review): `verbose` is not a documented argument of model.compile in
# Keras -- confirm this Keras version accepts/ignores it.
model.compile(optimizer='adam', loss='mse', verbose = 1)
# history_object = model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=7, batch_size=128)
history_object = model.fit_generator(train_generator, samples_per_epoch=len(train_samples), \
                    validation_data=validation_generator, nb_val_samples=len(validation_samples), \
                    nb_epoch=10, callbacks=[checkpointer], verbose=1)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
"""
Explanation: Train the network
End of explanation
"""
from IPython.display import HTML

# Embed the recorded autonomous-driving video inline in the notebook.
_VIDEO_TEMPLATE = """
<video width="320" height="160" controls>
<source src="{0}">
</video>
"""
HTML(_VIDEO_TEMPLATE.format('run1.mp4'))
"""
Explanation: Run the network
I run the network on the pre-trained model with the following command:
python drive.py model1.h5 run1
And run the simulator with the Autonomous mode. The output video was recorded with the following command
python video.py run1 --fps 48
Output
Video with model without resizing images.
End of explanation
"""
# Inline player for the second recorded run (model trained on resized images).
_run2_markup = (
    "\n"
    '<video width="320" height="160" controls>\n'
    '<source src="{0}">\n'
    "</video>\n"
).format('run2.mp4')
HTML(_run2_markup)
"""
Explanation: Video with model with resizing images.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mpi-m/cmip6/models/sandbox-3/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# DOC accumulates the ES-DOC documentation entries for the
# (cmip6, mpi-m, sandbox-3, atmos) model topic; every DOC.set_*()
# call in the cells below writes into this single output object.
DOC = NotebookOutput('cmip6', 'mpi-m', 'sandbox-3', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: MPI-M
Source ID: SANDBOX-3
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:17
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection schemes name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Flourinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other flourinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# NOTE(review): INTEGER property (Cardinality 1.1) -- the template above shows
# an UNQUOTED value, so pass a plain int, not a string.
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# NOTE(review): ENUM property with Cardinality 1.N -- at least one of the
# choices below must be supplied; the plural "VALUE(S)" header suggests
# DOC.set_value is called once per applicable choice -- confirm against the
# ES-DOC notebook conventions.
# Valid Choices:
#     "coupled with deep"
#     "coupled with shallow"
#     "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Free-text STRING property (Is Required: TRUE, Cardinality 1.1) -- supply a
# single quoted string describing the observation simulator.
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_compute_mne_inverse.ipynb | bsd-3-clause | # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=None)
# Save result in stc files
stc.save('mne_%s_inverse' % method)
"""
Explanation: Compute MNE-dSPM inverse solution on evoked data
Compute dSPM inverse solution on MNE evoked dataset
and stores the solution in stc files for visualisation.
End of explanation
"""
# 2D overview: plot every 100th source time course (x axis in ms).
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# use peak getter to move vizualization to the time point of the peak
# (time_as_index=True returns a sample index rather than a latency)
vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
brain.set_data_time_index(time_idx)
# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
               scale_factor=0.6)
# Save a screenshot of the 3D view to disk.
brain.save_image('dSPM_map.png')
"""
Explanation: View activation time-series
End of explanation
"""
|
ddtm/dl-course | Seminar9/Seminar9_ru.ipynb | mit | low_RAM_mode = True
very_low_RAM = False #если у вас меньше 3GB оперативки, включите оба флага
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Использование глубокого обучения в NLP
Смотрите в этой серии:
* Простые способы работать с текстом, bag of words
* Word embedding и... нет, это не word2vec
* Как сделать лучше? Текстовые свёрточные сети
* Совмещение нескольких различных источников данных
* Решение +- реальной задачи нейронками
За помощь в организации свёрточной части спасибо Ирине Гольцман
NLTK
Для работы этого семинара вам потреюуется nltk v3.2
Важно, что именно v3.2, чтобы правильно работал токенизатор
Устаовить/обновиться до неё можно командой
* sudo pip install --upgrade nltk==3.2
* Если у вас старый pip, предварительно нужно сделать sudo pip install --upgrade pip
Если у вас нет доступа к этой версии - просто убедитесь, что токены в token_counts включают русские слова.
Для людей со слабым ПК
Этот семинар можно выполнить, имея относительно скромную машину (<= 4Gb RAM)
Для этого существует специальный флаг "low_RAM_mode" - если он True, семинар работает в режиме экономии вашей памяти
Если у вас 8GB и больше - проблем с памятью возникнуть не должно
Если включить режим very_low_ram, расход мамяти будет ещё меньше, но вам может быть более трудно научить нейронку.
End of explanation
"""
if not low_RAM_mode:
# Если у вас много оперативки
df = pd.read_csv("avito_train.tsv",sep='\t')
else:
#Если у вас меньше 4gb оперативки
df = pd.read_csv("avito_train_1kk.tsv",sep='\t')
print df.shape, df.is_blocked.mean()
df[:5]
"""
Explanation: Познакомимся с данными
Бывший kaggle-конкурс про выявление нежелательного контента.
Описание конкурса есть тут - https://www.kaggle.com/c/avito-prohibited-content
Скачать
Если много RAM,
* Из данных конкурса (вкладка Data) нужно скачать avito_train.tsv и распаковать в папку с тетрадкой
Если мало RAM,
* Cкачайте прореженную выборку отсюда
* Пожатая https://yadi.sk/d/l0p4lameqw3W8
* Непожатая https://yadi.sk/d/I1v7mZ6Sqw2WK
Много разных признаков:
* 2 вида текста - заголовок и описание
* Много специальных фичей - цена, количество телефонов/ссылок/e-mail адресов
* Категория и субкатегория - как ни странно, категориальные фичи
* Аттрибуты - много категориальных признаков
Нужно предсказать всего 1 бинарный признак - есть ли в рекламе нежелательный контент.
* Под нежелательным контентом понимается криминал, прон, афера, треска и прочие любимые нами темы.
* Да, если присмотреться к заблокированным объявлениям, можно потерять аппетит и сон на пару дней.
* Однако профессия аналитика данных обязывает вас смотреть на данные.
* А кто сказал, что будет легко? Data Science - опасная профессия.
End of explanation
"""
print "Доля заблокированных объявлений",df.is_blocked.mean()
print "Всего объявлений:",len(df)
"""
Explanation:
End of explanation
"""
#downsample
< выдели подвыборку, в которой отрицательных примеров примерно столько же, сколько положительных>
df = <уменьшенная подвыборка>
print "Доля заблокированных объявлений:",df.is_blocked.mean()
print "Всего объявлений:",len(df)
assert df.is_blocked.mean() < 0.51
assert df.is_blocked.mean() > 0.49
assert len(df) <= 560000
print "All tests passed"
# Thin the data by a further factor of 2 if memory is scarce.
# BUG FIX: the original referenced undefined names `very_low_ram` and
# `data`; the flag defined at the top of the notebook is `very_low_RAM`
# and the downsampled DataFrame is `df`.
if very_low_RAM:
    df = df[::2]
"""
Explanation: Сбалансируем выборку
Выборка смещена в сторону незаблокированных объявлений
4 миллиона объявлений и только 250 тысяч заблокированы.
Давайте просто выберем случайные 250 тысяч незаблокированных объявлений и сократим выборку до полумилиона.
В последствии можно испоьзовать более умные способы сбалансировать выборку
Если у вас слабый ПК и вы видите OutOfMemory, попробуйте уменьшить размер выборки до 100 000 примеров
Алсо если вы не хотите ждать чтения всех данных каждый раз - сохраните уменьшенную выборку и читайте её
End of explanation
"""
from nltk.tokenize import RegexpTokenizer
from collections import Counter,defaultdict
tokenizer = RegexpTokenizer(r"\w+")
#словарь для всех токенов
token_counts = Counter()
#все заголовки и описания
all_texts = np.hstack([df.description.values,df.title.values])
#считаем частоты слов
for s in all_texts:
if type(s) is not str:
continue
s = s.decode('utf8').lower()
tokens = tokenizer.tokenize(s)
for token in tokens:
token_counts[token] +=1
"""
Explanation: Токенизируем примеры
Сначала соберём словарь всех возможных слов.
Поставим каждому слову в соответствие целое число - его id
End of explanation
"""
#распределение частот слов - большинство слов встречаются очень редко - для нас это мусор
_=plt.hist(token_counts.values(),range=[0,50],bins=50)
#возьмём только те токены, которые встретились хотя бы 10 раз в обучающей выборке
#информацию о том, сколько раз встретился каждый токен, можно найти в словаре token_counts
min_count = 10
tokens = <список слов(ключей) из token_counts, которые встретились в выборке не менее min_count раз>
token_to_id = {t:i+1 for i,t in enumerate(tokens)}
null_token = "NULL"
token_to_id[null_token] = 0
print "Всего токенов:",len(token_to_id)
if len(token_to_id) < 30000:
print "Алярм! Мало токенов. Проверьте, есть ли в token_to_id юникодные символы, если нет - обновите nltk или возьмите другой токенизатор"
if len(token_to_id) < 1000000:
print "Алярм! Много токенов. Если вы знаете, что делаете - всё ок, если нет - возможно, вы слижком слабо обрезали токены по количеству"
"""
Explanation: Вырежем редкие токены
End of explanation
"""
def vectorize(strings, token_to_id, max_len=150):
    """Encode an iterable of raw strings as a fixed-width matrix of token ids.

    Each string is decoded from utf-8, lowercased and tokenized with the
    module-level `tokenizer`; tokens are mapped to ids via `token_to_id`
    (unknown tokens -> 0).  Every row is truncated or zero-padded to
    `max_len`.  Non-string entries (e.g. NaN for a missing description)
    become all-zero rows.
    """
    rows = []
    for text in strings:
        if type(text) is not str:
            # missing value (NaN float) -> a full row of padding ids
            rows.append([0] * max_len)
            continue
        words = tokenizer.tokenize(text.decode('utf8').lower())
        ids = [token_to_id.get(w, 0) for w in words][:max_len]
        ids.extend([0] * (max_len - len(ids)))
        rows.append(ids)
    return np.array(rows)
"""
Explanation: Заменим слова на их id
Для каждого описания установим максимальную длину.
* Если описание больше длины - обрежем, если меньше - дополним нулями.
* Таким образом, у нас получится матрица размера (число объявлений)x(максимальная длина)
* Элемент под индексами i,j - номер j-того слова i-того объявления
End of explanation
"""
print "Размер матрицы:",title_tokens.shape
for title, tokens in zip(df.title.values[:3],title_tokens[:3]):
print title,'->', tokens[:10],'...'
"""
Explanation: Пример формата данных
End of explanation
"""
# Take the purely numerical features as-is.
df_numerical_features = df[["phones_cnt","emails_cnt","urls_cnt","price"]]
# One-hot encode the product category/subcategory pair.
# DictVectorizer maps each distinct {"category": ..., "subcategory": ...}
# value to its own binary column.
from sklearn.feature_extraction import DictVectorizer
categories = []
for cat_str, subcat_str in df[["category","subcategory"]].values:
    cat_dict = {"category":cat_str,"subcategory":subcat_str}
    categories.append(cat_dict)
vectorizer = DictVectorizer(sparse=False)
cat_one_hot = vectorizer.fit_transform(categories)
cat_one_hot = pd.DataFrame(cat_one_hot,columns=vectorizer.feature_names_)
# Join numeric and one-hot features row-by-row (merge on the positional
# index); the merge key column "key_0" is an artifact and is dropped.
df_non_text = pd.merge(
    df_numerical_features,cat_one_hot,on = np.arange(len(cat_one_hot))
)
del df_non_text["key_0"]
"""
Explanation: Как вы видите, всё довольно грязно. Посмотрим, сожрёт ли это нейронка
Нетекстовые признаки
Часть признаков не являются строками текста: цена, количество телефонов, категория товара.
Их можно обработать отдельно.
End of explanation
"""
# Target: 1 if the ad was blocked (prohibited content), else 0.
target = df.is_blocked.values.astype('int32')
# Token-id matrix for the titles (max_len=15, see vectorize call above).
title_tokens = title_tokens.astype('int32')
# Token-id matrix for the descriptions (max_len=150).
desc_tokens = desc_tokens.astype('int32')
# All non-text features (numeric counts + one-hot categories).
df_non_text = df_non_text.astype('float32')
# Split every channel in a single call so the rows stay aligned
# across titles, descriptions, non-text features and the target.
from sklearn.cross_validation import train_test_split
data_tuple = train_test_split(title_tokens,desc_tokens,df_non_text.values,target)
title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = data_tuple
"""
Explanation: Поделим данные на обучение и тест
End of explanation
"""
save_prepared_data = True #сохранить
read_prepared_data = False #cчитать
#за 1 раз данные можно либо записать, либо прочитать, но не и то и другое вместе
assert not (save_prepared_data and read_prepared_data)
if save_prepared_data:
print "Сохраняем подготовленные данные... (может занять до 3 минут)"
import pickle
with open("preprocessed_data.pcl",'w') as fout:
pickle.dump(data_tuple,fout)
with open("token_to_id.pcl",'w') as fout:
pickle.dump(token_to_id,fout)
print "готово"
elif read_prepared_data:
print "Читаем сохранённые данные..."
import pickle
with open("preprocessed_data.pcl",'r') as fin:
data_tuple = pickle.load(fin)
title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = data_tuple
with open("token_to_id.pcl",'r') as fin:
token_to_id = pickle.load(fin)
#повторно импортируем библиотеки, чтобы было удобно перезапускать тетрадку с этой клетки
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
print "готово"
"""
Explanation: Сохраним данные [опционально]
В этот момент вы можете сохранить все НУЖНЫЕ данные на диск и перезапусатить тетрадку, после чего считать их - чтобы выкинуть всё ненужное.
рекомендуется, если у вас мало памяти
Для этого нужно один раз выполнить эту клетку с save_prepared_data=True. После этого можно начинать тетрадку с ЭТОЙ табы в режиме read_prepared_data=True
End of explanation
"""
#загрузим библиотеки
import lasagne
from theano import tensor as T
import theano
#3 входа и 1 выход
title_token_ids = T.matrix("title_token_ids",dtype='int32')
desc_token_ids = T.matrix("desc_token_ids",dtype='int32')
categories = T.matrix("categories",dtype='float32')
target_y = T.ivector("is_blocked")
"""
Explanation: Поучим нейронку
Поскольку у нас есть несколько источников данных, наша нейронная сеть будет немного отличаться от тех, что вы тренировали раньше.
Отдельный вход для заголовка
свёртка + global max pool или RNN
Отдельный вход для описания
свёртка + global max pool или RNN
Отдельный вход для категориальных признаков
обычные полносвязные слои или какие-нибудь трюки
Всё это нужно как-то смешать - например, сконкатенировать
Выход - обычный двухклассовый выход
1 сигмоидальный нейрон и binary_crossentropy
2 нейрона с softmax и categorical_crossentropy - то же самое, что 1 сигмоидальный
1 нейрон без нелинейности (lambda x: x) и hinge loss
End of explanation
"""
title_inp = lasagne.layers.InputLayer((None,title_tr.shape[1]),input_var=title_token_ids)
descr_inp = lasagne.layers.InputLayer((None,desc_tr.shape[1]),input_var=desc_token_ids)
cat_inp = lasagne.layers.InputLayer((None,nontext_tr.shape[1]), input_var=categories)
# Описание
descr_nn = lasagne.layers.EmbeddingLayer(descr_inp,input_size=len(token_to_id)+1,output_size=128)
#поменять порядок осей с [batch, time, unit] на [batch,unit,time], чтобы свёртки шли по оси времени, а не по нейронам
descr_nn = lasagne.layers.DimshuffleLayer(descr_nn, [0,2,1])
# 1D свёртка на ваш вкус
descr_nn = lasagne.layers.Conv1DLayer(descr_nn,num_filters=?,filter_size=?)
# максимум по времени для каждого нейрона
descr_nn = lasagne.layers.GlobalPoolLayer(descr_nn,pool_function=T.max)
#А ещё можно делать несколько параллельных свёрток разного размера или стандартный пайплайн
#1dconv -> 1d max pool ->1dconv и в конце global pool
# Заголовок
title_nn = <текстовая свёрточная сеть для заголовков (title_inp)>
# Нетекстовые признаки
cat_nn = <простая полносвязная сеть для нетекстовых признаков (cat_inp)>
nn = <объединение всех 3 сетей в одну (например lasagne.layers.concat) >
nn = lasagne.layers.DenseLayer(nn,1024)
nn = lasagne.layers.DropoutLayer(nn,p=0.05)
nn = lasagne.layers.DenseLayer(nn,1,nonlinearity=lasagne.nonlinearities.linear)
"""
Explanation: Архитектура нейронной сети
End of explanation
"""
#Все обучаемые параметры сети
weights = lasagne.layers.get_all_params(nn,trainable=True)
#Обычное предсказание нейронки
prediction = lasagne.layers.get_output(nn)[:,0]
#функция потерь для prediction
loss = lasagne.objectives.binary_hinge_loss(prediction,target_y,delta = 1.0).mean()
#Шаг оптимизации весов
updates = <Ваш любимый метод оптимизации весов>
"""
Explanation: Целевая функция и обновления весов
Делаем всё стандартно:
получаем предсказание
считаем функцию потерь
вычисляем обновления весов
компилируем итерацию обучения и оценки весов
Hinge loss
$ L_i = \max(0, \delta - t_i p_i) $
Важный параметр - delta - насколько глубоко пример должен быть в правильном классе, чтобы перестать нас волновать
В описании функции в документации может быть что-то про ограничения на +-1 - не верьте этому - главное, чтобы в функции по умолчанию стоял флаг binary = True
End of explanation
"""
#Предсказание нейронки без учёта dropout и прочего шума - если он есть
det_prediction = lasagne.layers.get_output(nn,deterministic=True)[:,0]
#функция потерь для det_prediction
det_loss = lasagne.objectives.binary_hinge_loss(det_prediction,target_y,delta = 1.0).mean()
"""
Explanation: Чтобы оценивать качество сети, в которой есть элемент случайности
Dropout, например,
Нужно отдельно вычислить ошибку для случая, когда dropout выключен (deterministic = True)
К слову, неплохо бы убедиться, что droput нам вообще нужен
End of explanation
"""
train_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[loss,prediction],updates = updates)
eval_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[det_loss,det_prediction])
"""
Explanation: Скомпилируем функции обучения и оценки качества
End of explanation
"""
#average precision at K
from oracle import APatK, score
# наш старый знакомый - итератор по корзинкам - теперь умеет работать с произвольным числом каналов (название, описание, категории, таргет)
def iterate_minibatches(*arrays,**kwargs):
    """Yield minibatches drawn in parallel from several aligned arrays.

    Keyword args: ``batchsize`` (default 100) and ``shuffle`` (default
    True).  When shuffling, one random permutation of row indices is
    drawn up front and every array is sampled with it, so rows stay
    aligned across channels.  A trailing batch smaller than
    ``batchsize`` is dropped.
    """
    batch = kwargs.get("batchsize",100)
    do_shuffle = kwargs.get("shuffle",True)
    n = len(arrays[0])
    if do_shuffle:
        order = np.arange(n)
        np.random.shuffle(order)
    for start in range(0, n - batch + 1, batch):
        # fancy indexing when shuffled, a cheap slice otherwise
        sel = order[start:start + batch] if do_shuffle else slice(start, start + batch)
        yield [arr[sel] for arr in arrays]
"""
Explanation: Главный цикл обучения
Всё как обычно - в цикле по минибатчам запускаем функцию обновления весов.
Поскольку выборка огромна, а чашки чая хватает в среднем на 100к примеров, будем на каждой эпохе пробегать только часть примеров.
End of explanation
"""
from sklearn.metrics import roc_auc_score, accuracy_score
n_epochs = 100
batch_size = 100
minibatches_per_epoch = 100
for i in range(n_epochs):
#training
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_tr,title_tr,nontext_tr,target_tr,batchsize=batch_size,shuffle=True)):
if j > minibatches_per_epoch:break
loss,pred_probas = train_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
print "Train:"
print '\tloss:',b_loss/b_c
print '\tacc:',accuracy_score(epoch_y_true,epoch_y_pred>0.)
print '\tauc:',roc_auc_score(epoch_y_true,epoch_y_pred)
print '\tap@k:',APatK(epoch_y_true,epoch_y_pred,K = int(len(epoch_y_pred)*0.025)+1)
#evaluation
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_ts,title_ts,nontext_tr,target_ts,batchsize=batch_size,shuffle=True)):
if j > minibatches_per_epoch: break
loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
print "Val:"
print '\tloss:',b_loss/b_c
print '\tacc:',accuracy_score(epoch_y_true,epoch_y_pred>0.)
print '\tauc:',roc_auc_score(epoch_y_true,epoch_y_pred)
print '\tap@k:',APatK(epoch_y_true,epoch_y_pred,K = int(len(epoch_y_pred)*0.025)+1)
print "Если ты видишь это сообщение, самое время сделать резервную копию ноутбука. \nНет, честно, здесь очень легко всё сломать"
"""
Explanation: Что можно покрутить?
batch_size - сколько примеров обрабатывается за 1 раз
Чем больше, тем оптимизация стабильнее, но тем и медленнее на начальном этапе
Возможно имеет смысл увеличивать этот параметр на поздних этапах обучения
minibatches_per_epoch - количество минибатчей, после которых эпоха принудительно завершается
Не влияет на обучение - при малых значениях просто будет чаще печататься отчёт
Ставить 10 или меньше имеет смысл только для того, чтобы убедиться, что ваша сеть не упала с ошибкой
n_epochs - сколько всего эпох сеть будет учиться
Никто не отменял n_epochs = 10**10 и остановку процесса вручную по возвращению с дачи/из похода.
Tips:
Если вы выставили небольшой minibatches_per_epoch, качество сети может сильно скакать возле 0.5 на первых итерациях, пока сеть почти ничему не научилась.
На первых этапах попытки стоит сравнивать в первую очередь по AUC, как по самой стабильной метрике.
Метрика Average Precision at top 2.5% (APatK) - сама по себе очень нестабильная на маленьких выборках, поэтому её имеет смысл оценивать на всех примерах (см. код ниже). Для менее, чем 10000 примеров она вовсе неинформативна.
Для сравнения методов оптимизации и регуляризаторов будет очень полезно собирать метрики качества после каждой итерации и строить график по ним после обучения
Как только вы убедились, что сеть не упала - имеет смысл дать ей покрутиться - на стандартном ноутбуке хотя бы пару часов.
End of explanation
"""
# Final evaluation: run the trained network over the whole test set in
# minibatches, accumulate predictions, then report loss/accuracy/AUC/AP@K.
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0   # b_c counts minibatches, b_loss accumulates their losses
# NOTE(review): `nontext_tr` looks like a *training*-set array mixed in with
# the test-set arrays (desc_ts, title_ts, target_ts) — confirm it should not
# be `nontext_ts`.
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
    iterate_minibatches(desc_ts,title_ts,nontext_tr,target_ts,batchsize=batch_size,shuffle=True)):
    loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)
    b_loss += loss
    b_c +=1
    epoch_y_true.append(b_y)
    epoch_y_pred.append(pred_probas)
# Stitch the per-batch targets/predictions back into flat arrays.
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
# Predictions are raw scores; > 0 thresholds them into class labels.
final_accuracy = accuracy_score(epoch_y_true,epoch_y_pred>0)
final_auc = roc_auc_score(epoch_y_true,epoch_y_pred)
# Average precision at the top 2.5% of ranked predictions (+1 avoids K == 0).
final_apatk = APatK(epoch_y_true,epoch_y_pred,K = int(len(epoch_y_pred)*0.025)+1)
print "Scores:"
print '\tloss:',b_loss/b_c
print '\tacc:',final_accuracy
print '\tauc:',final_auc
print '\tap@k:',final_apatk
score(final_accuracy,final_auc,final_apatk)
"""
Explanation: Final evaluation
Оценим качество модели по всей тестовой выборке.
End of explanation
"""
|
dipanjank/ml | data_analysis/computer_hardware_uci.ipynb | gpl-3.0 | import numpy as np
import pandas as pd
%pylab inline
pylab.style.use('ggplot')
import seaborn as sns
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
data = pd.read_csv(url, header=None)
data.head()
"""
Explanation: Computer Hardware Dataset Analysis - UCI
This is a regression analysis of the <a href="https://archive.ics.uci.edu/ml/datasets/Computer+Hardware">UCI Computer Analysis Dataset.</a>
End of explanation
"""
data.columns = ['VENDOR', 'MODEL', 'MYCT', 'MMIN',
'MMAX', 'CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP']
# Drop the ERP column - this is an estimate
data = data.drop('ERP', axis=1)
data.VENDOR.value_counts().plot(kind='barh')
# Drop the model column as well
data = data.drop('MODEL', axis=1)
"""
Explanation: Attribute Information
Attribute Information:
vendor name: 30
(adviser, amdahl,apollo, basf, bti, burroughs, c.r.d, cambex, cdc, dec,
dg, formation, four-phase, gould, honeywell, hp, ibm, ipl, magnuson,
microdata, nas, ncr, nixdorf, perkin-elmer, prime, siemens, sperry,
sratus, wang)
Model Name: many unique symbols
MYCT: machine cycle time in nanoseconds (integer)
MMIN: minimum main memory in kilobytes (integer)
MMAX: maximum main memory in kilobytes (integer)
CACH: cache memory in kilobytes (integer)
CHMIN: minimum channels in units (integer)
CHMAX: maximum channels in units (integer)
PRP: published relative performance (integer)
ERP: estimated relative performance from the original article (integer)
End of explanation
"""
feature_names = data.columns.drop('VENDOR')
for fname in feature_names:
_ = pylab.figure()
_ = data.loc[:, fname].plot(kind='hist', title=fname)
"""
Explanation: Univariate Analysis
End of explanation
"""
_, axes = pylab.subplots(6, figsize=(10, 21))
n_columns = data.columns.drop(['VENDOR', 'PRP'])
for i, fname in enumerate(n_columns):
sns.regplot(x=fname, y='PRP', data=data, ax=axes[i])
pylab.tight_layout()
"""
Explanation: Bivariate Analysis
End of explanation
"""
corrs = data.loc[:, n_columns].corrwith(data.loc[:, 'PRP'])
corrs.plot(kind='barh')
"""
Explanation: Correlations with Target Column
End of explanation
"""
f_corrs = data.loc[:, n_columns].corr()
sns.heatmap(f_corrs, annot=True)
"""
Explanation: Feature Correlations
End of explanation
"""
import statsmodels.formula.api as sm
model = sm.ols(formula='PRP ~ MMAX + MMIN + CACH + CHMAX', data=data)
result = model.fit()
result.summary()
"""
Explanation: The Regression Model
End of explanation
"""
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
n_splits = 3
fold = KFold(n_splits=n_splits, shuffle=True)
scores = []
for train_idx, test_idx in fold.split(data):
model = sm.ols(formula='PRP ~ MMAX + MMIN + CACH + CHMAX', data=data.loc[train_idx])
result = model.fit()
test_features = data.loc[test_idx].drop('PRP', axis=1)
predictions = result.predict(test_features)
actual = data.loc[test_idx, 'PRP']
score = r2_score(actual, predictions)
scores.append(score)
scores = pd.Series(scores)
scores.plot(kind='bar')
"""
Explanation: Cross Validation
End of explanation
"""
|
ercanezin/ce888labs | lab3/facebook_regression.ipynb | gpl-3.0 | df = pd.read_csv("./dataset_Facebook.csv", delimiter = ";")
features = ["Category",
"Page total likes",
"Type",
"Post Month",
"Post Hour",
"Post Weekday",
"Paid"]
df[features].head()
outcomes= ["Lifetime Post Total Reach",
"Lifetime Post Total Impressions",
"Lifetime Engaged Users",
"Lifetime Post Consumers",
"Lifetime Post Consumptions",
"Lifetime Post Impressions by people who have liked your Page",
"Lifetime Post reach by people who like your Page",
"Lifetime People who have liked your Page and engaged with your post",
"comment",
"like",
"share",
"Total Interactions"]
df[outcomes].head()
# convert a string variable to a categorical one
#types = list(set(df["Type"]))
#to_categorical = {types[i]:i for i in range(len(types))}
#df["Type"] = df["Type"].apply(lambda x: to_categorical[x])
df[["Type"]] = df[["Type"]].apply(LabelEncoder().fit_transform)
df.head()
"""
Explanation: We have loaded the necessary libraries above
Now let's load the data
End of explanation
"""
df = df.dropna()
#df = df[df.apply(lambda x: np.abs(x - x.mean()) / x.std() < 3).all(axis=1)]
outcomes_of_interest = ["Lifetime Post Consumers", "like"]
X_df = df[features].copy()
y_df = df[outcomes_of_interest].copy()
cat_features = ["Category",
"Type",
"Paid"]
X_df = pd.get_dummies(X_df, columns = cat_features)
print X_df.head()[["Category_1", "Category_2","Category_3"]].to_latex()
X = X_df.values
y = y_df.values.T[0]
y = (y-y.min())/(y.max() - y.min())
# # # import seaborn as sns
# y_df['id'] = range(1, len(df) + 1)
#y_df.head()
# sns_plot = sns.lmplot(x="id", y= attribute, data=y_df, fit_reg=False, aspect = 2)
# sns_plot.savefig("scaterplot_lpc.png",bbox_inches='tight')
# sns_plot.savefig("scaterplot_lpc.pdf",bbox_inches='tight')
# sns_plot = sns.jointplot(x="Lifetime Post Consumers", y="like", data=y_df, ratio = 2)
# sns_plot.savefig("joint_plot.png",bbox_inches='tight')
# sns_plot.savefig("joint_plot.pdf",bbox_inches='tight')
# sns.distplot(y, kde=False, rug=True)
# sns_plot.savefig("histogram_lpc.png",bbox_inches='tight')
# sns_plot.savefig("histogram_lpc.pdf",bbox_inches='tight')
n_test = 100
n_repeat = 1000
#estimator = DecisionTreeRegressor()
estimator = RandomForestRegressor()
#estimator = BayesianRidge(normalize = True)
# Compute predictions
y_predicts = np.zeros((n_repeat, len(X)))
#stdy = y/y.max()
for i in range(n_repeat):
sample = np.random.choice(range(len(X)),replace = True, size = len(X))
train_ids = sample[:-n_test]
test_ids = sample[-n_test:]
test_ids = np.setdiff1d(test_ids, train_ids)
if(len(test_ids) == 0 ):
continue
X_train,y_train = X[train_ids], y[train_ids]
X_test, y_test = X[test_ids], y[test_ids]
estimator.fit(X_train, y_train)
y_predict = estimator.predict(X_test)
y_predicts[i,test_ids] = y_predict
y_bias = (y - np.mean(y_predicts, axis=0)) **2
y_error = ((y - y_predicts) **2).mean()
y_var = np.var(y_predicts, axis=0, ddof = 1)
print np.mean(y_bias) + np.mean(y_var)
clf_type = "Decision tree"
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
"+ {3:.4f} (var)".format(clf_type,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var)))
print("{0}: {1:.4f} ((bias^2) + (var)) = {2:.4f} (bias^2) "
"+ {3:.4f} (var)".format(clf_type,
np.mean(y_bias) + np.mean(y_var),
np.mean(y_bias),
np.mean(y_var)))
clf = RandomForestRegressor(n_estimators = 1000,max_depth = 2)
from sklearn.linear_model import SGDRegressor, BayesianRidge
clf = BayesianRidge(normalize = True)
dummy_clf = DummyRegressor()
scores = cross_val_score(clf, X, y, cv=10,scoring = make_scorer(mse))
dummy_scores = cross_val_score(dummy_clf, X, y, cv=10, scoring = make_scorer(mse))
print("MSE: %0.8f (+/- %0.8f)" % (scores.mean(), scores.std()))
print("Dummy MSE: %0.8f (+/- %0.8f)" % (dummy_scores.mean(), dummy_scores.std()))
#print clf
"""
Explanation: Now let's prepare the data by cleaning it up and choosing the relevant column we would like to predict
We can now use the bootstrap to find an approximation of the bias and the variance
End of explanation
"""
#clf = RandomForestRegressor(n_estimators = 500, criterion = "mse")
#clf = DecisionTreeRegressor()
# from sklearn.ensemble import BaggingRegressor
# from sklearn.ensemble import AdaBoostRegressor
clf =BayesianRidge()
print X.shape
# clf = AdaBoostRegressor(DecisionTreeRegressor(),n_estimators = 1000)#
print X.shape, y.shape
stdy = y
clf.fit(X,stdy)
print mse(stdy,clf.predict(X))
"""
Explanation: Now let's train the regressor on the whole dataset
End of explanation
"""
|
rajul/tvb-library | tvb/simulator/demos/region_deterministic_larterbreakspear.ipynb | gpl-2.0 | # Third party python libraries
import numpy
# Try and import from "The Virtual Brain"
from tvb.simulator.lab import *
from tvb.datatypes.time_series import TimeSeriesRegion
import tvb.analyzers.fmri_balloon as bold
from tvb.simulator.plot import timeseries_interactive as timeseries_interactive
"""
Explanation: Explore LarterBreakspear model.
Run time: 20 min (workstation circa 2012 Intel Xeon W3520@2.67Ghz)
Memory requirement: ~300 MB
Storage requirement: ~150MB
NOTE: stats were made for a simulation using the 998 region Hagmann
connectivity matrix.
End of explanation
"""
LOG.info("Configuring...")
#Initialise a Model, Coupling, and Connectivity.
lb = models.LarterBreakspear(QV_max=1.0, QZ_max=1.0,
d_V=0.65, d_Z=0.65,
aee=0.36, ani=0.4, ane=1.0, C=0.1)
lb.variables_of_interest = ["V", "W", "Z"]
white_matter = connectivity.Connectivity(load_default=True)
white_matter.speed = numpy.array([7.0])
white_matter_coupling = coupling.HyperbolicTangent(a=0.5*lb.QV_max,
midpoint=lb.VT,
sigma=lb.d_V)
#Initialise an Integrator
heunint = integrators.HeunDeterministic(dt=0.2)
#Initialise some Monitors with period in physical time
mon_tavg = monitors.TemporalAverage(period=2.)
mon_bold = monitors.Bold(period=2000.)
#Bundle them
what_to_watch = (mon_bold, mon_tavg)
#Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = simulator.Simulator(model = lb,
connectivity = white_matter,
coupling = white_matter_coupling,
integrator = heunint,
monitors = what_to_watch)
sim.configure()
LOG.info("Starting simulation...")
#Perform the simulation
bold_data, bold_time = [], []
tavg_data, tavg_time = [], []
for raw, tavg in sim(simulation_length=480000):
if not raw is None:
bold_time.append(raw[0])
bold_data.append(raw[1])
if not tavg is None:
tavg_time.append(tavg[0])
tavg_data.append(tavg[1])
LOG.info("Finished simulation.")
"""
Explanation: Perform the simulation
End of explanation
"""
#Make the lists numpy.arrays for easier use.
LOG.info("Converting result to array...")
TAVG_TIME = numpy.array(tavg_time)
BOLD_TIME = numpy.array(bold_time)
BOLD = numpy.array(bold_data)
TAVG = numpy.array(tavg_data)
#Create TimeSeries instance
tsr = TimeSeriesRegion(data = TAVG,
time = TAVG_TIME,
sample_period = 2.)
tsr.configure()
#Create and run the monitor/analyser
bold_model = bold.BalloonModel(time_series = tsr)
bold_data = bold_model.evaluate()
bold_tsr = TimeSeriesRegion(connectivity = white_matter,
data = bold_data.data,
time = bold_data.time)
#Pretty pictures...
tsi = timeseries_interactive.TimeSeriesInteractive(time_series = bold_tsr)
tsi.configure()
tsi.show()
"""
Explanation: Plot pretty pictures of what we just did
End of explanation
"""
|
facaiy/book_notes | machine_learning/tree/decision_tree/demo.ipynb | cc0-1.0 | from sklearn.datasets import load_iris
data = load_iris()
# 准备特征数据
X = pd.DataFrame(data.data,
columns=["sepal_length", "sepal_width", "petal_length", "petal_width"])
X.head(2)
# 准备标签数据
y = pd.DataFrame(data.target, columns=['target'])
y.replace(to_replace=range(3), value=data.target_names, inplace=True)
y.head(3)
# 组建样本 [特征,标签]
samples = pd.concat([X, y], axis=1) #, keys=["x", "y"])
samples.head(3)
"""
Explanation: 决策树简介和 Python 实现
参考:
Building a decision tree from scratch - a beginner tutorial
9.2 Tree-Based Methods - The Elements of Statistical Learning
Classification and Regression Trees (CART) Theory and Applications
0. 基本介绍
本文主要是参照 Tree-Based Methods - The Elements of Statistical Learning 来实现一个简化版范例,其算法是 CART。
决策树的思想本身非常朴素,关于它的基本介绍在网上已经非常丰富,比如:
算法杂货铺——分类算法之决策树(Decision tree)
其主要问题是在每次决策时找到一个分割点,让生成的子集尽可能地纯净。这里涉及到四个问题:
如何分割样本?
如何评价子集的纯净度?
如何找到单个最佳的分割点,其子集最为纯净?
如何找到最佳的分割点序列,其最终分割子集总体最为纯净?
接下来,围绕上述问题,一一概要说明,并加以演示。
加载数据
古话说,「三军未动,粮草先行」。
我们先加载演示数据,使用的是 sklearn 自带的测试用例。
End of explanation
"""
def splitter(samples, feature, threshold):
    """Split *samples* into two subsets on ``feature`` at ``threshold``.

    Rows where ``feature < threshold`` form ``left_nodes``; the remaining
    rows (``feature >= threshold``) form ``right_nodes``.
    """
    below = samples[feature] < threshold
    return {
        "left_nodes": samples[below],
        "right_nodes": samples[~below],
    }
split = splitter(samples, "sepal_length", 5)
# 左子集
x_l = split["left_nodes"].loc[:, "target"].value_counts()
x_l
# 右子集
x_r = split["right_nodes"].loc[:, "target"].value_counts()
x_r
"""
Explanation: 1.0 如何分割样本
决策树的分割方法是取一个特征 $f$ 和阈值 $t$,以此为界将样本 $X$ 拆分为两个子集 $X_l, X_r$。其数学表达形同:
\begin{align}
X = \begin{cases}
X_l, \ \text{if } X[f] < t \
X_r, \ \text{if } X[f] \geq t
\end{cases}
\end{align}
End of explanation
"""
def calc_class_proportion(node):
    """Return the fraction of samples belonging to each label in ``node``."""
    labels = node["target"]
    counts = labels.value_counts()
    return counts / labels.count()
calc_class_proportion(split["left_nodes"])
calc_class_proportion(split["right_nodes"])
"""
Explanation: 2. 如何评价子集的纯净度?
从常理来说,我们希望分割子集尽可能地纯净,最好是单个子集就只含有一类标签,从而保证决策结果精准。
那么什么样的评价函数,可以用来度量各子集的纯净度呢?
以刚才计算结果为例, $x_l$ 主要标签是 setosa,非常纯净,而 $x_r$ 则三种标签势均力敌,非常混杂。所以思路是,若一种标签在子集中占比非常大,则此子集就较纯净;若各标签占比差别不大,就较为混杂。
常用的评价函数正是计算各标签 $c_k$ 在子集中的占比 $p_k = c_k / \sum (c_k)$,并通过组合 $p_k$ 来描述占比集中或分散。
End of explanation
"""
def misclassification_error(node):
    """Misclassification rate: 1 minus the majority-class proportion."""
    proportions = calc_class_proportion(node)
    return 1 - proportions.max()
misclassification_error(split["left_nodes"])
misclassification_error(split["right_nodes"])
"""
Explanation: 主要的评价函数有三种,它们评价的是集合的不纯度(值越大,集合越混杂)。
先做些数学定义以便于描述:
假设对于集合 $m$ 有 $N_m$ 个样本,可分割成 $R_m$ 子集。
若总的标签类别有 $K$ 种,则标签 $k$ 在此集合中的占比为:
\begin{equation}
\hat{p}{m k} = \frac{1}{N_m} \displaystyle \sum{x_i \in R_m} I(y_i = k)
\end{equation}
且令标签 $k$ 是占比最大的标签,即 $k(m) = \operatorname{arg max}k \hat{p}{m k}$.
1. Misclassification error
我们一般把集合的分类结果定义为占比最大的标签,那么落在此集合中的其它标签就是误分类。其比率是 $1 - \hat{p}_{m k}(m)$.
End of explanation
"""
binary_class = pd.Series(np.arange(0, 1.01, 0.01)).to_frame(name="p")
binary_class["1-p"] = 1 - binary_class["p"]
binary_class.head(3)
"""
Explanation: 对于二分类问题,
End of explanation
"""
binary_class["misclass"] = binary_class.apply(lambda x: 1 - x.max(), axis=1)
binary_class.plot(x="p", y="misclass")
"""
Explanation: 误分类率和占比 $p$ 的关系可划图为:
End of explanation
"""
def gini_index(node):
    """Gini impurity of ``node``: sum over labels of p_k * (1 - p_k)."""
    p = calc_class_proportion(node)
    complement = 1 - p
    return p.mul(complement).sum()
gini_index(split["left_nodes"])
gini_index(split["right_nodes"])
"""
Explanation: 当 $p=0.5$,两种标签各占一半,不纯度最高;当 $p=0$ 或 $p=1$, 只含有其中一种标签时,不纯度最低。
2. Gini index
这里的基尼系数并非是经济上测量分配公平的指标。
它的思路是从集合中随机抽取元素 $a \in K_p$,再以 $K_p$ 在集合中的分布为参考随机给 $a$ 分配标签,那么误分配率就是基尼系数。
具体到决策树的节点 $m$ 上,标签 $k_i$ 的占比为 $p_{k_i m}$。则抽中属于标签 $k_i$ 的元素概率是 $p_{k_i m}$,误分配到其它标签的概率是 $\sum_{k' \neq k_i} p_{k_i m} p_{k' m}$。对于整个集合的标签则是:
\begin{equation}
G(m) = \displaystyle \sum_{k \neq k'} p_{k m} p_{k' m} \, \overset{乘法分配律}{=} \sum_{k = 1}^{K} p_{k m} (1 - p_{k m})
\end{equation}
End of explanation
"""
binary_class["gini"] = (binary_class["p"] * binary_class["1-p"] * 2)
binary_class.plot(x="p", y="gini")
"""
Explanation: 在二分类中,基尼系数和占比 $p$ 的关系可划图为:
End of explanation
"""
def cross_entropy(node):
    """Cross-entropy impurity of ``node``: -sum over labels of p_k * log(p_k)."""
    p = calc_class_proportion(node)
    log_p = p.apply(np.log)
    return -(p * log_p).sum()
cross_entropy(split["left_nodes"])
cross_entropy(split["right_nodes"])
"""
Explanation: 3. Cross-entropy
ref:
Qualitively what is Cross Entropy
这个损失函数的思路来源于信息论:若某事件的发生概率是 $p$,则需至少 $\log_2 (1/p)$ 位编码。那么对于所有事件,其最优编码的平均字长为 $\sum_i p_i \log_2 (1 / p_i)$。
借用其思路,对于节点来说,其内容越混杂,就需要越多字长来区分。所以这里 cross-entropy 定义为:
\begin{equation}
C(m) = \displaystyle \sum_{k=1}^K p_{m k} \log (1 / p_{m k}) \, = - \sum_{k=1}^K p_{m k} \log p_{m k}
\end{equation}
End of explanation
"""
x = binary_class[["p", "1-p"]]
binary_class["cross_entropy"] = -(x * np.log(x)).sum(axis=1)
binary_class.plot(x="p", y="cross_entropy")
"""
Explanation: 在二分类中,cross-entropy 和占比 $p$ 的关系可划图为:
End of explanation
"""
binary_class.plot(x="p", y=["misclass", "gini", "cross_entropy"])
"""
Explanation: 在二分类问题中,三种评价函数的比较如图:
End of explanation
"""
binary_class["cross_entropy_scaled"] = binary_class["cross_entropy"] / binary_class["cross_entropy"].max() * 0.5
binary_class.plot(x="p", y=["misclass", "gini", "cross_entropy_scaled"], ylim=[0,0.55])
"""
Explanation: 为了便于比较,我们将 cross_entropy 也放缩到 $(0.5, 0.5)$。
End of explanation
"""
def calc_impurity_measure(node, feathure, threshold, measure, min_nodes=5):
    """Impurity decrease obtained by splitting ``node`` on ``feathure``/``threshold``.

    Computes G(node) - w_l * G(left) - w_r * G(right), where ``measure`` is the
    impurity function (e.g. gini_index) and w_l/w_r are the child size
    fractions. Returns 0 when either child would be too small.

    NOTE(review): "feathure" is a typo for "feature" — kept unchanged because
    callers may pass it by keyword.
    """
    child = splitter(node, feathure, threshold)
    left = child["left_nodes"]
    right = child["right_nodes"]
    # Reject splits that leave a child with too few samples.
    if left.shape[0] <= min_nodes or right.shape[0] <= min_nodes:
        return 0
    impurity = pd.DataFrame([],
                            columns=["score", "rate"],
                            index=[])
    # Children get negated scores so the final weighted sum is parent - children.
    impurity.loc["all"] = [measure(node), node.shape[0]]
    impurity.loc["left"] = [-measure(left), left.shape[0]]
    impurity.loc["right"] = [-measure(right), right.shape[0]]
    # Normalise counts to fractions of the parent ("all" becomes exactly 1).
    impurity["rate"] /= impurity.at["all", "rate"]
    logger.info(impurity)
    return (impurity["score"] * impurity["rate"]).sum()
calc_impurity_measure(samples, "sepal_length", 5, gini_index)
calc_impurity_measure(samples, "sepal_length", 1, gini_index)
"""
Explanation: 可以看到,识分类率在整个区间是均一的,而 cross_entropy 越靠近纯净,其值变化越剧烈。所以 cross_entropy 对纯净更敏感的特性,有利于让结果子集更纯净,其使用相对较多。
3. 如何找到单个最佳的分割点,其子集最为纯净?
单个最佳分割点,涉及三个问题:
对于单次分割,分割前和分割后,集合的纯净度提升了多少?
给定一个特征,纯净度提升最大的阈值是多少?
对于多个特征,哪一个特征的最佳阈值对纯净度提升最大?
3.1 对于单次分割,分割前和分割后,集合的纯净度提升了多少?
令测量不纯度的函数为 $G$,
对一个节点 $m$ 来说,若其按分割方法 $f$ 得到子集 $m_l$ 和 $m_r$,则总的不纯度减少量为:
\begin{equation}
G(m) - G(m_l) - G(m_r)
\end{equation}
End of explanation
"""
def find_best_threshold(node, feature, measure):
    """Return the threshold for ``feature`` with the largest impurity decrease.

    Candidate thresholds are the 0/20/40/60/80% quantiles of the feature.
    Returns None when no candidate improves purity.

    NOTE(review): relies on old pandas semantics where ``Series.argmax()``
    returns the *index label* (the threshold). Modern pandas renamed this to
    ``idxmax()``; on current versions ``argmax`` returns a positional index —
    verify against the pinned pandas version.
    """
    threshold_candidates = node[feature].quantile(np.arange(0, 1, 0.2))
    # Series indexed by candidate threshold, valued by its impurity decrease.
    res = pd.Series([], name=feature)
    for t in threshold_candidates:
        res[t] = calc_impurity_measure(node, feature, t, measure)
    logger.info(res)
    if res.max() == 0:
        return None
    else:
        return res.argmax()
find_best_threshold(samples, "sepal_width", gini_index)
find_best_threshold(samples, "sepal_length", gini_index)
"""
Explanation: 3.2. 给定一个特征,纯净度提升最大的阈值是多少?
对于一个给定的特征,理论上通过枚取所有可能的阈值,从中找到最大减少量的阈值点,就是此特征的最佳分隔点。
但现实中,很多特征是连续的,或者阈值点太多,全部穷尽并不现实,往往需要用到最优化的寻优方法。这里为了简易起见,我们对特征的值由小到大设了10个分位点,进行计算。
End of explanation
"""
def find_best_split(node, measure):
    """Find the (feature, threshold) pair with the largest impurity decrease.

    Parameters
    ----------
    node : pandas DataFrame
        Samples with a "target" column plus one column per feature.
    measure : callable
        Impurity function scoring a node (e.g. ``gini_index``).

    Returns
    -------
    dict with keys "feature", "threshold" and "child" (the two sub-nodes
    produced by ``splitter``), or None when the node is already pure or no
    split improves purity.
    """
    # A pure node (only one class left) cannot be improved by splitting.
    if node["target"].unique().shape[0] <= 1:
        return None
    best_feature, best_threshold, best_gain = None, None, 0
    for f in node.drop("target", axis=1).columns:
        t = find_best_threshold(node, f, measure)
        if t is None:
            continue
        # BUG FIX: the original stored the *threshold value* in a Series named
        # `purity_gain` and took its argmax, i.e. it picked the feature whose
        # best threshold happened to be numerically largest. Compare features
        # by their actual impurity decrease instead.
        gain = calc_impurity_measure(node, f, t, measure)
        if gain > best_gain:
            best_feature, best_threshold, best_gain = f, t, gain
    if best_feature is None:
        return None
    best_split = {"feature": best_feature, "threshold": best_threshold}
    best_split["child"] = splitter(node, **best_split)
    return best_split
best_split = find_best_split(samples, gini_index)
[best_split[x] for x in ["feature", "threshold"]]
"""
Explanation: 3.3. 对于多个特征,哪一个特征的最佳阈值对纯净度提升最大?
显然,最暴力的方法是:每次分割,我们穷尽所有特征,即可找到对此节点最佳分割点
End of explanation
"""
class BinaryNode:
    """A node of a binary decision tree grown greedily from ``samples``."""
    def __init__(self, samples, max_depth, measure=gini_index):
        self.samples = samples        # DataFrame of [features, "target"] rows
        self.max_depth = max_depth    # depth at which growth is cut off
        self.measure = measure        # impurity function used for splitting
        self.is_leaf = False          # becomes True when this node stops splitting
        self.class_ = None            # majority class, set only on leaves
        self.left = None              # child for rows with feature < threshold
        self.right = None             # child for rows with feature >= threshold
        self.best_split = None        # dict from find_best_split, None on leaves
    def split(self, depth):
        """Recursively grow the subtree rooted at this node (pre-order DFS)."""
        # Depth limit reached: turn this node into a leaf labelled by the
        # majority class of its samples.
        if depth > self.max_depth:
            self.is_leaf = True
            self.class_ = self.samples["target"].value_counts().argmax()
            return
        best_split = find_best_split(self.samples, self.measure)
        # NOTE(review): pd.isnull on a dict returns False, so this only
        # detects the None case — confirm that is the intended check.
        if pd.isnull(best_split):
            self.is_leaf = True
            self.class_ = self.samples["target"].value_counts().argmax()
            return
        self.best_split = best_split
        # Each child drops the feature just used, so it is split on at most
        # once along any root-to-leaf path.
        left = self.best_split["child"]["left_nodes"]
        self.left = BinaryNode(left.drop(best_split["feature"], axis=1), self.max_depth)
        right = self.best_split["child"]["right_nodes"]
        self.right = BinaryNode(right.drop(best_split["feature"], axis=1), self.max_depth)
        # pre-order depth-first traversal
        self.left.split(depth+1)
        self.right.split(depth+1)
binaryNode = BinaryNode(samples, 3)
binaryNode.split(0)
def show(node, depth):
    """Print the tree rooted at ``node`` sideways (left subtree on top).

    Leaves print their class label; internal nodes print "feature: threshold".
    ``depth`` controls the tab indentation.
    """
    if node.left:
        show(node.left, depth+1)
    if node.is_leaf:
        # Leaves are pushed two extra tab stops to the right for readability.
        print("{}{}".format("\t"*(depth+2), node.class_))
        return
    else:
        print("{}{}: {}".format("\t"*depth,
                              node.best_split["feature"],
                              node.best_split["threshold"]))
    if node.right:
        show(node.right, depth+1)
show(binaryNode, 0)
"""
Explanation: 4. 如何找到最佳的分割点序列,其最终分割子集总体最为纯净?
搜索全局最优解在目前还没有有效的方法,所以退一步,我们用贪婪的思想,在每次分割时取最优,希望由局部最优的分割序列能够达到全局最优的效果。
我们使用递归的方法由上而下依次计算,在处理节点顺序时使用深度优先方法组建出决策树。
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.13.2/examples/notebooks/generated/exponential_smoothing.ipynb | bsd-3-clause | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
%matplotlib inline
data = [
446.6565,
454.4733,
455.663,
423.6322,
456.2713,
440.5881,
425.3325,
485.1494,
506.0482,
526.792,
514.2689,
494.211,
]
index = pd.date_range(start="1996", end="2008", freq="A")
oildata = pd.Series(data, index)
data = [
17.5534,
21.86,
23.8866,
26.9293,
26.8885,
28.8314,
30.0751,
30.9535,
30.1857,
31.5797,
32.5776,
33.4774,
39.0216,
41.3864,
41.5966,
]
index = pd.date_range(start="1990", end="2005", freq="A")
air = pd.Series(data, index)
data = [
263.9177,
268.3072,
260.6626,
266.6394,
277.5158,
283.834,
290.309,
292.4742,
300.8307,
309.2867,
318.3311,
329.3724,
338.884,
339.2441,
328.6006,
314.2554,
314.4597,
321.4138,
329.7893,
346.3852,
352.2979,
348.3705,
417.5629,
417.1236,
417.7495,
412.2339,
411.9468,
394.6971,
401.4993,
408.2705,
414.2428,
]
index = pd.date_range(start="1970", end="2001", freq="A")
livestock2 = pd.Series(data, index)
data = [407.9979, 403.4608, 413.8249, 428.105, 445.3387, 452.9942, 455.7402]
index = pd.date_range(start="2001", end="2008", freq="A")
livestock3 = pd.Series(data, index)
data = [
41.7275,
24.0418,
32.3281,
37.3287,
46.2132,
29.3463,
36.4829,
42.9777,
48.9015,
31.1802,
37.7179,
40.4202,
51.2069,
31.8872,
40.9783,
43.7725,
55.5586,
33.8509,
42.0764,
45.6423,
59.7668,
35.1919,
44.3197,
47.9137,
]
index = pd.date_range(start="2005", end="2010-Q4", freq="QS-OCT")
aust = pd.Series(data, index)
"""
Explanation: Exponential smoothing
Let us consider chapter 7 of the excellent treatise on the subject of Exponential Smoothing By Hyndman and Athanasopoulos [1].
We will work through all the examples in the chapter as they unfold.
[1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles and practice. OTexts, 2014.
Loading data
First we load some data. We have included the R data in the notebook for expedience.
End of explanation
"""
ax = oildata.plot()
ax.set_xlabel("Year")
ax.set_ylabel("Oil (millions of tonnes)")
print("Figure 7.1: Oil production in Saudi Arabia from 1996 to 2007.")
"""
Explanation: Simple Exponential Smoothing
Let's use Simple Exponential Smoothing to forecast the oil data below.
End of explanation
"""
fit1 = SimpleExpSmoothing(oildata, initialization_method="heuristic").fit(
smoothing_level=0.2, optimized=False
)
fcast1 = fit1.forecast(3).rename(r"$\alpha=0.2$")
fit2 = SimpleExpSmoothing(oildata, initialization_method="heuristic").fit(
smoothing_level=0.6, optimized=False
)
fcast2 = fit2.forecast(3).rename(r"$\alpha=0.6$")
fit3 = SimpleExpSmoothing(oildata, initialization_method="estimated").fit()
fcast3 = fit3.forecast(3).rename(r"$\alpha=%s$" % fit3.model.params["smoothing_level"])
plt.figure(figsize=(12, 8))
plt.plot(oildata, marker="o", color="black")
plt.plot(fit1.fittedvalues, marker="o", color="blue")
(line1,) = plt.plot(fcast1, marker="o", color="blue")
plt.plot(fit2.fittedvalues, marker="o", color="red")
(line2,) = plt.plot(fcast2, marker="o", color="red")
plt.plot(fit3.fittedvalues, marker="o", color="green")
(line3,) = plt.plot(fcast3, marker="o", color="green")
plt.legend([line1, line2, line3], [fcast1.name, fcast2.name, fcast3.name])
"""
Explanation: Here we run three variants of simple exponential smoothing:
1. In fit1 we do not use the auto optimization but instead choose to explicitly provide the model with the $\alpha=0.2$ parameter
2. In fit2 as above we choose an $\alpha=0.6$
3. In fit3 we allow statsmodels to automatically find an optimized $\alpha$ value for us. This is the recommended approach.
End of explanation
"""
fit1 = Holt(air, initialization_method="estimated").fit(
smoothing_level=0.8, smoothing_trend=0.2, optimized=False
)
fcast1 = fit1.forecast(5).rename("Holt's linear trend")
fit2 = Holt(air, exponential=True, initialization_method="estimated").fit(
smoothing_level=0.8, smoothing_trend=0.2, optimized=False
)
fcast2 = fit2.forecast(5).rename("Exponential trend")
fit3 = Holt(air, damped_trend=True, initialization_method="estimated").fit(
smoothing_level=0.8, smoothing_trend=0.2
)
fcast3 = fit3.forecast(5).rename("Additive damped trend")
plt.figure(figsize=(12, 8))
plt.plot(air, marker="o", color="black")
plt.plot(fit1.fittedvalues, color="blue")
(line1,) = plt.plot(fcast1, marker="o", color="blue")
plt.plot(fit2.fittedvalues, color="red")
(line2,) = plt.plot(fcast2, marker="o", color="red")
plt.plot(fit3.fittedvalues, color="green")
(line3,) = plt.plot(fcast3, marker="o", color="green")
plt.legend([line1, line2, line3], [fcast1.name, fcast2.name, fcast3.name])
"""
Explanation: Holt's Method
Let's take a look at another example.
This time we use air pollution data and Holt's Method.
We will fit three examples again.
1. In fit1 we again choose not to use the optimizer and provide explicit values for $\alpha=0.8$ and $\beta=0.2$
2. In fit2 we do the same as in fit1 but choose to use an exponential model rather than a Holt's additive model.
3. In fit3 we used a damped versions of the Holt's additive model but allow the dampening parameter $\phi$ to be optimized while fixing the values for $\alpha=0.8$ and $\beta=0.2$
End of explanation
"""
fit1 = SimpleExpSmoothing(livestock2, initialization_method="estimated").fit()
fit2 = Holt(livestock2, initialization_method="estimated").fit()
fit3 = Holt(livestock2, exponential=True, initialization_method="estimated").fit()
fit4 = Holt(livestock2, damped_trend=True, initialization_method="estimated").fit(
damping_trend=0.98
)
fit5 = Holt(
livestock2, exponential=True, damped_trend=True, initialization_method="estimated"
).fit()
params = [
"smoothing_level",
"smoothing_trend",
"damping_trend",
"initial_level",
"initial_trend",
]
results = pd.DataFrame(
index=[r"$\alpha$", r"$\beta$", r"$\phi$", r"$l_0$", "$b_0$", "SSE"],
columns=["SES", "Holt's", "Exponential", "Additive", "Multiplicative"],
)
results["SES"] = [fit1.params[p] for p in params] + [fit1.sse]
results["Holt's"] = [fit2.params[p] for p in params] + [fit2.sse]
results["Exponential"] = [fit3.params[p] for p in params] + [fit3.sse]
results["Additive"] = [fit4.params[p] for p in params] + [fit4.sse]
results["Multiplicative"] = [fit5.params[p] for p in params] + [fit5.sse]
results
"""
Explanation: Seasonally adjusted data
Lets look at some seasonally adjusted livestock data. We fit five Holt's models.
The below table allows us to compare results when we use exponential versus additive and damped versus non-damped.
Note: fit4 does not allow the parameter $\phi$ to be optimized by providing a fixed value of $\phi=0.98$
End of explanation
"""
for fit in [fit2, fit4]:
pd.DataFrame(np.c_[fit.level, fit.trend]).rename(
columns={0: "level", 1: "slope"}
).plot(subplots=True)
plt.show()
print(
"Figure 7.4: Level and slope components for Holt’s linear trend method and the additive damped trend method."
)
"""
Explanation: Plots of Seasonally Adjusted Data
The following plots allow us to evaluate the level and slope/trend components of the above table's fits.
End of explanation
"""
fit1 = SimpleExpSmoothing(livestock2, initialization_method="estimated").fit()
fcast1 = fit1.forecast(9).rename("SES")
fit2 = Holt(livestock2, initialization_method="estimated").fit()
fcast2 = fit2.forecast(9).rename("Holt's")
fit3 = Holt(livestock2, exponential=True, initialization_method="estimated").fit()
fcast3 = fit3.forecast(9).rename("Exponential")
fit4 = Holt(livestock2, damped_trend=True, initialization_method="estimated").fit(
damping_trend=0.98
)
fcast4 = fit4.forecast(9).rename("Additive Damped")
fit5 = Holt(
livestock2, exponential=True, damped_trend=True, initialization_method="estimated"
).fit()
fcast5 = fit5.forecast(9).rename("Multiplicative Damped")
ax = livestock2.plot(color="black", marker="o", figsize=(12, 8))
livestock3.plot(ax=ax, color="black", marker="o", legend=False)
fcast1.plot(ax=ax, color="red", legend=True)
fcast2.plot(ax=ax, color="green", legend=True)
fcast3.plot(ax=ax, color="blue", legend=True)
fcast4.plot(ax=ax, color="cyan", legend=True)
fcast5.plot(ax=ax, color="magenta", legend=True)
ax.set_ylabel("Livestock, sheep in Asia (millions)")
plt.show()
print(
"Figure 7.5: Forecasting livestock, sheep in Asia: comparing forecasting performance of non-seasonal methods."
)
"""
Explanation: Comparison
Here we plot a comparison Simple Exponential Smoothing and Holt's Methods for various additive, exponential and damped combinations. All of the models parameters will be optimized by statsmodels.
End of explanation
"""
fit1 = ExponentialSmoothing(
aust,
seasonal_periods=4,
trend="add",
seasonal="add",
use_boxcox=True,
initialization_method="estimated",
).fit()
fit2 = ExponentialSmoothing(
aust,
seasonal_periods=4,
trend="add",
seasonal="mul",
use_boxcox=True,
initialization_method="estimated",
).fit()
fit3 = ExponentialSmoothing(
aust,
seasonal_periods=4,
trend="add",
seasonal="add",
damped_trend=True,
use_boxcox=True,
initialization_method="estimated",
).fit()
fit4 = ExponentialSmoothing(
aust,
seasonal_periods=4,
trend="add",
seasonal="mul",
damped_trend=True,
use_boxcox=True,
initialization_method="estimated",
).fit()
results = pd.DataFrame(
index=[r"$\alpha$", r"$\beta$", r"$\phi$", r"$\gamma$", r"$l_0$", "$b_0$", "SSE"]
)
params = [
"smoothing_level",
"smoothing_trend",
"damping_trend",
"smoothing_seasonal",
"initial_level",
"initial_trend",
]
results["Additive"] = [fit1.params[p] for p in params] + [fit1.sse]
results["Multiplicative"] = [fit2.params[p] for p in params] + [fit2.sse]
results["Additive Dam"] = [fit3.params[p] for p in params] + [fit3.sse]
results["Multiplica Dam"] = [fit4.params[p] for p in params] + [fit4.sse]
ax = aust.plot(
figsize=(10, 6),
marker="o",
color="black",
title="Forecasts from Holt-Winters' multiplicative method",
)
ax.set_ylabel("International visitor night in Australia (millions)")
ax.set_xlabel("Year")
fit1.fittedvalues.plot(ax=ax, style="--", color="red")
fit2.fittedvalues.plot(ax=ax, style="--", color="green")
fit1.forecast(8).rename("Holt-Winters (add-add-seasonal)").plot(
ax=ax, style="--", marker="o", color="red", legend=True
)
fit2.forecast(8).rename("Holt-Winters (add-mul-seasonal)").plot(
ax=ax, style="--", marker="o", color="green", legend=True
)
plt.show()
print(
"Figure 7.6: Forecasting international visitor nights in Australia using Holt-Winters method with both additive and multiplicative seasonality."
)
results
"""
Explanation: Holt's Winters Seasonal
Finally we are able to run full Holt's Winters Seasonal Exponential Smoothing including a trend component and a seasonal component.
statsmodels allows for all the combinations including as shown in the examples below:
1. fit1 additive trend, additive seasonal of period season_length=4 and the use of a Box-Cox transformation.
1. fit2 additive trend, multiplicative seasonal of period season_length=4 and the use of a Box-Cox transformation..
1. fit3 additive damped trend, additive seasonal of period season_length=4 and the use of a Box-Cox transformation.
1. fit4 additive damped trend, multiplicative seasonal of period season_length=4 and the use of a Box-Cox transformation.
The plot shows the results and forecast for fit1 and fit2.
The table allows us to compare the results and parameterizations.
End of explanation
"""
# Refit the two additive-trend models whose internal states are inspected
# below: fit1 with additive seasonality, fit2 with multiplicative
# seasonality, both with a season length of 4 and estimated initial states.
fit1 = ExponentialSmoothing(
    aust,
    seasonal_periods=4,
    trend="add",
    seasonal="add",
    initialization_method="estimated",
).fit()
fit2 = ExponentialSmoothing(
    aust,
    seasonal_periods=4,
    trend="add",
    seasonal="mul",
    initialization_method="estimated",
).fit()
# Side-by-side view of y_t, the level l_t, trend b_t, season s_t and the
# fitted values for fit1, with the 8-step forecast appended for display.
df = pd.DataFrame(
    np.c_[aust, fit1.level, fit1.trend, fit1.season, fit1.fittedvalues],
    columns=[r"$y_t$", r"$l_t$", r"$b_t$", r"$s_t$", r"$\hat{y}_t$"],
    index=aust.index,
)
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat with sort=True reproduces append(..., sort=True).
pd.concat([df, fit1.forecast(8).rename(r"$\hat{y}_t$").to_frame()], sort=True)
# Same table for fit2 (multiplicative seasonality).
df = pd.DataFrame(
    np.c_[aust, fit2.level, fit2.trend, fit2.season, fit2.fittedvalues],
    columns=[r"$y_t$", r"$l_t$", r"$b_t$", r"$s_t$", r"$\hat{y}_t$"],
    index=aust.index,
)
pd.concat([df, fit2.forecast(8).rename(r"$\hat{y}_t$").to_frame()], sort=True)
"""
Explanation: The Internals
It is possible to get at the internals of the Exponential Smoothing models.
Here we show some tables that allow you to view side by side the original values $y_t$, the level $l_t$, the trend $b_t$, the season $s_t$ and the fitted values $\hat{y}_t$. Note that these values only have meaningful values in the space of your original data if the fit is performed without a Box-Cox transformation.
End of explanation
"""
# Collect the level, slope and seasonal state sequences of both fits and
# plot them in a 3x2 grid (fit1 in the left column, fit2 in the right).
states1 = pd.DataFrame(
    np.c_[fit1.level, fit1.trend, fit1.season],
    columns=["level", "slope", "seasonal"],
    index=aust.index,
)
states2 = pd.DataFrame(
    np.c_[fit2.level, fit2.trend, fit2.season],
    columns=["level", "slope", "seasonal"],
    index=aust.index,
)
fig, [[ax1, ax4], [ax2, ax5], [ax3, ax6]] = plt.subplots(3, 2, figsize=(12, 8))
states1[["level"]].plot(ax=ax1)
states1[["slope"]].plot(ax=ax2)
states1[["seasonal"]].plot(ax=ax3)
states2[["level"]].plot(ax=ax4)
states2[["slope"]].plot(ax=ax5)
states2[["seasonal"]].plot(ax=ax6)
plt.show()
"""
Explanation: Finally lets look at the levels, slopes/trends and seasonal components of the models.
End of explanation
"""
# Fit the additive-trend / multiplicative-seasonal model and draw 100
# simulated 8-step-ahead sample paths with multiplicative errors; the faint
# grey paths are plotted behind the fitted values and the point forecast.
fit = ExponentialSmoothing(
    aust,
    seasonal_periods=4,
    trend="add",
    seasonal="mul",
    initialization_method="estimated",
).fit()
simulations = fit.simulate(8, repetitions=100, error="mul")
ax = aust.plot(
    figsize=(10, 6),
    marker="o",
    color="black",
    title="Forecasts and simulations from Holt-Winters' multiplicative method",
)
ax.set_ylabel("International visitor night in Australia (millions)")
ax.set_xlabel("Year")
fit.fittedvalues.plot(ax=ax, style="--", color="green")
simulations.plot(ax=ax, style="-", alpha=0.05, color="grey", legend=False)
fit.forecast(8).rename("Holt-Winters (add-mul-seasonal)").plot(
    ax=ax, style="--", marker="o", color="green", legend=True
)
plt.show()
"""
Explanation: Simulations and Confidence Intervals
By using a state space formulation, we can perform simulations of future values. The mathematical details are described in Hyndman and Athanasopoulos [2] and in the documentation of HoltWintersResults.simulate.
Similar to the example in [2], we use the model with additive trend, multiplicative seasonality, and multiplicative error. We simulate up to 8 steps into the future, and perform 100 simulations (repetitions=100 in the code below). As can be seen in the below figure, the simulations match the forecast values quite well.
[2] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles and practice, 2nd edition. OTexts, 2018.
End of explanation
"""
# Same model, but the 16-step simulation is anchored at an earlier date
# (2009-01-01) and the random errors are drawn by bootstrapping the fit
# residuals instead of sampling a parametric distribution.
fit = ExponentialSmoothing(
    aust,
    seasonal_periods=4,
    trend="add",
    seasonal="mul",
    initialization_method="estimated",
).fit()
simulations = fit.simulate(
    16, anchor="2009-01-01", repetitions=100, error="mul", random_errors="bootstrap"
)
ax = aust.plot(
    figsize=(10, 6),
    marker="o",
    color="black",
    title="Forecasts and simulations from Holt-Winters' multiplicative method",
)
ax.set_ylabel("International visitor night in Australia (millions)")
ax.set_xlabel("Year")
fit.fittedvalues.plot(ax=ax, style="--", color="green")
simulations.plot(ax=ax, style="-", alpha=0.05, color="grey", legend=False)
fit.forecast(8).rename("Holt-Winters (add-mul-seasonal)").plot(
    ax=ax, style="--", marker="o", color="green", legend=True
)
plt.show()
"""
Explanation: Simulations can also be started at different points in time, and there are multiple options for choosing the random noise.
End of explanation
"""
|
wd15/chimad-phase-field | hackathons/hackathon1/fipy/1c.ipynb | mit | %matplotlib inline
import sympy
import fipy as fp
import numpy as np
A, c, c_m, B, c_alpha, c_beta = sympy.symbols("A c_var c_m B c_alpha c_beta")
f_0 = - A / 2 * (c - c_m)**2 + B / 4 * (c - c_m)**4 + c_alpha / 4 * (c - c_alpha)**4 + c_beta / 4 * (c - c_beta)**4
print f_0
sympy.diff(f_0, c, 2)
"""
Explanation: Table of Contents
1c. Fixed flux spinodal decomposition on a T shaped domain
Use Binder For Live Examples
Define $f_0$
Define the Equation
Solve the Equation
Run the Example Locally
Movie of Evolution
1c. Fixed flux spinodal decomposition on a T shaped domain
Use Binder For Live Examples
The free energy is given by,
$$ f_0\left[ c \left( \vec{r} \right) \right] =
- \frac{A}{2} \left(c - c_m\right)^2
+ \frac{B}{4} \left(c - c_m\right)^4
+ \frac{c_{\alpha}}{4} \left(c - c_{\alpha} \right)^4
+ \frac{c_{\beta}}{4} \left(c - c_{\beta} \right)^4 $$
In FiPy we write the evolution equation as
$$ \frac{\partial c}{\partial t} = \nabla \cdot \left[
D \left( c \right) \left( \frac{ \partial^2 f_0 }{ \partial c^2} \nabla c - \kappa \nabla \nabla^2 c \right)
\right] $$
Let's start by calculating $ \frac{ \partial^2 f_0 }{ \partial c^2} $ using sympy. It's easy for this case, but useful in the general case for taking care of difficult book keeping in phase field problems.
End of explanation
"""
# T-shaped domain: a tall 40x200-cell grid joined with a wide 200x40-cell
# grid that is offset by (-40, 100) before the two meshes are concatenated.
mesh = fp.Grid2D(dx=0.5, dy=0.5, nx=40, ny=200) + (fp.Grid2D(dx=0.5, dy=0.5, nx=200, ny=40) + [[-40],[100]])
"""
Explanation: The first step in implementing any problem in FiPy is to define the mesh. For Problem 1c the solution domain is a T-shaped region, built by concatenating two Grid2D meshes (one shifted so the bar sits on top of the stem). The default zero-flux boundary conditions apply, so no other boundary conditions are required.
End of explanation
"""
# Model parameters (dimensionless).
c_alpha = 0.05
c_beta = 0.95
A = 2.0
kappa = 2.0
c_m = (c_alpha + c_beta) / 2.   # midpoint concentration
B = A / (c_alpha - c_m)**2
D = D_alpha = D_beta = 2. / (c_beta - c_alpha)
c_0 = 0.45                      # mean initial concentration
q = np.sqrt((2., 3.))           # wave vector of the initial perturbation
epsilon = 0.01                  # perturbation amplitude
# Solution variable; hasOld=True keeps a copy for sweep/rollback stepping.
c_var = fp.CellVariable(mesh=mesh, name=r"$c$", hasOld=True)
"""
Explanation: The next step is to define the parameters and create a solution variable.
End of explanation
"""
# Cell-center positions stacked as a (2, n_cells) array.
r = np.array((mesh.x, mesh.y))
# Initial condition: uniform background c_0 plus a small cosine perturbation
# with wave vector q.
c_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))
viewer = fp.Viewer(c_var)
"""
Explanation: Now we need to define the initial conditions given by,
Set $c\left(\vec{r}, t\right)$ such that
$$ c\left(\vec{r}, 0\right) = \bar{c}_0 + \epsilon \cos \left( \vec{q} \cdot \vec{r} \right) $$
End of explanation
"""
# Build d^2 f_0 / dc^2 as a FiPy expression by exec-ing the sympy result
# (Python 2 ``exec`` statement).  The commented line shows the expression
# that the exec produces.
out = sympy.diff(f_0, c, 2)
exec "f_0_var = " + repr(out)
#f_0_var = -A + 3*B*(c_var - c_m)**2 + 3*c_alpha*(c_var - c_alpha)**2 + 3*c_beta*(c_var - c_beta)**2
f_0_var
"""
Explanation: Define $f_0$
To define the equation with FiPy first define f_0 in terms of FiPy. Recall f_0 from above calculated using Sympy. Here we use the string representation and set it equal to f_0_var using the exec command.
End of explanation
"""
# Cahn-Hilliard-type evolution equation,
#   dc/dt = div[ D ( f_0''(c) grad c - kappa grad lap c ) ],
# written as a transient term balanced by a second-order diffusion term and
# a fourth-order (D, kappa) diffusion term.
eqn = fp.TransientTerm(coeff=1.) == fp.DiffusionTerm(D * f_0_var) - fp.DiffusionTerm((D, kappa))
eqn
"""
Explanation: Define the Equation
End of explanation
"""
# Adaptive time stepping: each step is swept ``total_sweeps`` extra times;
# the step is accepted (and dt grown by 10%) only if the residual dropped
# below ``tolerance`` times the first sweep's residual, otherwise dt is
# shrunk by 20% and the step is retried from the previous solution.
elapsed = 0.0
steps = 0
dt = 0.01
total_sweeps = 2
tolerance = 1e-1
total_steps = 10
c_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))
c_var.updateOld()
from fipy.solvers.pysparse import LinearLUSolver as Solver
solver = Solver()
while steps < total_steps:
    res0 = eqn.sweep(c_var, dt=dt, solver=solver)
    for sweeps in range(total_sweeps):
        res = eqn.sweep(c_var, dt=dt, solver=solver)
    if res < res0 * tolerance:
        # converged: accept the step and grow the time step
        steps += 1
        elapsed += dt
        dt *= 1.1
        c_var.updateOld()
    else:
        # not converged: shrink dt and roll back to the old field
        dt *= 0.8
        c_var[:] = c_var.old
viewer.plot()
print 'elapsed_time:',elapsed
"""
Explanation: Solve the Equation
To solve the equation a simple time stepping scheme is used which is decreased or increased based on whether the residual decreases or increases. A time step is recalculated if the required tolerance is not reached.
End of explanation
"""
%%writefile fipy_hackathon_1c.py
# Standalone script version of the notebook cells above: runs 600 adaptive
# time steps on the T-shaped domain and saves an image of the concentration
# field at every accepted step.
import fipy as fp
import numpy as np
mesh = fp.Grid2D(dx=0.5, dy=0.5, nx=40, ny=200) + (fp.Grid2D(dx=0.5, dy=0.5, nx=200, ny=40) + [[-40],[100]])
c_alpha = 0.05
c_beta = 0.95
A = 2.0
kappa = 2.0
c_m = (c_alpha + c_beta) / 2.
B = A / (c_alpha - c_m)**2
D = D_alpha = D_beta = 2. / (c_beta - c_alpha)
c_0 = 0.45
q = np.sqrt((2., 3.))
epsilon = 0.01
c_var = fp.CellVariable(mesh=mesh, name=r"$c$", hasOld=True)
r = np.array((mesh.x, mesh.y))
c_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))
f_0_var = -A + 3*B*(c_var - c_m)**2 + 3*c_alpha*(c_var - c_alpha)**2 + 3*c_beta*(c_var - c_beta)**2
eqn = fp.TransientTerm(coeff=1.) == fp.DiffusionTerm(D * f_0_var) - fp.DiffusionTerm((D, kappa))
elapsed = 0.0
steps = 0
dt = 0.01
total_sweeps = 2
tolerance = 1e-1
total_steps = 600
c_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))
c_var.updateOld()
from fipy.solvers.pysparse import LinearLUSolver as Solver
solver = Solver()
viewer = fp.Viewer(c_var)
# Adaptive stepping: accept the step and grow dt when the residual has
# dropped enough, otherwise shrink dt and retry from the old field.
while steps < total_steps:
    res0 = eqn.sweep(c_var, dt=dt, solver=solver)
    for sweeps in range(total_sweeps):
        res = eqn.sweep(c_var, dt=dt, solver=solver)
    print ' '
    print 'steps',steps
    print 'res',res
    print 'sweeps',sweeps
    print 'dt',dt
    if res < res0 * tolerance:
        steps += 1
        elapsed += dt
        dt *= 1.1
        if steps % 1 == 0:
            viewer.plot('image{0}.png'.format(steps))
        c_var.updateOld()
    else:
        dt *= 0.8
        c_var[:] = c_var.old
"""
Explanation: Run the Example Locally
The following cell will dump a file called fipy_hackathon_1c.py to the local file system to be run. The images are saved out at each time step.
End of explanation
"""
from IPython.display import YouTubeVideo
# Embed the evolution movie, scaled up 1.5x from the default player size.
scale = 1.5
YouTubeVideo('aZk38E7OxcQ', width=420 * scale, height=315 * scale, rel=0)
"""
Explanation: Movie of Evolution
The movie of the evolution for 600 steps.
The movie was generated with the output files of the form image*.png using the following commands,
$ rename 's/\d+/sprintf("%05d",$&)/e' image*
$ ffmpeg -f image2 -r 6 -i 'image%05d.png' output.mp4
End of explanation
"""
|
sourabhrohilla/ds-masterclass-hands-on | session-2/python/TopicModel.ipynb | mit | PATH_NEWS_ARTICLES = ""
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from nltk.stem.snowball import SnowballStemmer
import re
import pickle
import pandas as pd
import gensim
from gensim import corpora, models
"""
Explanation: Topic Modeling using LDA
Topic Modeling Using LDA
Text Processing
Generating dictionary of vocabulary
Mapping corpus using dictionary
Training and saving the Topic Model
Describing parameters:
1. PATH_NEWS_ARTICLES: specify the path where 'news_article.csv' is present <br/>
End of explanation
"""
# Load the articles and preview the first five rows.
df=pd.read_csv(PATH_NEWS_ARTICLES)
df.head(5)
# Shared NLP resources: English stop-word set, a tweet-aware tokenizer and
# an English Snowball stemmer, used by tokenize() below.
stop_words = set(stopwords.words('english'))
tknzr = TweetTokenizer()
stemmer = SnowballStemmer("english")
def clean_text(text):
    """Replace punctuation and other special symbols in *text* with spaces.

    Word characters, underscores, whitespace and hyphens are kept; every
    other character becomes a single space.
    """
    # Raw string: '\w' / '\s' in a plain string are invalid escapes and
    # raise a SyntaxWarning on Python 3.12+.  The pattern is unchanged.
    return re.sub(r'[^\w_\s-]', ' ', text)
def tokenize(text):
    """Tokenize *text*, drop stop words, stem, and keep alphabetic tokens
    longer than one character."""
    raw_tokens = tknzr.tokenize(text)                               # tokenization
    kept = [w for w in raw_tokens if w.lower() not in stop_words]   # stop-word removal
    stems = [stemmer.stem(w) for w in kept]                         # stemming
    return [t for t in stems if t.isalpha() and len(t) > 1]
# Clean and tokenize every article in the corpus.
# Returns a list of token lists, one inner list per article.
def text_processing():
    """Clean and tokenize each article in the global ``df``."""
    raw_articles = df['Content'].tolist()
    cleaned = [clean_text(article) for article in raw_articles]
    return [tokenize(article) for article in cleaned]
article_vocabulary = text_processing()
"""
Explanation: 1. TEXT PROCESSING
1.1 Clean the article - Remove punctuation marks, special characters <br/>
1.2 Tokenize each article <br/>
1.3 Stem each token <br/>
1.4 Remove numberical tokens <br/>
End of explanation
"""
# Parameters for LDA:
# NUMBER_OF_TOPICS is the number of requested latent topics to be extracted
# from the training corpus.
NUMBER_OF_TOPICS = 5
# PASSES is the number of training passes over the corpus.
PASSES = 1
# NUMBER_OF_WORDS is how many top words to show per topic.
NUMBER_OF_WORDS = 10
# Map each vocabulary word to an integer id and persist the mapping.
dictionary = corpora.Dictionary(article_vocabulary)
# ``with`` closes the file handle (pickle.dump(..., open(...)) leaked it).
with open("dictionary_of_vocabulary.p", "wb") as dictionary_file:
    pickle.dump(dictionary, dictionary_file)
# Materialize the (id, word) pairs for display: on Python 3 a bare ``zip``
# is a lazy iterator and would not show its contents in the notebook.
list(zip(dictionary.keys(), dictionary.values()))
# Convert each tokenized article to a bag-of-words vector.
corpus = [dictionary.doc2bow(text) for text in article_vocabulary]
# Train and save the LDA model.
lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=NUMBER_OF_TOPICS, passes=PASSES)
lda.save('lda.model')
# Topic-word distribution: NUMBER_OF_WORDS top words for each topic.
topic_words = lda.show_topics(num_topics=NUMBER_OF_TOPICS, num_words=NUMBER_OF_WORDS)
topic_words
# Article-topic distribution for every article in the corpus.
def get_article_topic_distribution(article):
    """Return [(topic_id, probability), ...] for one bag-of-words article."""
    return lda.get_document_topics(article)
# ``list`` materializes the lazy ``map`` object on Python 3.
list(map(get_article_topic_distribution, corpus))
"""
Explanation: 2. TOPIC MODELING
2.1 Create dictionary mapping word to ID <br/>
2.2 Map IDs to corpus <br/>
2.3 Train LDA Model <br/>
End of explanation
"""
# A held-out article (an excerpt of Nehru's independence speech) used below
# to demonstrate topic inference on text unseen during training.
new_article = """At the dawn of history India started on her unending quest, and trackless centuries are filled with her
striving and the grandeur of her success and her failures. Through good and ill fortune alike she has
never lost sight of that quest or forgotten the ideals which gave her strength. We end today a period of
ill fortune and India discovers herself again. The achievement we celebrate today is but a step, an opening
of opportunity, to the greater triumphs and achievements that await us.
Are we brave enough and wise enough to grasp this opportunity and accept the challenge of the future?"""
"""
Explanation: Generate Topics for a new Article
This is a new article that was not used for training the topic model.
End of explanation
"""
DICTIONARY_PATH = "dictionary_of_vocabulary.p"
LDA_MODEL_PATH = "lda.model"
# Clean and tokenize the unseen article.
cleaned_text = clean_text(new_article)
article_vocabulary = tokenize(cleaned_text)
# Load the saved vocabulary dictionary; ``with`` ensures the file handle is
# closed (the original ``pickle.load(open(...))`` leaked it).
with open(DICTIONARY_PATH, "rb") as dictionary_file:
    model_dictionary = pickle.load(dictionary_file)
# Map the article's tokens to (id, count) pairs using the training vocabulary
corpus = [model_dictionary.doc2bow(text) for text in [article_vocabulary]]
# Load the trained LDA model
lda = models.LdaModel.load(LDA_MODEL_PATH)
# Topic distribution of the new article: list of (topic_id, probability)
article_topic_distribution = lda.get_document_topics(corpus[0])
article_topic_distribution
"""
Explanation: Describing parameters:
1. DICTIONARY_PATH: specify the path where 'dictionary_of_vocabulary.p' is present. <br/>
2. LDA_MODEL_PATH: specify the path where 'lda.model' is present. <br/>
End of explanation
"""
|
dtamayo/rebound | ipython_examples/RemovingParticlesFromSimulation.ipynb | gpl-3.0 | import rebound
import numpy as np
sim = rebound.Simulation()
sim.add(m=1., hash=0)
for i in range(1,10):
sim.add(a=i, hash=i)
sim.move_to_com()
print("Particle hashes:{0}".format([sim.particles[i].hash for i in range(sim.N)]))
"""
Explanation: Removing particles from the simulation
This tutorial shows the different ways to remove particles from a REBOUND simulation. Let us start by setting up a simple simulation with 10 bodies, and assign them unique hashes so we can keep track of them (see UniquelyIdentifyingParticlesWithHashes.ipynb).
End of explanation
"""
# Add one more particle identified by a string; the string is converted to
# a numeric hash internally.
sim.add(a=10, hash="Saturn")
print("Particle hashes:{0}".format([sim.particles[i].hash for i in range(sim.N)]))
"""
Explanation: Let us add one more particle, this time with a custom name:
End of explanation
"""
# Integrate to t = 50 * 2*pi, recording every particle's (x, y) position at
# Noutputs evenly spaced times.
Noutputs = 1000
xs = np.zeros((sim.N, Noutputs))
ys = np.zeros((sim.N, Noutputs))
times = np.linspace(0.,50*2.*np.pi, Noutputs, endpoint=False)
for i, time in enumerate(times):
    sim.integrate(time)
    xs[:,i] = [sim.particles[j].x for j in range(sim.N)]
    ys[:,i] = [sim.particles[j].y for j in range(sim.N)]
%matplotlib inline
import matplotlib.pyplot as plt
# Trace each particle's trajectory in the x-y plane.
fig,ax = plt.subplots(figsize=(15,5))
for i in range(sim.N):
    plt.plot(xs[i,:], ys[i,:])
ax.set_aspect('equal')
"""
Explanation: Now let us run perform a short integration to isolate the particles that interest us for a longer simulation:
End of explanation
"""
# Show each particle's hash alongside its final x position.
print("Hash\t\tx")
for i in range(sim.N):
    print("{0}\t{1}".format(sim.particles[i].hash, xs[i,-1]))
"""
Explanation: At this stage, we might be interested in particles that remained within some semimajor axis range, particles that were in resonance with a particular planet, etc. Let's imagine a simple (albeit arbitrary) case where we only want to keep particles that had $x < 0$ at the end of the preliminary integration. Let's first print out the particle hashes and x positions.
End of explanation
"""
# Remove every particle (except the central body at index 0) whose final x
# position is positive.  Iterating in reverse keeps the indices of the
# not-yet-checked particles valid as entries are removed.
for i in reversed(range(1,sim.N)):
    if xs[i,-1] > 0:
        sim.remove(i)
print("Number of particles after cut = {0}".format(sim.N))
print("Hashes of remaining particles = {0}".format([p.hash for p in sim.particles]))
"""
Explanation: Note that 4066125545 is the hash corresponding to the string "Saturn" we added above. We can use the remove() function to filter out particles. As an argument, we pass the corresponding index in the particles array.
End of explanation
"""
# keepSorted=0 skips the re-shuffling of higher-index particles, so the
# removal is cheaper but the original ordering is no longer preserved.
sim.remove(2, keepSorted=0)
print("Number of particles after cut = {0}".format(sim.N))
print("Hashes of remaining particles = {0}".format([p.hash for p in sim.particles]))
"""
Explanation: By default, the remove() function removes the i-th particle from the particles array, and shifts all particles with higher indices down by 1. This ensures that the original order in the particles array is preserved. Note that this is helpful for example if you use an integrator such as WHFast which uses Jacobi coordinates.
By running through the planets in reverse order, we are guaranteed that when a particle with index i gets removed, the particle replacing it doesn't need to also be removed (we already checked it).
If you have many particles and many removals (or you don't care about the ordering), you can save the reshuffling of all particles with higher indices with the flag keepSorted=0:
End of explanation
"""
# Remove by hash instead of index; a string argument is converted to its
# numeric hash automatically.
sim.remove(hash="Saturn")
print("Number of particles after cut = {0}".format(sim.N))
print("Hashes of remaining particles = {0}".format([p.hash for p in sim.particles]))
"""
Explanation: We see that the order of the particles array has changed.
Because in general particles can change positions in the particles array, a more robust way of referring to particles (rather than through their index) is through their hash, which won't change. You can pass sim.remove either the hash directly, or if you pass a string, it will be automatically converted to its corresponding hash:
End of explanation
"""
# Removing a particle with an unknown hash (or an invalid index) raises a
# RuntimeError, which can be caught with standard Python syntax.
try:
    sim.remove(hash="Planet 9")
except RuntimeError as e:
    # fixed typo in the user-facing message: "occured" -> "occurred"
    print("A runtime error occurred: {0}".format(e))
"""
Explanation: If you try to remove a particle with an invalid index or hash, an exception is thrown, which might be catch using the standard python syntax:
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.18/_downloads/8e91c4d84fe688d78859cf6274554a8b/plot_compute_csd.ipynb | bsd-3-clause | # Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# License: BSD (3-clause)
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import csd_fourier, csd_multitaper, csd_morlet
print(__doc__)
"""
Explanation: ==================================================
Compute a cross-spectral density (CSD) matrix
==================================================
A cross-spectral density (CSD) matrix is similar to a covariance matrix, but in
the time-frequency domain. It is the first step towards computing
sensor-to-sensor coherence or a DICS beamformer.
This script demonstrates the three methods that MNE-Python provides to compute
the CSD:
Using short-term Fourier transform: :func:mne.time_frequency.csd_fourier
Using a multitaper approach: :func:mne.time_frequency.csd_multitaper
Using Morlet wavelets: :func:mne.time_frequency.csd_morlet
End of explanation
"""
n_jobs = 1  # number of cores used by the CSD computations (set >1 to parallelize)
"""
Explanation: In the following example, the computation of the CSD matrices can be
performed using multiple cores. Set n_jobs to a value >1 to select the
number of cores to use.
End of explanation
"""
# Paths into the MNE sample dataset: the raw recording and its event file.
data_path = sample.data_path()
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
"""
Explanation: Loading the sample dataset.
End of explanation
"""
# Restrict to gradiometers so all channels share the same units/scaling.
picks = mne.pick_types(raw.info, meg='grad')
# Make some epochs, based on events with trigger code 1, with baseline
# correction and an amplitude-based rejection threshold.
epochs = mne.Epochs(raw, events, event_id=1, tmin=0, tmax=1,
                    picks=picks, baseline=(None, 0),
                    reject=dict(grad=4000e-13), preload=True)
"""
Explanation: By default, CSD matrices are computed using all MEG/EEG channels. When
interpreting a CSD matrix with mixed sensor types, be aware that the
measurement units, and thus the scalings, differ across sensors. In this
example, for speed and clarity, we select a single channel type:
gradiometers.
End of explanation
"""
# CSD between 15 and 20 Hz via short-term Fourier transform and via
# adaptive multitapers.
csd_fft = csd_fourier(epochs, fmin=15, fmax=20, n_jobs=n_jobs)
csd_mt = csd_multitaper(epochs, fmin=15, fmax=20, adaptive=True, n_jobs=n_jobs)
"""
Explanation: Computing CSD matrices using short-term Fourier transform and (adaptive)
multitapers is straightforward:
End of explanation
"""
# Morlet-wavelet CSD at explicit frequencies; decim=10 keeps every 10th
# time sample, which speeds up the correlation step with minimal effect.
frequencies = [16, 17, 18, 19, 20]
csd_wav = csd_morlet(epochs, frequencies, decim=10, n_jobs=n_jobs)
"""
Explanation: When computing the CSD with Morlet wavelets, you specify the exact
frequencies at which to compute it. For each frequency, a corresponding
wavelet will be constructed and convolved with the signal, resulting in a
time-frequency decomposition.
The CSD is constructed by computing the correlation between the
time-frequency representations between all sensor-to-sensor pairs. The
time-frequency decomposition originally has the same sampling rate as the
signal, in our case ~600Hz. This means the decomposition is over-specified in
time and we may not need to use all samples during our CSD computation, just
enough to get a reliable correlation statistic. By specifying decim=10,
we use every 10th sample, which will greatly speed up the computation and
will have a minimal effect on the CSD.
End of explanation
"""
# Plot the mean CSD across frequencies for each of the three estimators.
csd_fft.mean().plot()
plt.suptitle('short-term Fourier transform')
csd_mt.mean().plot()
plt.suptitle('adaptive multitapers')
csd_wav.mean().plot()
plt.suptitle('Morlet wavelet transform')
"""
Explanation: The resulting :class:mne.time_frequency.CrossSpectralDensity objects have a
plotting function we can use to compare the results of the different methods.
We're plotting the mean CSD across frequencies.
End of explanation
"""
|
zzsza/Datascience_School | 06. 기초 선형대수/04. NumPy를 활용한 선형대수 입문.ipynb | mit | x = np.array([1, 2, 3, 4])
x
x = np.array([[1], [2], [3], [4]])
x
"""
Explanation: NumPy를 활용한 선형대수 입문
선형대수(linear algebra)는 데이터 분석에 필요한 각종 계산을 위한 기본적인 학문이다.
데이터 분석을 하기 위해서는 실제로 수많은 숫자의 계산이 필요하다. 하나의 데이터 레코드(record)가 수십개에서 수천개의 숫자로 이루어져 있을 수도 있고 수십개에서 수백만개의 이러한 데이터 레코드를 조합하여 계산하는 과정이 필요할 수 있다.
선형대수를 사용하는 첫번째 장점은 이러한 데이터 계산의 과정을 아주 단순한 수식으로 서술할 수 있다는 점이다. 그러기 위해서는 선형대수에서 사용되는 여러가지 기호와 개념을 익혀야 한다.
데이터의 유형
선형대수에서 다루는 데이터는 크게 스칼라(scalar), 벡터(vector), 행렬(matrix), 이 세가지 이다.
간단하게 말하자면 스칼라는 숫자 하나로 이루어진 데이터이고 벡터는 여러개의 숫자로 이루어진 데이터 레코드이며 행렬은 벡터, 즉 데이터 레코드가 여러개 있는 데이터 집합이라고 볼 수 있다.
스칼라
스칼라는 하나의 숫자를 말한다. 예를 들어 어떤 붓꽃(iris) 샘플의 꽃잎의 길이를 측정하는 하나의 숫자가 나올 것이다. 이 숫자는 스칼라이다.
스칼라는 보통 $x$와 같이 알파벳 소문자로 표기하며 실수(real number)인 숫자 중의 하나이므로 실수 집합의 원소라는 의미에서 다음과 같이 표기한다.
$$ x \in \mathbb{R} $$
벡터
벡터는 복수개의 숫자가 특정 순서대로 모여 있는 것을 말한다. 사실 대부분의 데이터 분석에서 하나의 데이터 레코드는 여러개의 숫자로 이루어진 경우가 많다. 예를 들어 붓꽃의 종을 알아내기 위해 크기를 측정하게 되면 꽃잎의 길이 $x_1$ 뿐 아니라 꽃잎의 폭 $x_2$ , 꽃받침의 길이 $x_3$ , 꽃받침의 폭 $x_4$ 이라는 4개의 숫자를 측정할 수 있다. 이렇게 측정된 4개의 숫자를 하나의 쌍(tuple) $x$ 로 생각하여 다음과 같이 표기한다.
$$
x = \begin{bmatrix}
x_{1} \
x_{2} \
x_{3} \
x_{4} \
\end{bmatrix}
$$
여기에서 주의할 점은 벡터는 복수의 행(row)을 가지고 하나의 열(column)을 가지는 형태로 위에서 아래로 표기한다는 점이다.
이 때 $x$는 4개의 실수(real number)로 이루어져 있기 때문에 4차원 벡터라고 하고 다음과 같이 4차원임을 표기한다.
$$ x \in \mathbb{R}^4 $$
만약 이 데이터를 이용하여 붓꽃의 종을 결정하는 예측 문제를 풀고 있다면 이 벡터를 feature vector라고 하기도 한다.
만약 4개가 아니라 $N$개의 숫자가 모여 있는 경우의 표기는 다음과 같다.
$$
x = \begin{bmatrix}
x_{1} \
x_{2} \
\vdots \
x_{N} \
\end{bmatrix}
,\;\;\;\;
x \in \mathbb{R}^N
$$
NumPy를 사용하면 벡터는 1차원 ndarray 객체 혹은 열의 갯수가 1개인 2차원 ndarray 객체로 표현한다. 벡터를 처리하는 프로그램에 따라서는 두 가지 중 특정한 형태만 원하는 경우도 있을 수 있기 때문에 주의해야 한다. 예를 들어 파이썬 scikit-learn 패키지에서는 벡터를 요구하는 경우에 열의 갯수가 1개인 2차원 ndarray 객체를 선호한다.
End of explanation
"""
# A 2x3 matrix represented as a 2-D ndarray.
X = np.array([[11,12,13],[21,22,23]])
X
"""
Explanation: 행렬
행렬은 복수의 차원을 가지는 데이터 레코드가 다시 여러개 있는 경우의 데이터를 합쳐서 표기한 것이다. 예를 들어 앞서 말한 붓꽃의 예에서 6개의 붓꽃에 대해 크기를 측정하였다면 4차원 붓꽃 데이터가 6개가 있다. 즉, $4 \times 6 = 24$개의 실수 숫자가 있는 것이다. 이 숫자 집합을
행렬로 나타내면 다음과 같다. 행렬은 보통 $X$와 같이 알파벳 대문자로 표기한다.
$$X =
\begin{bmatrix}
x_{1, 1} & x_{1, 2} & x_{1, 3} & x_{1, 4} \
x_{2, 1} & x_{2, 2} & x_{2, 3} & x_{2, 4} \
x_{3, 1} & x_{3, 2} & x_{3, 3} & x_{3, 4} \
x_{4, 1} & x_{4, 2} & x_{4, 3} & x_{4, 4} \
x_{5, 1} & x_{5, 2} & x_{5, 3} & x_{5, 4} \
x_{6, 1} & x_{6, 2} & x_{6, 3} & x_{6, 4} \
\end{bmatrix}
$$
행렬 안에서 원소의 위치를 표기할 때는 $x_{2, 3}$ 처럼 두 개의 숫자 쌍을 아랫 첨자(sub-script)로 붙여서 표기한다. 첫번째 숫자가 행(row)을 뜻하고 두번째 숫자가 열(column)을 뜻한다. 예를 들어 $x_{2, 3}$ 는 두번째 행(위에서 아래로 두번째), 세번째 열(왼쪽에서 오른쪽으로 세번째)의 숫자를 뜻한다.
붓꽃의 예에서는 하나의 데이터 레코드가 4차원이였다는 점을 기억하자. 따라서 이 행렬 표기에서는 하나의 행(row)이 붓꽃 하나에 대한 데이터 레코드가 된다.
하나의 데이터 레코드를 나타낼 때는 하나의 열(column)로 나타내고 복수의 데이터 레코드 집합을 나타낼 때는 하나의 데이터 레코드가 하나의 행(row)으로 표기하는 것은 일관성이 없어 보지만 데이터 분석에서 쓰는 일반적인 관례이므로 익히고 있어야 한다.
만약 이 데이터를 이용하여 붓꽃의 종을 결정하는 예측 문제를 풀고 있다면 이 행렬를 feature matrix라고 하기도 한다.
이 행렬의 크기를 수식으로 표시할 때는 행의 크기 곱하기 열의 크기의 형태로 다음과 같이 나타낸다.
$$ X \in \mathbb{R}^{6\times 4} $$
벡터도 열의 수가 1인 특수한 행렬이기 때문에 벡터의 크기를 표시할 때 행렬 표기에 맞추어 다음과 같이 쓰기도 한다.
$$ x \in \mathbb{R}^{4\times 1} $$
NumPy를 이용하여 행렬을 표기할 때는 2차원 ndarray 객체를 사용한다.
End of explanation
"""
# Diagonal matrix with the given entries on the main diagonal.
np.diag([1, 2, 3])
"""
Explanation: 특수한 행렬
몇가지 특수한 행렬에 대해서는 별도의 이름이 붙어있다.
행렬에서 행의 숫자와 열의 숫자가 같은 위치를 대각(diagonal)이라고 하고 대각 위치에 있지 않은 것들은 비대각(off-diagonal)이라고 한다. 모든 비대각 요소가 0인 행렬을 대각 행렬(diagonal matrix)이라고 한다.
$$ D \in \mathbb{R}^{N \times N} $$
$$
D =
\begin{bmatrix}
D_{1} & 0 & \cdots & 0 \
0 & D_{2} & \cdots & 0 \
\vdots & \vdots & \ddots & \vdots \
0 & 0 & \cdots & D_{N} \
\end{bmatrix}
$$
NumPy로 대각행렬을 생성하려면 diag 명령을 사용한다.
End of explanation
"""
# Identity matrices via identity() and eye().
np.identity(3)
np.eye(4)
"""
Explanation: 대각 행렬 중에서도 모든 대각 성분의 값이 1인 대각 행렬을 단위 행렬(identity matrix)이라고 한다. 단위 행렬은 보통 알파벳 대문자 $I$로 표기하는 경우가 많다.
$$ I \in \mathbb{R}^{N \times N} $$
$$
I =
\begin{bmatrix}
1 & 0 & \cdots & 0 \
0 & 1 & \cdots & 0 \
\vdots & \vdots & \ddots & \vdots \
0 & 0 & \cdots & 1 \
\end{bmatrix}
$$
NumPy로 단위행렬을 생성하려면 identity 혹은 eye 명령을 사용한다.
End of explanation
"""
X = np.array([[11,12,13],[21,22,23]])
X
# Transpose: note that .T is an attribute, not a method.
X.T
"""
Explanation: 연산
행렬의 연산을 이용하면 대량의 데이터에 대한 계산을 간단한 수식으로 나타낼 수 있다. 물론 행렬에 대한 연산은 보통의 숫자 즉, 스칼라에 대한 사칙 연산과는 다른 규칙을 적용하므로 이 규칙을 외워야 한다.
전치 연산
전치(transpose) 연산은 행렬의 행과 열을 바꾸는 연산을 말한다. 벡터 기호에 $T$라는 윗첨자(super-script)를 붙어서 표기한다. 예를 들어 앞에서 보인 $4\times 6$ 차원의 행렬을 전치 연산하면 $6\times 4$ 차원의 행렬이 된다.
$$
X =
\begin{bmatrix}
x_{1, 1} & x_{1, 2} & x_{1, 3} & x_{1, 4} \
x_{2, 1} & x_{2, 2} & x_{2, 3} & x_{2, 4} \
x_{3, 1} & x_{3, 2} & x_{3, 3} & x_{3, 4} \
x_{4, 1} & x_{4, 2} & x_{4, 3} & x_{4, 4} \
x_{5, 1} & x_{5, 2} & x_{5, 3} & x_{5, 4} \
x_{6, 1} & x_{6, 2} & x_{6, 3} & x_{6, 4} \
\end{bmatrix}
\;\;\;
\rightarrow
\;\;\;
X^T =
\begin{bmatrix}
x_{1, 1} & x_{2, 1} & x_{3, 1} & x_{4, 1} & x_{5, 1} & x_{6, 1} \
x_{1, 2} & x_{2, 2} & x_{3, 2} & x_{4, 2} & x_{5, 2} & x_{6, 2} \
x_{1, 3} & x_{2, 3} & x_{3, 3} & x_{4, 3} & x_{5, 3} & x_{6, 3} \
x_{1, 4} & x_{2, 4} & x_{3, 4} & x_{4, 4} & x_{5, 4} & x_{6, 4} \
\end{bmatrix}
$$
벡터도 열의 수가 1인 특수한 행렬이므로 벡터에 대해서도 전치 연산을 적용할 수 있다. 이 때 $x$와 같이 열의 수가 1인 행렬을 열 벡터(column vector)라고 하고 $x^T$와 같이 행의 수가 1인 행렬을 행 벡터(row vector)라고 한다.
$$
x =
\begin{bmatrix}
x_{1} \
x_{2} \
\vdots \
x_{N} \
\end{bmatrix}
\; \rightarrow \;
x^T =
\begin{bmatrix}
x_{1} & x_{2} & \cdots & x_{N}
\end{bmatrix}
$$
NumPy에서는 ndarray 객체의 T라는 속성을 이용하여 전치 행렬을 구한다. 이 때 T는 메서드(method)가 아닌 속성(attribute)에 유의한다.
End of explanation
"""
x = np.array([10, 11, 12, 13, 14])
x
y = np.array([0, 1, 2, 3, 4])
y
# Element-wise vector addition and subtraction.
x + y
x - y
"""
Explanation: 행렬의 행 표기법과 열 표기법
전치 연산과 행 벡터, 열 벡터를 이용하면 행렬을 다음과 같이 복수의 열 벡터들 $c_i$, 또는 복수의 열 벡터들 $r_j^T$ 을 합친(concatenated) 형태로 표기할 수도 있다.
$$
X
=
\begin{bmatrix}
c_1 & c_2 & \cdots & c_M
\end{bmatrix}
=
\begin{bmatrix}
r_1^T \ r_2^T \ \vdots \ r_N^T
\end{bmatrix}
$$
$$ X \in \mathbb{R}^{N\times M} ,\;\;\; c_i \in R^{N \times 1} \; (i=1,\cdots,M) ,\;\;\; r_j \in R^{M \times 1} \; (j=1,\cdots,N) $$
행렬 덧셈과 뺄셈
행렬의 덧셈과 뺄셈은 같은 크기를 가진 두개의 행렬에 대해 정의되며 각각의 원소에 대해 덧셈과 뺄셈을 하면 된다. 이러한 연산을 element-wise 연산이라고 한다.
End of explanation
"""
# Inner (dot) product of two 1-D vectors returns a scalar.
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
np.dot(x, y)
x.dot(y)  # equivalent method form
# With explicit column vectors (shape (3, 1)) the result is a 1x1 array.
x = np.array([[1], [2], [3]])
y = np.array([[4], [5], [6]])
np.dot(x.T, y)
"""
Explanation: 벡터 곱셈
두 행렬의 곱셈을 정의하기 전에 우선 두 벡터의 곱셈을 알아보자. 벡터의 곱셈에는 내적(inner product)과 외적(outer product) 두 가지가 있다 여기에서는 내적에 대해서만 설명한다. 내적은 dot product라고 하기도 한다.
두 벡터의 곱(내적)이 정의되려면 우선 두 벡터의 길이가 같으며 앞의 벡터가 행 벡터이고 뒤의 벡터가 열 벡터이어야 한다. 이때 두 벡터의 곱은 다음과 같이 각 원소들을 element-by-element로 곱한 다음에 그 값들을 다시 모두 합해서 하나의 스칼라값으로 계산된다.
$$
x^T y =
\begin{bmatrix}
x_{1} & x_{2} & \cdots & x_{N}
\end{bmatrix}
\begin{bmatrix}
y_{1} \
y_{2} \
\vdots \
y_{N} \
\end{bmatrix}
= x_1 y_1 + \cdots + x_N y_N
= \sum_{i=1}^N x_i y_i
$$
$$ x \in \mathbb{R}^{N \times 1} , \; y \in \mathbb{R}^{N \times 1} \; \rightarrow \; x^T y \in \mathbb{R} $$
벡터의 곱은 왜 이렇게 복잡하게 정의된 것일까. 벡터의 곱을 사용한 예를 몇가지 살펴보자
가중합
가중합(weighted sum)이란 복수의 데이터를 단순히 합하는 것이 아니라 각각의 수에 중요도에 따른 어떤 가중치를 곱한 후 이 값을 합하는 것을 말한다. 만약 데이터가 $x_1, \cdots, x_N$ 이고 가중치가 $w_1, \cdots, w_N$ 이면 가중합은 다음과 같다.
$$ w_1 x_1 + \cdots + w_N x_N = \sum_{i=1}^N w_i x_i $$
이를 벡터의 곱으로 나타내면 다음과 같이 $w^Tx$ 또는 $x^Tw$ 라는 간단한 수식으로 표시할 수 있다.
$$ w_1 x_1 + \cdots + w_N x_N = \sum_{i=1}^N w_i x_i =
\begin{bmatrix}
w_{1} && w_{2} && \cdots && w_{N}
\end{bmatrix}
\begin{bmatrix}
x_1 \ x_2 \ \vdots \ x_N
\end{bmatrix}
= w^Tx =
\begin{bmatrix}
x_{1} && x_{2} && \cdots && x_{N}
\end{bmatrix}
\begin{bmatrix}
w_1 \ w_2 \ \vdots \ w_N
\end{bmatrix}
= x^Tw $$
NumPy에서 벡터 혹은 이후에 설명할 행렬의 곱은 dot이라는 명령으로 계산한다. 2차원 행렬로 표시한 벡터의 경우에는 결과값이 스칼라가 아닌 2차원 행렬값임에 유의한다.
End of explanation
"""
# Matrix product: (2x3) dot (3x2) gives a (2x2) result.
A = np.array([[1, 2, 3], [4, 5, 6]])
B = np.array([[1, 2], [3, 4], [5, 6]])
C = np.dot(A, B)
A
B
C
"""
Explanation: 제곱합
데이터 분석시에 분산(variance), 표준 편차(standard deviation)을 구하는 경우에는 각각의 데이터를 제곱한 값을 모두 더하는 계산 즉 제곱합(sum of squares)을 계산하게 된다. 이 경우에도 벡터의 곱을 사용하여 $x^Tx$로 쓸 수 있다.
$$
x^T x =
\begin{bmatrix}
x_{1} & x_{2} & \cdots & x_{N}
\end{bmatrix}
\begin{bmatrix}
x_{1} \
x_{2} \
\vdots \
x_{N} \
\end{bmatrix} = \sum_{i=1}^{N} x_i^2
$$
행렬의 곱셈
벡터의 곱셈을 정의한 후에는 다음과 같이 행렬의 곱셈을 정의할 수 있다.
$A$ 행렬과 $B$ 행렬을 곱한 결과인 $C$ 행렬의 $i$번째 행, $j$번째 열의 원소의 값은 $A$ 행렬의 $i$번째 행 벡터 $a_i^T$와 $B$ 행렬의 $j$번째 열 벡터 $b_j$의 곱으로 계산된 숫자이다.
$$ C = AB \; \rightarrow \; [c_{ij}] = a_i^T b_j $$
이 정의가 성립하려면 앞의 행렬 $A$의 열의 수가 뒤의 행렬 $B$의 행의 수와 일치해야만 한다.
$$ A \in \mathbb{R}^{N \times L} , \; B \in \mathbb{R}^{L \times M} \; \rightarrow \; AB \in \mathbb{R}^{N \times M} $$
End of explanation
"""
from sklearn.datasets import make_regression

# Generate a small random regression problem: 4 samples, 3 features.
X, y = make_regression(4, 3)
X
y
# Least-squares solution w of X w ~= y (pseudo-inverse based).
# rcond=None opts into the new singular-value cut-off and silences the
# FutureWarning that NumPy >= 1.14 raises when rcond is omitted.
w = np.linalg.lstsq(X, y, rcond=None)[0]
w
# Residual vector e = y - Xw; near zero here because 4 samples with
# 3 features are fit (almost) exactly.
e = y - np.dot(X, w)
e
"""
Explanation: 그럼 이러한 행렬의 곱셈은 데이터 분석에서 어떤 경우에 사용될까. 몇가지 예를 살펴본다.
가중 벡터합
어떤 데이터 레코드 즉, 벡터의 가중합은 $w^Tx$ 또는 $x^Tw$로 표시할 수 있다는 것을 배웠다. 그런데 만약 이렇게 $w$ 가중치를 사용한 가중합을 하나의 벡터 $x$가 아니라 여러개의 벡터 $x_1, \cdots, x_M$개에 대해서 모두 계산해야 한다면 이 계산을 다음과 같이 $Xw$라는 기호로 간단하게 표시할 수 있다.
$$
\begin{bmatrix}
w_1 x_{1,1} + w_2 x_{1,2} + \cdots + w_N x_{1,N} \
w_1 x_{2,1} + w_2 x_{2,2} + \cdots + w_N x_{2,N} \
\vdots \
w_1 x_{M,1} + w_2 x_{M,2} + \cdots + w_N x_{M,N} \
\end{bmatrix}
=
\begin{bmatrix}
x_{1,1} & x_{1,2} & \cdots & x_{1,N} \
x_{2,1} & x_{2,2} & \cdots & x_{2,N} \
\vdots & \vdots & \vdots & \vdots \
x_{M,1} & x_{M,2} & \cdots & x_{M,N} \
\end{bmatrix}
\begin{bmatrix}
w_1 \ w_2 \ \vdots \ w_N
\end{bmatrix}
=
\begin{bmatrix}
x_1^T \
x_2^T \
\vdots \
x_M^T \
\end{bmatrix}
\begin{bmatrix}
w_1 \ w_2 \ \vdots \ w_N
\end{bmatrix}
= X w
$$
잔차
선형 회귀 분석(linear regression)을 한 결과는 가중치 벡터 $w$라는 형태로 나타나고 예측치는 이 가중치 벡터를 사용한 독립 변수 데이터 레코드 즉, 벡터 $x_i$의 가중합 $w^Tx_i$이 된다. 이 예측치와 실제 값 $y_i$의 차이를 오차(error) 혹은 잔차(residual) $e_i$ 이라고 한다. 이러한 잔차 값을 모든 독립 변수 벡터에 대해 구하면 잔차 벡터 $e$가 된다.
$$ e_i = y_i - w^Tx_i $$
잔차 벡터는 다음과 같이 $y-Xw$로 간단하게 표기할 수 있다.
$$
e =
\begin{bmatrix}
e_{1} \
e_{2} \
\vdots \
e_{M} \
\end{bmatrix}
=
\begin{bmatrix}
y_{1} \
y_{2} \
\vdots \
y_{M} \
\end{bmatrix}
-
\begin{bmatrix}
w^T x_{1} \
w^T x_{2} \
\vdots \
w^T x_{M} \
\end{bmatrix}
=
\begin{bmatrix}
y_{1} \
y_{2} \
\vdots \
y_{M} \
\end{bmatrix}
-
\begin{bmatrix}
x^T_{1}w \
x^T_{2}w \
\vdots \
x^T_{M}w \
\end{bmatrix}
=
\begin{bmatrix}
y_{1} \
y_{2} \
\vdots \
y_{M} \
\end{bmatrix}
-
\begin{bmatrix}
x^T_{1} \
x^T_{2} \
\vdots \
x^T_{M} \
\end{bmatrix}
w
= y - Xw
$$
$$
e = y - Xw
$$
End of explanation
"""
# Residual sum of squares: RSS = e^T e (a scalar).
np.dot(e.T,e)
"""
Explanation: 잔차 제곱합
잔차의 크기는 잔차 벡터의 각 원소를 제곱한 후 더한 잔차 제곱합(RSS: Residual Sum of Squares)를 이용하여 구한다. 이 값은 $e^Te$로 간단하게 쓸 수 있으며 그 값은 다음과 같이 계산한다.
$$
e^Te = \sum_{i=1}^{N} (y_i - w^Tx_i)^2 = (y - Xw)^T (y - Xw)
$$
End of explanation
"""
# Quadratic form x^T A x evaluated for a sample vector and matrix.
x = np.array([1, 2, 3])
x
A = np.arange(1, 10).reshape(3, 3)
A
# For 1-D x, dot treats the left operand as a row and the right as a
# column vector, so this is exactly x^T A x.
x.dot(A).dot(x)
"""
Explanation: 이차 형식
벡터의 이차 형식(Quadratic Form) 이란 어떤 벡터의 각 원소에 대해 가능한 모든 쌍의 조합 $(x_i, x_j)$을 구한 다음 그 곱셈$x_ix_j$을 더한 것을 말한다. 이 때 각 쌍에 대해 서로 다른 가중치 $a_{i,j}$를 적용하여 $a_{i,j}x_ix_j$의 합을 구한다면 다음과 같이 $x^TAx$라는 간단한 식으로 쓸 수 있다.
$$
x^T A x =
\begin{bmatrix}
x_{1} & x_{2} & \cdots & x_{N}
\end{bmatrix}
\begin{bmatrix}
a_{1,1} & a_{1,2} & \cdots & a_{1,N} \
a_{2,1} & a_{2,2} & \cdots & a_{2,N} \
\vdots & \vdots & \ddots & \vdots \
a_{N,1} & a_{N,2} & \cdots & a_{N,N} \
\end{bmatrix}
\begin{bmatrix}
x_{1} \
x_{2} \
\vdots \
x_{N} \
\end{bmatrix} = \sum_{i=1}^{N} \sum_{j=1}^{N} a_{i,j} x_i x_j
$$
예를 들어 $ x = [1, 2, 3]^T $ 이고 A가 다음과 같다면
$$ A =
\begin{pmatrix}
1 & 2 & 3 \
4 & 5 & 6 \
7 & 8 & 9 \
\end{pmatrix}
$$
NumPy 에서 벡터의 이차 형식은 다음과 같이 계산한다.
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/guide/tf_numpy.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
import timeit
print("Using TensorFlow version %s" % tf.__version__)
"""
Explanation: TensorFlow 기반의 NumPy API
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://www.tensorflow.org/guide/tf_numpy"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a> </td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/tf_numpy.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행</a></td>
<td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/tf_numpy.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 소스 보기</a> </td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/tf_numpy.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드</a></td>
</table>
개요
TensorFlow는 tf.experimental.numpy로 사용할 수 있는 NumPy API의 하위 집합을 구현합니다. 이를 통해 TensorFlow에서 NumPy 코드를 빠르게 실행할 수 있으며 TensorFlow의 모든 API에 액세스할 수 있습니다.
설정
End of explanation
"""
# Switch TensorFlow to NumPy-style type promotion and literal type
# inference. Note: this changes behavior for all of TF, not just tnp.
tnp.experimental_enable_numpy_behavior()
"""
Explanation: NumPy 동작 사용
tnp를 NumPy로 사용하려면 TensorFlow에 대해 NumPy 동작을 활성화합니다.
End of explanation
"""
# Create an ND array and check out different attributes.
ones = tnp.ones([5, 3], dtype=tnp.float32)
print("Created ND array with shape = %s, rank = %s, "
"dtype = %s on device = %s\n" % (
ones.shape, ones.ndim, ones.dtype, ones.device))
# `ndarray` is just an alias to `tf.Tensor`.
print("Is `ones` an instance of tf.Tensor: %s\n" % isinstance(ones, tf.Tensor))
# Try commonly used member functions.
print("ndarray.T has shape %s" % str(ones.T.shape))
print("narray.reshape(-1) has shape %s" % ones.reshape(-1).shape)
"""
Explanation: 이 호출은 TensorFlow에서 유형 승격을 활성화하고, 리터럴을 텐서로 변환할 때 유형 추론을 변경하여 NumPy 표준을 보다 엄격하게 따릅니다.
참고: 이 호출은 tf.experimental.numpy 모듈뿐만 아니라 전체 TensorFlow의 동작을 변경합니다.
TensorFlow NumPy ND 배열
ND 배열이라는 tf.experimental.numpy.ndarray의 인스턴스는 특정 기기에 배치된 주어진 dtype의 다차원 고밀도 배열을 나타냅니다. 이것은 tf.Tensor에 대한 별칭입니다. ndarray.T, ndarray.reshape, ndarray.ravel 등과 같은 유용한 메서드를 위한 ND 배열 클래스를 확인해 보세요.
먼저 ND 배열 객체를 만든 다음, 다양한 메서드를 호출합니다.
End of explanation
"""
print("Type promotion for operations")
values = [tnp.asarray(1, dtype=d) for d in
(tnp.int32, tnp.int64, tnp.float32, tnp.float64)]
for i, v1 in enumerate(values):
for v2 in values[i + 1:]:
print("%s + %s => %s" %
(v1.dtype.name, v2.dtype.name, (v1 + v2).dtype.name))
"""
Explanation: 형식 승격
TensorFlow NumPy API에는 리터럴을 ND 배열로 변환하고 ND 배열 입력에 대해 형식 승격을 수행하기 위한 잘 정의된 의미 체계가 있습니다. 자세한 내용은 np.result_type을 참조하세요.
TensorFlow API는 tf.Tensor 입력을 변경하지 않고 유형 승격을 수행하지 않는 반면, TensorFlow NumPy API는 NumPy 유형 승격 규칙에 따라 모든 입력을 승격합니다. 다음 예에서는 유형 승격을 수행합니다. 먼저, 서로 다른 유형의 ND 배열 입력에 추가를 실행하고 출력 유형을 기록합니다. 이러한 유형의 승격은 TensorFlow API에서 허용되지 않습니다.
End of explanation
"""
print("Type inference during array creation")
print("tnp.asarray(1).dtype == tnp.%s" % tnp.asarray(1).dtype.name)
print("tnp.asarray(1.).dtype == tnp.%s\n" % tnp.asarray(1.).dtype.name)
"""
Explanation: 마지막으로, ndarray.asarray를 사용하여 리터럴을 ND 배열로 변환하고 결과 유형을 확인합니다.
End of explanation
"""
tnp.experimental_enable_numpy_behavior(prefer_float32=True)
print("When prefer_float32 is True:")
print("tnp.asarray(1.).dtype == tnp.%s" % tnp.asarray(1.).dtype.name)
print("tnp.add(1., 2.).dtype == tnp.%s" % tnp.add(1., 2.).dtype.name)
tnp.experimental_enable_numpy_behavior(prefer_float32=False)
print("When prefer_float32 is False:")
print("tnp.asarray(1.).dtype == tnp.%s" % tnp.asarray(1.).dtype.name)
print("tnp.add(1., 2.).dtype == tnp.%s" % tnp.add(1., 2.).dtype.name)
"""
Explanation: 리터럴을 ND 배열로 변환할 때 NumPy는 tnp.int64 및 tnp.float64와 같은 넓은 유형을 선호합니다. 반대로 tf.convert_to_tensor는 상수를 tf.Tensor로 변환하기 위해 tf.int32 및 tf.float32 유형을 선호합니다. TensorFlow NumPy API는 정수에 대한 NumPy 동작을 준수합니다. 부동 소수점의 경우, experimental_enable_numpy_behavior의 prefer_float32 인수를 사용하여 tf.float64에 비해 tf.float32를 선호할지 여부를 제어할 수 있습니다(기본적으로 False). 예를 들면 다음과 같습니다.
End of explanation
"""
x = tnp.ones([2, 3])
y = tnp.ones([3])
z = tnp.ones([1, 2, 1])
print("Broadcasting shapes %s, %s and %s gives shape %s" % (
x.shape, y.shape, z.shape, (x + y + z).shape))
"""
Explanation: 브로드캐스팅
TensorFlow와 유사하게 NumPy는 "브로드캐스팅" 값에 대한 풍부한 의미 체계를 정의합니다. 자세한 내용은 NumPy 브로드캐스팅 가이드를 확인하고 TensorFlow 브로드캐스팅 의미 체계와 비교할 수 있습니다.
End of explanation
"""
x = tnp.arange(24).reshape(2, 3, 4)
print("Basic indexing")
print(x[1, tnp.newaxis, 1:3, ...], "\n")
print("Boolean indexing")
print(x[:, (True, False, True)], "\n")
print("Advanced indexing")
print(x[1, (0, 0, 1), tnp.asarray([0, 1, 1])])
# Mutation is currently not supported
try:
tnp.arange(6)[1] = -1
except TypeError:
print("Currently, TensorFlow NumPy does not support mutation.")
"""
Explanation: 인덱싱
NumPy는 매우 정교한 인덱싱 규칙을 정의합니다. NumPy 인덱싱 가이드를 참조하세요. 아래 인덱스로 ND 배열을 사용합니다.
End of explanation
"""
class Model(object):
    """Model with a dense and a linear layer."""

    def __init__(self):
        # Weights are created lazily on the first `predict` call, once the
        # input feature size is known.
        self.weights = None

    def predict(self, inputs):
        """Forward pass: Dense(64) + ReLU, then a linear projection to 2 outputs.

        Args:
          inputs: ND array of shape (batch, features). `features` is read
            from `inputs.shape[1]` on the first call to size the weights.

        Returns:
          ND array of shape (batch, 2).
        """
        if self.weights is None:
            size = inputs.shape[1]
            # Note that type `tnp.float32` is used for performance.
            # w1 is scaled by 1/sqrt(fan_in) to keep activation variance stable.
            stddev = tnp.sqrt(size).astype(tnp.float32)
            w1 = tnp.random.randn(size, 64).astype(tnp.float32) / stddev
            bias = tnp.random.randn(64).astype(tnp.float32)
            w2 = tnp.random.randn(64, 2).astype(tnp.float32) / 8
            self.weights = (w1, bias, w2)
        else:
            w1, bias, w2 = self.weights
        y = tnp.matmul(inputs, w1) + bias
        y = tnp.maximum(y, 0)  # Relu
        return tnp.matmul(y, w2)  # Linear projection
# Instantiate the model; its weights are created on this first `predict`.
model = Model()
# Create input data and compute predictions.
print(model.predict(tnp.ones([2, 32], dtype=tnp.float32)))
"""
Explanation: 예시 모델
다음으로, 모델을 만들고 추론을 실행하는 방법을 볼 수 있습니다. 이 간단한 모델은 relu 레이어와 직선 투영법(linear projection)을 적용합니다. 이후 섹션에서는 TensorFlow의 GradientTape를 사용하여 모델의 그래디언트를 계산하는 방법을 보여줍니다.
End of explanation
"""
# ND array passed into NumPy function.
np_sum = np.sum(tnp.ones([2, 3]))
print("sum = %s. Class: %s" % (float(np_sum), np_sum.__class__))
# `np.ndarray` passed into TensorFlow NumPy function.
tnp_sum = tnp.sum(np.ones([2, 3]))
print("sum = %s. Class: %s" % (float(tnp_sum), tnp_sum.__class__))
# It is easy to plot ND arrays, given the __array__ interface.
labels = 15 + 2 * tnp.random.randn(1, 1000)
_ = plt.hist(labels)
"""
Explanation: TensorFlow NumPy 및 NumPy
TensorFlow NumPy는 전체 NumPy 사양의 하위 집합을 구현합니다. 시간이 지남에 따라 더 많은 기호가 추가되지만, 가까운 장래에 지원되지 않는 체계적인 기능이 있습니다. 여기에는 NumPy C API 지원, Swig 통합, Fortran 저장 순서, 뷰 및 stride_tricks 및 일부 dtype(예: np.recarray 및 np.object)이 포함됩니다. 자세한 내용은 TensorFlow NumPy API 설명서를 참조하세요.
NumPy 상호 운용성
TensorFlow ND 배열은 NumPy 함수와 상호 운용될 수 있습니다. 이러한 객체는 __array__ 인터페이스를 구현합니다. NumPy는 이 인터페이스를 사용하여 함수 인수를 처리하기 전에 np.ndarray 값으로 변환합니다.
마찬가지로, TensorFlow NumPy 함수는 np.ndarray를 포함하여 다양한 형식의 입력을 받을 수 있습니다. 이러한 입력은 ndarray.asarray를 호출하여 ND 배열로 변환됩니다.
ND 배열과 np.ndarray 간의 변환은 실제 데이터 복사를 트리거할 수 있습니다. 자세한 내용은 버퍼 복사본 섹션을 참조하세요.
End of explanation
"""
x = tnp.ones([2]) + np.ones([2])
print("x = %s\nclass = %s" % (x, x.__class__))
"""
Explanation: 버퍼 복사본
TensorFlow NumPy와 NumPy 코드를 혼합하면 데이터 복사가 트리거될 수 있습니다. 이는 TensorFlow NumPy가 NumPy보다 메모리 정렬에 대한 요구 사항이 더 엄격하기 때문입니다.
np.ndarray가 TensorFlow Numpy에 전달되면 정렬 요구 사항을 확인하고 필요한 경우 복사본을 트리거합니다. ND 배열 CPU 버퍼를 NumPy에 전달할 때 일반적으로 버퍼는 정렬 요구 사항을 충족하며 NumPy는 복사본을 만들 필요가 없습니다.
ND 배열은 로컬 CPU 메모리가 아닌 기기에 배치된 버퍼를 참조할 수 있습니다. 이러한 경우, NumPy 함수를 호출하면 필요에 따라 네트워크 또는 기기에서 복사본이 트리거됩니다.
따라서 NumPy API 호출과의 혼합은 일반적으로 주의해서 수행해야 하며 사용자는 데이터 복사 오버헤드에 주의해야 합니다. TensorFlow NumPy 호출을 TensorFlow 호출과 인터리빙하는 것은 일반적으로 안전하며 데이터 복사를 방지합니다. 자세한 내용은 tensorflow 상호 운용성 섹션을 참조하세요.
연산자 우선 순위
TensorFlow NumPy는 NumPy보다 높은 __array_priority__를 정의합니다. 즉, ND 배열과 np.ndarray를 둘 다 포함하는 연산자의 경우, 전자가 우선합니다. 즉, np.ndarray 입력이 ND 배열로 변환되고 연산자의 TensorFlow NumPy 구현이 호출됩니다.
End of explanation
"""
x = tf.constant([1, 2])
print(x)
# `asarray` and `convert_to_tensor` here are no-ops.
tnp_x = tnp.asarray(x)
print(tnp_x)
print(tf.convert_to_tensor(tnp_x))
# Note that tf.Tensor.numpy() will continue to return `np.ndarray`.
print(x.numpy(), x.numpy().__class__)
"""
Explanation: TF NumPy 및 TensorFlow
TensorFlow NumPy는 TensorFlow를 기반으로 하므로 TensorFlow와 원활하게 상호 운용됩니다.
tf.Tensor 및 ND 배열
ND 배열은 tf.Tensor에 대한 별칭이므로 실제 데이터 복사를 트리거하지 않고 서로 혼합될 수 있습니다.
End of explanation
"""
# ND array passed into TensorFlow function.
tf_sum = tf.reduce_sum(tnp.ones([2, 3], tnp.float32))
print("Output = %s" % tf_sum)
# `tf.Tensor` passed into TensorFlow NumPy function.
tnp_sum = tnp.sum(tf.ones([2, 3]))
print("Output = %s" % tnp_sum)
"""
Explanation: TensorFlow 상호 운용성
ND 배열은 단지 tf.Tensor에 대한 별칭이기 때문에 ND 배열을 TensorFlow API에 전달할 수 있습니다. 앞서 언급했듯이, 이러한 상호 연산은 가속기 또는 원격 기기에 있는 데이터의 경우에도 실제로 데이터 복사를 수행하지 않습니다.
반대로, tf.Tensor 객체는 데이터 복사를 수행하지 않고 tf.experimental.numpy API로 전달할 수 있습니다.
End of explanation
"""
def create_batch(batch_size=32):
    """Draw a random (inputs, labels) batch.

    Returns a tuple of float32 ND arrays of shapes (batch_size, 32) and
    (batch_size, 2), sampled from a standard normal distribution.
    """
    inputs = tnp.random.randn(batch_size, 32).astype(tnp.float32)
    labels = tnp.random.randn(batch_size, 2).astype(tnp.float32)
    return inputs, labels
def compute_gradients(model, inputs, labels):
    """Computes gradients of squared loss between model prediction and labels.

    Args:
      model: `Model` instance whose `weights` are already initialized
        (i.e. `model.predict` has been called at least once).
      inputs: batch of model inputs.
      labels: batch of targets matching the prediction shape.

    Returns:
      Tuple of gradients of the summed squared loss w.r.t. each entry of
      `model.weights`, with matching shapes.
    """
    with tf.GradientTape() as tape:
        assert model.weights is not None
        # Note that `model.weights` need to be explicitly watched since they
        # are not tf.Variables.
        tape.watch(model.weights)
        # Compute prediction and loss
        prediction = model.predict(inputs)
        loss = tnp.sum(tnp.square(prediction - labels))
    # This call computes the gradient through the computation above.
    return tape.gradient(loss, model.weights)
# Compute per-weight gradients for one random batch.
inputs, labels = create_batch()
gradients = compute_gradients(model, inputs, labels)
# Inspect the shapes of returned gradients to verify they match the
# parameter shapes.
print("Parameter shapes:", [w.shape for w in model.weights])
print("Gradient shapes:", [g.shape for g in gradients])
# Verify that gradients are of type ND array.
assert isinstance(gradients[0], tnp.ndarray)


# Computes a batch of jacobians. Each row is the jacobian of an element in the
# batch of outputs w.r.t. the corresponding input batch element.
def prediction_batch_jacobian(inputs):
    """Return (prediction, batch_jacobian) of the model for `inputs`.

    Each slice of the batch jacobian is the jacobian of one output row with
    respect to its own input row only (not the whole batch).
    """
    with tf.GradientTape() as tape:
        tape.watch(inputs)
        prediction = model.predict(inputs)
    return prediction, tape.batch_jacobian(prediction, inputs)

inp_batch = tnp.ones([16, 32], tnp.float32)
output, batch_jacobian = prediction_batch_jacobian(inp_batch)
# Note how the batch jacobian shape relates to the input and output shapes.
print("Output shape: %s, input shape: %s" % (output.shape, inp_batch.shape))
print("Batch jacobian shape:", batch_jacobian.shape)
"""
Explanation: 그래디언트 및 야고비 행렬식: tf.GradientTape
TensorFlow의 GradientTape는 TensorFlow 및 TensorFlow NumPy 코드를 통한 역전파에 사용할 수 있습니다.
예시 모델 섹션에서 생성된 모델을 사용하고 그래디언트와 야고비 행렬식을 계산합니다.
End of explanation
"""
inputs, labels = create_batch(512)
print("Eager performance")
compute_gradients(model, inputs, labels)
print(timeit.timeit(lambda: compute_gradients(model, inputs, labels),
number=10) * 100, "ms")
print("\ntf.function compiled performance")
compiled_compute_gradients = tf.function(compute_gradients)
compiled_compute_gradients(model, inputs, labels) # warmup
print(timeit.timeit(lambda: compiled_compute_gradients(model, inputs, labels),
number=10) * 100, "ms")
"""
Explanation: 추적 컴파일: tf.function
Tensorflow의 tf.function은 코드를 "추적 컴파일"한 다음 해당 추적을 최적화하여 훨씬 빠른 성능을 제공합니다. 그래프 및 함수 소개를 참조하세요.
tf.function은 TensorFlow NumPy 코드를 최적화하는 데에도 사용할 수 있습니다. 다음은 속도 향상을 보여주는 간단한 예입니다. tf.function 코드의 본문에는 TensorFlow NumPy API에 대한 호출이 포함됩니다.
End of explanation
"""
@tf.function
def vectorized_per_example_gradients(inputs, labels):
def single_example_gradient(arg):
inp, label = arg
return compute_gradients(model,
tnp.expand_dims(inp, 0),
tnp.expand_dims(label, 0))
# Note that a call to `tf.vectorized_map` semantically maps
# `single_example_gradient` over each row of `inputs` and `labels`.
# The interface is similar to `tf.map_fn`.
# The underlying machinery vectorizes away this map loop which gives
# nice speedups.
return tf.vectorized_map(single_example_gradient, (inputs, labels))
batch_size = 128
inputs, labels = create_batch(batch_size)
per_example_gradients = vectorized_per_example_gradients(inputs, labels)
for w, p in zip(model.weights, per_example_gradients):
print("Weight shape: %s, batch size: %s, per example gradient shape: %s " % (
w.shape, batch_size, p.shape))
# Benchmark the vectorized computation above and compare with
# unvectorized sequential computation using `tf.map_fn`.
@tf.function
def unvectorized_per_example_gradients(inputs, labels):
def single_example_gradient(arg):
inp, label = arg
return compute_gradients(model,
tnp.expand_dims(inp, 0),
tnp.expand_dims(label, 0))
return tf.map_fn(single_example_gradient, (inputs, labels),
fn_output_signature=(tf.float32, tf.float32, tf.float32))
print("Running vectorized computation")
print(timeit.timeit(lambda: vectorized_per_example_gradients(inputs, labels),
number=10) * 100, "ms")
print("\nRunning unvectorized computation")
per_example_gradients = unvectorized_per_example_gradients(inputs, labels)
print(timeit.timeit(lambda: unvectorized_per_example_gradients(inputs, labels),
number=10) * 100, "ms")
"""
Explanation: 벡터화: tf.vectorized_map
TensorFlow는 병렬 루프를 벡터화하는 기능을 내장하여 속도를 1~2배 높일 수 있습니다. 이러한 속도 향상은 tf.vectorized_map API를 통해 액세스할 수 있으며 TensorFlow NumPy 코드에도 적용됩니다.
해당 입력 배치 요소에 대해 배치에서 각 출력의 그래디언트를 계산하는 것이 때때로 유용합니다. 이러한 계산은 아래와 같이 tf.vectorized_map 을 사용하여 효율적으로 수행할 수 있습니다.
End of explanation
"""
print("All logical devices:", tf.config.list_logical_devices())
print("All physical devices:", tf.config.list_physical_devices())
# Try to get the GPU device. If unavailable, fallback to CPU.
try:
device = tf.config.list_logical_devices(device_type="GPU")[0]
except IndexError:
device = "/device:CPU:0"
"""
Explanation: 기기 배치
TensorFlow NumPy는 CPU, GPU, TPU 및 원격 기기에 연산을 배치할 수 있습니다. 기기 배치를 위한 표준 TensorFlow 메커니즘을 사용합니다. 아래의 간단한 예는 모든 기기를 나열한 다음 특정 기기에 계산을 배치하는 방법을 보여줍니다.
TensorFlow에는 또한 기기 간에 계산을 복제하고 여기에서 다루지 않을 집단 감소(collective reduction)를 수행하기 위한 API가 있습니다.
기기 나열하기
tf.config.list_logical_devices 및 tf.config.list_physical_devices를 사용하여 사용할 기기를 찾을 수 있습니다.
End of explanation
"""
print("Using device: %s" % str(device))
# Run operations in the `tf.device` scope.
# If a GPU is available, these operations execute on the GPU and outputs are
# placed on the GPU memory.
with tf.device(device):
prediction = model.predict(create_batch(5)[0])
print("prediction is placed on %s" % prediction.device)
"""
Explanation: 연산 배치하기: tf.device
tf.device 범위에서 호출하여 기기에 연산을 배치할 수 있습니다.
End of explanation
"""
with tf.device("/device:CPU:0"):
prediction_cpu = tnp.copy(prediction)
print(prediction.device)
print(prediction_cpu.device)
"""
Explanation: 기기 간에 ND 배열 복사하기: tnp.copy
특정 기기 범위에 배치된 tnp.copy를 호출하면 데이터가 해당 기기에 이미 있는 경우를 제외하고 해당 기기에 데이터를 복사합니다.
End of explanation
"""
def benchmark(f, inputs, number=30, force_gpu_sync=False):
    """Utility to benchmark `f` on each value in `inputs`.

    Args:
      f: callable taking a single argument.
      inputs: iterable of inputs; `f` is timed separately on each one.
      number: repetitions per input passed to `timeit` (mean is reported).
      force_gpu_sync: if True, copy a tensor to the CPU after each call so
        that asynchronously dispatched GPU work is included in the timing.

    Returns:
      List of mean runtimes in milliseconds, one per input.
    """
    times = []
    for inp in inputs:
        def _g():
            if force_gpu_sync:
                one = tnp.asarray(1)
            f(inp)
            if force_gpu_sync:
                with tf.device("CPU:0"):
                    tnp.copy(one)  # Force a sync for GPU case
        _g()  # warmup
        t = timeit.timeit(_g, number=number)
        times.append(t * 1000. / number)
    return times
def plot(np_times, tnp_times, compiled_tnp_times, has_gpu, tnp_times_gpu):
    """Plot the different runtimes.

    NOTE(review): reads the module-level `sizes` list for the x-axis rather
    than taking it as a parameter, so it must be called after `sizes` is
    defined. `tnp_times_gpu` may be None when `has_gpu` is falsy.
    """
    plt.xlabel("size")
    plt.ylabel("time (ms)")
    plt.title("Sigmoid benchmark: TF NumPy vs NumPy")
    plt.plot(sizes, np_times, label="NumPy")
    plt.plot(sizes, tnp_times, label="TF NumPy (CPU)")
    plt.plot(sizes, compiled_tnp_times, label="Compiled TF NumPy (CPU)")
    if has_gpu:
        plt.plot(sizes, tnp_times_gpu, label="TF NumPy (GPU)")
    plt.legend()
# Define a simple implementation of `sigmoid`, and benchmark it using
# NumPy and TensorFlow NumPy for different input sizes.
def np_sigmoid(y):
    """Logistic sigmoid 1 / (1 + exp(-y)), computed with plain NumPy."""
    return np.reciprocal(1. + np.exp(-y))
def tnp_sigmoid(y):
    """Logistic sigmoid computed with TF NumPy ops (eager execution)."""
    return 1. / (1. + tnp.exp(-y))


@tf.function
def compiled_tnp_sigmoid(y):
    """Trace-compiled (tf.function) version of `tnp_sigmoid`."""
    return tnp_sigmoid(y)
sizes = (2 ** 0, 2 ** 5, 2 ** 10, 2 ** 15, 2 ** 20)
np_inputs = [np.random.randn(size).astype(np.float32) for size in sizes]
np_times = benchmark(np_sigmoid, np_inputs)
with tf.device("/device:CPU:0"):
tnp_inputs = [tnp.random.randn(size).astype(np.float32) for size in sizes]
tnp_times = benchmark(tnp_sigmoid, tnp_inputs)
compiled_tnp_times = benchmark(compiled_tnp_sigmoid, tnp_inputs)
has_gpu = len(tf.config.list_logical_devices("GPU"))
if has_gpu:
with tf.device("/device:GPU:0"):
tnp_inputs = [tnp.random.randn(size).astype(np.float32) for size in sizes]
tnp_times_gpu = benchmark(compiled_tnp_sigmoid, tnp_inputs, 100, True)
else:
tnp_times_gpu = None
plot(np_times, tnp_times, compiled_tnp_times, has_gpu, tnp_times_gpu)
"""
Explanation: 성능 비교
TensorFlow NumPy는 CPU, GPU, TPU에서 디스패치될 수 있는 고도로 최적화된 TensorFlow 커널을 사용합니다. TensorFlow는 또한 연산 융합과 같은 많은 컴파일러 최적화를 수행하며, 이는 성능 및 메모리 개선으로 이어집니다. 자세한 내용은 Grappler를 사용한 TensorFlow 그래프 최적화를 참조하세요.
그러나 TensorFlow는 NumPy와 비교하여 디스패치 연산에 대한 오버헤드가 더 높습니다. 소규모 연산(약 10마이크로초 미만)으로 구성된 워크로드의 경우 이러한 오버헤드가 런타임에서 우세할 수 있으며 NumPy가 더 나은 성능을 제공할 수 있습니다. 다른 경우에는 일반적으로 TensorFlow가 더 나은 성능을 제공합니다.
아래 벤치마크를 실행하여 다양한 입력 크기에서 NumPy와 TensorFlow Numpy의 성능을 비교해 보세요.
End of explanation
"""
|
AshleySetter/optoanalysis | Damping_radius_relation.ipynb | mit | # constants
k_B = Boltzmann
eta_air = 18.27e-6 # Pa # (J.T.R.Watson (1995)).
d_gas = 0.372e-9 #m #(Sone (2007)), ρSiO2
rho_SiO2 = 1800 # #kg/m^3 - Number told to us by
T0 = 300
R = 50e-9 # m
def mfp(P_gas):
mfp_val = k_B*T0/(2**0.5*pi*d_gas**2*P_gas)
return mfp_val
"""
Explanation: Relation 1
The form of the damping is given as:
$$ \Gamma_0 = \dfrac{6 \pi \eta_{air} r}{m} \dfrac{0.619}{0.619 + K_n} (1+ c_k)$$
(Li et al. 2011 - https://arxiv.org/pdf/1101.1283.pdf)
Where:
$\eta_{air}$ is the viscosity of air
$r$ is the radius of the silica nanoparticles
$m$ is the mass of the silica nanoparticles
$K_n$ is the Knudsen number $\dfrac{s}{r}$ where $s$ is the mean free path of the air particles
$c_k$ is a small positive function of $K_n$ which takes the form $(0.31K_n)/(0.785+1.152K_n+K_n^2)$
The mean free path is dependant upon the pressure of the system. The mathematical
form the mean free path, is dependant upon whether the particles under study are con- sidered to be hard like spheres colliding or as “soft” spheres following Lennard-Jones Potential. In this case assuming the gas particles to be hard spheres yields the following form,
$$s = \dfrac{k_B T_0}{ \sqrt{2} \pi d_{gas}^2 P_{gas}} $$
(Muddassar - Thesis - Cooling and Squeezing in Levitated Optomechanics 2016)
Where:
$d_{gas}$ is the diameter of the gas particles
$T_0$ is the temperature of the gas
$P_{gas}$ is the pressure of the gas
End of explanation
"""
# Molecular mass of air [kg]: 28.97 g/mol divided by Avogadro's number.
m_gas = 4.81e-26
def mfp_2(P_gas):
    """Alternative viscosity-based mean free path [m] at pressure `P_gas` [Pa].

    Implements s = (eta_air / P_gas) * sqrt(pi * k_B * T0 / (2 * m_gas));
    agrees with `mfp` as demonstrated in the comparison cell below.
    """
    mfp_val = eta_air/P_gas * (pi*k_B*T0/(2*m_gas))**0.5
    return mfp_val
# Sanity check: the two mean-free-path formulas agree at 3 mbar (= 300 Pa).
s = mfp(300) # 3mbar = 300 Pascals
print(s)
s2 = mfp_2(300) # 3mbar = 300 Pascals
print(s2)
def Gamma_env(radius, Pressure_mbar):
    """Environmental damping rate Gamma_0 [rad/s] (full form, Li et al. 2011).

    Implements Gamma_0 = (6*pi*eta_air*r/m) * 0.619/(0.619 + K_n) * (1 + c_K)
    with Knudsen number K_n = s/r built from the mean free path `mfp`, and
    c_K = 0.31*K_n / (0.785 + 1.152*K_n + K_n**2).

    Parameters
    ----------
    radius : float
        Nanoparticle radius in metres.
    Pressure_mbar : float
        Gas pressure in mbar (converted to Pa internally).
    """
    mass = rho_SiO2 * 4/3*pi*radius**3
    Pressure_pascals = 100*Pressure_mbar
    s = mfp(Pressure_pascals)
    K_n = s/radius
    c_K = 0.31*K_n/(0.785 + 1.152*K_n + K_n**2)
    Gamma_0 = 6*pi*eta_air*radius/mass * 0.619/(0.619 + K_n) * (1+c_K)
    return Gamma_0
# Damping rate at the reference radius (50 nm) and 3 mbar.
Gamma_env(R, 3)
"""
Explanation: Alternativity one can use:
$$ s = \dfrac{\eta_{air}}{P_{gas}} \sqrt{\dfrac{\pi k_B T_0}{2m}} $$
this produces the same result as the previous form
https://en.wikipedia.org/wiki/Mean_free_path
Where
- $\eta_{air}$ is the viscosity of air
- $m$ is the molecualar mass of air
- $T_0$ is the temperature of the gas
- $P_{gas}$ is the pressure of the gas
molecular mass of air is $28.97 g/mol$ and the number of molecules in a mole is Avogadro's Number $6.0221409e^{23}$ therefore we get the molecular mass of air to be $4.81e^{-26} Kg$
End of explanation
"""
def Gamma_env_simple(radius, Pressure_mbar):
    """Simplified environmental damping rate [rad/s] (Rashid/Gieseler form).

    Implements
        Gamma_0 = 0.619 * (9*pi/sqrt(2)) * eta_air * d_gas**2
                  / (rho_SiO2 * k_B * T0) * P / r,
    which matches the full `Gamma_env` over the pressure range of interest.
    (A duplicated commented-out copy of the formula line has been removed.)

    Parameters
    ----------
    radius : float
        Nanoparticle radius in metres.
    Pressure_mbar : float
        Gas pressure in mbar (converted to Pa internally).
    """
    Pressure_pascals = 100 * Pressure_mbar
    Gamma_0 = 0.619*9*pi*eta_air*d_gas**2*Pressure_pascals/(2**0.5*rho_SiO2*k_B*T0*radius)
    return Gamma_0
"""
Explanation: Muddassar and Gieseler's simplified formula for the environmental damping is:
$$ \Gamma_0 = 0.619 \dfrac{9 \pi}{\sqrt{2}} \dfrac{\eta_{air}d_{gas}^2}{\rho_{SiO_2} k_B T_0} \dfrac{P_{gas}}{r}$$
This produces the same result as the full unsimplified form for all pressures in range of interest.
Where:
$\eta_{air}$ is the viscosity of air
$d_{gas}$ is the diameter of the gas particles
$\rho_{SiO_2}$ is the density of the silica nanoparticles
$r$ is the radius of the silica nanoparticles
$T_0$ is the temperature of the gas
$P_{gas}$ is the pressure of the gas
End of explanation
"""
def Gamma_alternative(radius, Pressure_mbar):
    """Damping rate [rad/s] from Gieseler's thermal-nonlinearities paper.

    Implements Gamma_0 = 64 * a**2 * P / (3 * m * v_bar) with mean gas speed
    v_bar = sqrt(8*k_B*T0/(pi*m_gas)).  NOTE(review): as discussed in the
    surrounding text, this is exactly double the Chang/Millen result.
    """
    Pressure = 100*Pressure_mbar
    ave_velocity = (8*k_B*T0/(pi*m_gas))**0.5
    mass= rho_SiO2*4/3*pi*radius**3
    Gamma0 = 64*radius**2*Pressure/(3*mass*ave_velocity)
    return Gamma0
"""
Explanation: Relation 2
In Gieseler's Thermal Nonlinearities paper he has the following equation for $\Gamma_0$
$$ \Gamma_0 = \dfrac{64a^2}{3m\bar{v}}P $$
https://www.nature.com/nphys/journal/v9/n12/full/nphys2798.html
This appears to be incorrect as it is exactly double that which you get with Chang's formula and James Millen's formula
Where:
- $a$ is the radius of the particle
- $m$ is the mass of the particle
- $\bar{v}$ is the average verlocity of the gas particles
Where we can use the following formula for $\bar{v}$
$$ \bar{v} = \sqrt{\dfrac{8k_B T_0}{\pi \mu}} $$
Where:
- $T_0$ is the temperature of the gas
- $\mu$ is the mass of the air molecules
End of explanation
"""
ave_velocity = (8*k_B*T0/(pi*m_gas))**0.5
ave_velocity
def Gamma_chang(radius, Pressure_mbar):
Pressure = 100*Pressure_mbar
ave_velocity = (8*k_B*T0/(pi*m_gas))**0.5
Gamma0 = 8*Pressure/(pi*ave_velocity*radius*rho_SiO2)/2
return 2*Gamma0
Gamma_chang(R, 3)
"""
Explanation: Relation 3
In Chang et al. paper "Cavity opto-mechanics using an optically levitated nanosphere"
They have $\Gamma_0 = \dfrac{\gamma_g}{2} = \dfrac{8}{\pi}\dfrac{P}{\bar{v}r\rho}$
Where
- $\rho$ is the density of the nanoparticle
- $P$ is the pressure of the gas
- $\bar{v}$ is the mean speed of the gas particles
- $r$ is the radius of the nanoparticle
End of explanation
"""
def Gamma_Millen_imp(radius, Pressure_mbar):
    """Damping rate [rad/s] due to impinging gas particles (Millen et al.).

    Implements Gamma_imp = (4*pi/3) * m_gas * N * r**2 * v_bar / M, using the
    ideal-gas number density N = P/(k_B*T0) and mean gas speed
    v_bar = sqrt(8*k_B*T0/(pi*m_gas)).  Agrees exactly with `Gamma_chang`.
    """
    Pressure = 100*Pressure_mbar
    ave_velocity = (8*k_B*T0/(pi*m_gas))**0.5
    mass = rho_SiO2*4/3*pi*radius**3
    N = Pressure/(k_B*T0)
    Gamma0 = 4*pi*m_gas*N*radius**2*ave_velocity/(3*mass)
    return Gamma0
"""
Explanation: Also relation 3 (different derivation by Millen et al.)
James Millen derives the following form of the damping due to impinging particles:
$$ \Gamma^{imp} = \dfrac{4\pi}{3}\dfrac{mNr^2 \bar{v}_{T_{imp}}}{M} $$
https://arxiv.org/abs/1309.3990 -
https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.114.123602
However in their earlier paper http://iopscience.iop.org/article/10.1088/1367-2630/15/1/015001/meta they get double this, which is what Gieseler gets in his thermal non-linearities paper.
Where:
- $m$ is the molecular mass of the gas
- $N$ is the particle density of the gas
- $r$ is the radius of the nanoparticle
- $M$ is the mass of the nanoparticle
- $\bar{v}_{T_{imp}}$ is the mean thermal velocity $\sqrt{\dfrac{8 k_B T^{imp}}{\pi m}}$
Using the ideal gas equation $P = R\rho T$ and $N= \dfrac{\rho}{m}$ with $R=\dfrac{k_B}{m}$ we get $N = \dfrac{P}{k_BT}$
End of explanation
"""
Gamma_chang(R, 3)
"""
Explanation: This agrees exactly with Chang's result
End of explanation
"""
def Gamma_Millen_em(radius, Pressure_mbar, T_em):
    """Damping rate [rad/s] due to emerging gas particles (Millen et al.).

    Implements Gamma_em = m_gas * N * r**2 * pi**(3/2) / (3*sqrt(h') * M),
    with h' = m_gas/(k_B*T_em) and N = P/(k_B*T_em) as coded here.

    NOTE(review): the surrounding text defines h' = m/(2*k_B*T_0); this
    code omits the factor 2 and evaluates both h' and N at T_em --
    confirm against Millen et al. before relying on absolute values.
    """
    Pressure = 100*Pressure_mbar
    h_prime = m_gas/(k_B*T_em)
    mass = rho_SiO2*4/3*pi*radius**3
    N = Pressure/(k_B*T_em)
    Gamma0 = (m_gas*N*radius**2*pi**(3/2))/(3*np.sqrt(h_prime)*mass)
    return Gamma0
def calc_surface_temp_Millen(T_em, T_imp=300):
    """Particle surface temperature from the emerging-gas temperature.

    Inverts the accommodation-coefficient relation
        alpha = (T_em - T_imp) / (T_surf - T_imp)
    to give T_surf = T_imp + (T_em - T_imp) / alpha.

    Bug fix: the original used (T_em + T_imp) in the numerator, which
    yields T_surf != T_imp even when the gas leaves at the impinging
    temperature (T_em == T_imp); the difference form restores that limit.

    Parameters
    ----------
    T_em : float
        Temperature of gas particles emerging from the surface [K].
    T_imp : float, optional
        Temperature of impinging gas particles [K] (default 300).
    """
    # Accommodation coefficient of silica (from the nanoscale temperature
    # measurement paper cited in the notebook).
    accomodation_coef = 0.777
    T_surf = T_imp + (T_em - T_imp) / accomodation_coef
    return T_surf
"""
Explanation: Relation 3+ (more damping due to considering emerging particles)
James Millen derives the following form of the damping due to emerging particles:
$\Gamma^{em} = \dfrac{mNr^2\pi^{\frac{3}{2}}}{3\sqrt{h'}M}$
https://arxiv.org/abs/1309.3990 -
https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.114.123602
Where:
- $m$ is the molecular mass of the gas
- $N$ is the particle density of the gas
- $r$ is the radius of the nanoparticle
- $h'$ is $\dfrac{m}{2k_B T_0}$ where $T_0$ is the temperature of the gas
- $M$ is the mass of the nanoparticle
Using the ideal gas equation $P = R\rho T$ and $N= \dfrac{\rho}{m}$ with $R=\dfrac{k_B}{m}$ we get $N = \dfrac{P}{k_BT}$
He also says that this leads to $\Gamma^{em} = \dfrac{\pi}{8}\dfrac{T^{em}}{T^{imp}}$
From this you get the total effective damping rate is
$$ \Gamma_0 = \Gamma^{em} + \Gamma^{imp} = \dfrac{\pi}{8}\sqrt{\dfrac{T^{em}}{T^{imp}}}\Gamma^{imp} + \Gamma^{imp} $$
Therefore damping rate is higher if you consider this
End of explanation
"""
P_exp = np.load("Pressure_mbar.npy")
Gamma_exp = np.load("Gamma_radians.npy")
P_G_Dict = dict(zip(P_exp, Gamma_exp))
r = np.linspace(5e-9, 1000e-9, 1000)
P = 3.6 # mbar
alpha=0.5
plt.figure(figsize=[10, 10])
plt.loglog(r, Gamma_env_simple(r, P), 'k', label="Rashid/Gieseler Full form", alpha=alpha)
#plt.semilogy(r, Gamma_env_simple(r, P), 'grey', label="Rashid/Gieseler simplfied form", alpha=alpha)
plt.loglog(r, Gamma_alternative(r, P), label="Gieseler Thermal Non-linearities form", alpha=alpha)
plt.loglog(r, Gamma_chang(r, P), label="Chang form", alpha=alpha)
plt.loglog(r, Gamma_Millen_imp(r, P), label="Millen (imp) form", alpha=alpha)
plt.xlabel("radius (nm)")
plt.ylabel("Γ (radians/s)")
plt.legend(loc='best')
plt.show()
r = 50e-9
P = np.linspace(1e-2, 1000, 1000)
plt.figure(figsize=[10, 10])
plt.loglog(P, Gamma_env_simple(r, P), 'k', label="Rashid/Gieseler Full form", alpha=alpha)
#plt.loglog(P, Gamma_env_simple(r, P), 'grey', label="Rashid/Gieseler simplfied form", alpha=alpha)
plt.loglog(P, Gamma_alternative(r, P), label="Gieseler Thermal Non-linearities form", alpha=alpha)
plt.loglog(P, Gamma_chang(r, P), label="Chang form", alpha=alpha)
plt.loglog(P, Gamma_Millen_imp(r, P), label="Millen (imp) form", alpha=alpha)
plt.loglog(P_exp, Gamma_exp, label="Experiment", alpha=alpha)
plt.xlabel("P (mbar)")
plt.ylabel("Γ (radians/s)")
plt.legend(loc='best')
plt.show()
"""
Explanation: Plot of all 3 relations and measured data
End of explanation
"""
|
beangoben/HistoriaDatos_Higgs | Dia1/.ipynb_checkpoints/3_Intro a Matplotlib-checkpoint.ipynb | gpl-2.0 | import numpy as np # modulo de computo numerico
import matplotlib.pyplot as plt # modulo de graficas
import pandas as pd # modulo de datos
# esta linea hace que las graficas salgan en el notebook
%matplotlib inline
"""
Explanation: Intro a Matplotlib
Matplotlib = Libreria para graficas cosas matematicas
Que es Matplotlib?
Matplotlib es una libreria para crear imagenes 2D de manera facil.
Checate mas en :
Pagina oficial : http://matplotlib.org/
Galleria de ejemplo: http://matplotlib.org/gallery.html
Una libreria mas avanzada que usa matplotlib, Seaborn: http://stanford.edu/~mwaskom/software/seaborn/
Libreria de visualizacion interactiva: http://bokeh.pydata.org/
Buenisimo Tutorial: http://www.labri.fr/perso/nrougier/teaching/matplotlib/
Para usar matplotlib, solo tiene que importar el modulo ..tambien te conviene importar numpy pues es muy util
End of explanation
"""
# Basic line plot: y = x^2 over five integer points.
x = np.array([0,1,2,3,4])
y = x**2 # square x
plt.plot(x,y)
plt.title("Grafica sencilla")
plt.show()
"""
Explanation: Crear graficas (plot)
Crear graficas es muy facil en matplotlib, si tienes una lista de valores X y otra y..solo basta usar :
End of explanation
"""
# Same plot but with 100 evenly spaced points in [0, 10] from np.linspace.
x = np.linspace(0,10,100)
y = x**2 # square x
plt.plot(x,y)
plt.title("Grafica sencilla")
plt.show()
"""
Explanation: Podemos usar la funcion np.linspace para crear valores en un rango, por ejemplo si queremos 100 numeros entre 0 y 10 usamos:
End of explanation
"""
# Two curves on the same axes: a straight line and a parabola.
x = np.linspace(0,10,100)
y1 = x # a straight line
y2 = x**2 # square x
plt.plot(x,y1)
plt.plot(x,y2)
plt.title("Dos graficas sencillas")
plt.show()
"""
Explanation: Y podemos graficar dos cosas al mismo tiempo:
End of explanation
"""
# Same two curves, now labelled so plt.legend() can identify each line.
x = np.linspace(0,10,100)
y1 = x # a straight line
y2 = x**2 # square x
plt.plot(x,y1,label="Linea")
plt.plot(x,y2,label="Cuadrado")
plt.legend()
plt.title("Dos graficas sencillas")
plt.show()
"""
Explanation: Que tal si queremos distinguir cada linea? Pues usamos legend(), de leyenda..tambien tenemos que agregarles nombres a cada plot
End of explanation
"""
# Demonstrate the four basic matplotlib linestyles on four power curves.
x = np.linspace(0,10,100)
y1 = x # a straight line
y2 = x**2 # square x
y3 = np.sqrt(x) # square root of x
y4 = np.power(x,1.5) # x to the power 1.5
plt.plot(x,y1,label="Linea",linestyle='-') # solid line
plt.plot(x,y2,label="Cuadrado",linestyle=':') # dotted
plt.plot(x,y3,label="Raiz",linestyle='-.') # dash-dot
plt.plot(x,y4,label="potencia 1.5",linestyle='--') # dashed
plt.legend()
plt.title("Dos graficas sencillas")
plt.show()
"""
Explanation: Tambien podemos hacer mas cosas, como dibujar solamente los puntos, o las lineas con los puntos usando linestyle:
End of explanation
"""
# Scatter plot of N uniformly random points in the unit square.
N = 50 # number of points
x = np.random.rand(N) # uniform random numbers in [0, 1)
y = np.random.rand(N)
plt.scatter(x, y)
plt.title("Scatter de puntos aleatorios")
plt.show()
"""
Explanation: Dibujando puntos (scatter)
Aveces no queremos dibujar lineas, sino puntos, esto nos da informacion de donde se encuentras datos de manera espacial. Para esto podemos usarlo de la siguiente manera:
End of explanation
"""
# Scatter plot encoding two extra dimensions: marker colour and marker area.
N = 50 # number of points
x = np.random.rand(N) # uniform random numbers in [0, 1)
y = np.random.rand(N)
colores = np.random.rand(N) # random colours
radios= 15 * np.random.rand(N) # random radii in [0, 15)
areas = np.pi * radios**2 # area of a circle, pi * r^2
plt.scatter(x, y, s=areas, c=colores, alpha=0.5)
plt.title("Scatter plot de puntos aleatorios")
plt.show()
"""
Explanation: Pero ademas podemos meter mas informacion, por ejemplo dar colores cada punto, o darle tamanos diferentes:
End of explanation
"""
# Histogram of 500 draws from the uniform distribution on [0, 1).
N=500
x = np.random.rand(N) # uniform random numbers in [0, 1)
plt.hist(x)
plt.title("Histograma aleatorio")
plt.show()
"""
Explanation: Histogramas (hist)
Los histogramas nos muestran distribuciones de datos, la forma de los datos, nos muestran el numero de datos de diferentes tipos:
End of explanation
"""
# Histogram of 500 draws from the standard normal distribution.
N=500
x = np.random.randn(N)
plt.hist(x)
plt.title("Histograma aleatorio Normal")
plt.show()

# Overlay two normal distributions (mean 0, sd 1 vs mean 2, sd 2)
# using 20 bins and alpha so the overlap stays visible.
N=1000
x1 = np.random.randn(N)
x2 = 2+2*np.random.randn(N)
plt.hist(x1,20,alpha=0.3)
plt.hist(x2,20,alpha=0.3)
plt.title("Histograma de dos distribuciones")
plt.show()
"""
Explanation: otro tipo de datos, tomados de una campana de gauss, es decir una distribucion normal:
End of explanation
"""
# Download the Gapminder life-expectancy spreadsheet and load it as a DataFrame.
xurl="http://spreadsheets.google.com/pub?key=phAwcNAVuyj2tPLxKvvnNPA&output=xls"
df=pd.read_excel(xurl)
print("Tamano completo es %s"%str(df.shape))
df.head()
"""
Explanation: Bases de datos en el internet
Aveces los datos que queremos se encuentran en el internet. Asumiendo que se encuentran ordenados y en un formato amigable siempre los podemos bajar y guardar como un DataFrame.
Por ejemplo:
Gapminder es una pagina con mas de 500 conjunto de daatos relacionado a indicadores globales como ingresos, producto interno bruto (PIB=GDP) y esperanza de vida.
Aqui bajamos la base de datos de esperanza de vida, lo guardamos en memoria y lo lodeamos como un excel:
Ojo! Aqui usamos .head() para imprimir los primeros 5 renglones del dataframe pues son gigantescos los datos.
End of explanation
"""
# Data wrangling: shorten the long column name, make it the index,
# then transpose so rows are years and columns are countries.
df = df.rename(columns={'Life expectancy with projections. Yellow is IHME': 'Life expectancy'})
df.index=df['Life expectancy']
df=df.drop('Life expectancy',axis=1)  # the column is now redundant with the index
df=df.transpose()
df.head()
"""
Explanation: Arreglando los Datos
Head nos permite darle un vistazo a los datos... asi a puro ojo vemos que las columnas son anios y los renglones los paises...ponder reversar esto con transpose, pero tambien vemos que esta con indices enumerados, prefeririamos que los indices fueran los paises, entonces los cambiamos y tiramos la columna que ya no sirve...al final un head para ver que todo esta bien... a este juego de limpiar y arreglar datos se llama "Data Wrangling"
End of explanation
"""
# Life expectancy in Mexico across the full year range.
df['Mexico'].plot()
print("== Esperanza de Vida en Mexico ==")
"""
Explanation: Entonces ahora podemos ver la calidad de vida en Mexico atravez del tiempo:
End of explanation
"""
# Restrict to the years 1890-1955 by boolean-filtering on the (year) index.
subdf=df[ df.index >= 1890 ]
subdf=subdf[ subdf.index <= 1955 ]
subdf['Mexico'].plot()
plt.title("Esperanza de Vida en Mexico entre 1890 y 1955")
plt.show()
"""
Explanation: de esta visualizacion vemos que la caldiad ha ido subiendo apartir de 1900, ademas vemos mucho movimiento entre 1890 y 1950, justo cuando habia muchas guerras en Mexico.
Tambien podemos seleccionar un rango selecto de años, vemos que este rango es interesante entonces
End of explanation
"""
# Same view as above, but zooming with xlim instead of filtering the data.
df['Mexico'].plot()
plt.xlim(1890,1955)
plt.title("Esperanza de Vida en Mexico entre 1890 y 1955")
plt.show()
"""
Explanation: o sin tanto rollo, podemos restringuir el rango de nuestra grafica con xlim (los limites del eje X)
End of explanation
"""
# Compare the three North American countries on one set of axes.
df[['Mexico','United States','Canada']].plot()
plt.title("Esperanza de Vida en Norte-America")
plt.show()
"""
Explanation: Tambien es importante ver como esto se compara con otros paises, podemos comparar con todo Norteamerica:
End of explanation
"""
|
jinzishuai/learn2deeplearn | deeplearning.ai/C5.SequenceModel/Week1_RNN/assignment/Dinosaur Island -- Character-level language model/Dinosaurus Island -- Character level language model final - v2-Copy1.ipynb | gpl-3.0 | import numpy as np
from utils import *
import random
from random import shuffle
"""
Explanation: Character level language model - Dinosaurus land
Welcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go berserk, so choose wisely!
<table>
<td>
<img src="images/dino.jpg" style="width:250;height:300px;">
</td>
</table>
Luckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this dataset. (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath!
By completing this assignment you will learn:
How to store text data for processing using an RNN
How to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit
How to build a character-level text generation recurrent neural network
Why clipping the gradients is important
We will begin by loading in some functions that we have provided for you in rnn_utils. Specifically, you have access to functions such as rnn_forward and rnn_backward which are equivalent to those you've implemented in the previous assignment.
End of explanation
"""
# Read the corpus of dinosaur names, lower-case it, and build the character vocabulary.
data = open('dinos.txt', 'r').read()
data= data.lower()
chars = list(set(data))  # unique characters: a-z plus '\n'
data_size, vocab_size = len(data), len(chars)
print('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))
"""
Explanation: 1 - Problem Statement
1.1 - Dataset and Preprocessing
Run the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size.
End of explanation
"""
# Bidirectional character <-> index maps over the sorted vocabulary
# (sorting makes the mapping deterministic; '\n' sorts first, so it gets index 0).
char_to_ix = { ch:i for i,ch in enumerate(sorted(chars)) }
ix_to_char = { i:ch for i,ch in enumerate(sorted(chars)) }
print(ix_to_char)
"""
Explanation: The characters are a-z (26 characters) plus the "\n" (or newline character), which in this assignment plays a role similar to the <EOS> (or "End of sentence") token we had discussed in lecture, only here it indicates the end of the dinosaur name rather than the end of a sentence. In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26. We also create a second python dictionary that maps each index back to the corresponding character. This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. Below, char_to_ix and ix_to_char are the python dictionaries.
End of explanation
"""
### GRADED FUNCTION: clip

def clip(gradients, maxValue):
    '''
    Clips the gradients' values between minimum and maximum.

    Arguments:
    gradients -- a dictionary containing the gradients "dWaa", "dWax", "dWya", "db", "dby"
    maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue

    Returns:
    gradients -- a dictionary with the clipped gradients.
    '''

    dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']

    # Clip each gradient element-wise to [-maxValue, maxValue] to mitigate
    # exploding gradients. out=gradient makes np.clip write the result back
    # into the same array, so the input dictionary's arrays are clipped in place.
    for gradient in [dWax, dWaa, dWya, db, dby]:
        np.clip(gradient, -maxValue, maxValue, out=gradient)

    gradients = {"dWaa": dWaa, "dWax": dWax, "dWya": dWya, "db": db, "dby": dby}

    return gradients
# Smoke-test clip() on seeded random gradients scaled by 10 so several entries
# exceed the threshold; expected values appear in the markdown cell below.
np.random.seed(3)
dWax = np.random.randn(5,3)*10
dWaa = np.random.randn(5,5)*10
dWya = np.random.randn(2,5)*10
db = np.random.randn(5,1)*10
dby = np.random.randn(2,1)*10
gradients = {"dWax": dWax, "dWaa": dWaa, "dWya": dWya, "db": db, "dby": dby}
gradients = clip(gradients, 10)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
"""
Explanation: 1.2 - Overview of the model
Your model will have the following structure:
Initialize parameters
Run the optimization loop
Forward propagation to compute the loss function
Backward propagation to compute the gradients with respect to the loss function
Clip the gradients to avoid exploding gradients
Using the gradients, update your parameter with the gradient descent update rule.
Return the learned parameters
<img src="images/rnn.png" style="width:450;height:300px;">
<caption><center> Figure 1: Recurrent Neural Network, similar to what you had built in the previous notebook "Building a RNN - Step by Step". </center></caption>
At each time-step, the RNN tries to predict what is the next character given the previous characters. The dataset $X = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is a list of characters in the training set, while $Y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$ is such that at every time-step $t$, we have $y^{\langle t \rangle} = x^{\langle t+1 \rangle}$.
2 - Building blocks of the model
In this part, you will build two important blocks of the overall model:
- Gradient clipping: to avoid exploding gradients
- Sampling: a technique used to generate characters
You will then apply these two functions to build the model.
2.1 - Clipping the gradients in the optimization loop
In this section you will implement the clip function that you will call inside of your optimization loop. Recall that your overall loop structure usually consists of a forward pass, a cost computation, a backward pass, and a parameter update. Before updating the parameters, you will perform gradient clipping when needed to make sure that your gradients are not "exploding," meaning taking on overly large values.
In the exercise below, you will implement a function clip that takes in a dictionary of gradients and returns a clipped version of gradients if needed. There are different ways to clip gradients; we will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N]. More generally, you will provide a maxValue (say 10). In this example, if any component of the gradient vector is greater than 10, it would be set to 10; and if any component of the gradient vector is less than -10, it would be set to -10. If it is between -10 and 10, it is left alone.
<img src="images/clip.png" style="width:400;height:150px;">
<caption><center> Figure 2: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into slight "exploding gradient" problems. </center></caption>
Exercise: Implement the function below to return the clipped gradients of your dictionary gradients. Your function takes in a maximum threshold and returns the clipped versions of your gradients. You can check out this hint for examples of how to clip in numpy. You will need to use the argument out = ....
End of explanation
"""
# GRADED FUNCTION: sample

def sample(parameters, char_to_ix, seed):
    """
    Sample a sequence of characters according to a sequence of probability distributions output of the RNN

    Arguments:
    parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b.
    char_to_ix -- python dictionary mapping each character to an index.
    seed -- used for grading purposes. Do not worry about it.

    Returns:
    indices -- a list of length n containing the indices of the sampled characters.
    """

    # Retrieve parameters and relevant shapes from "parameters" dictionary
    Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
    vocab_size = by.shape[0]
    n_a = Waa.shape[1]

    # Step 1: the first input is the "dummy" all-zeros one-hot vector x<1> = 0
    x = np.zeros((vocab_size, 1))
    # Step 1': the initial hidden state a<0> is zero
    a_prev = np.zeros((n_a, 1))

    # List which will contain the indices of the generated characters
    indices = []

    # idx flags the newline character; -1 means nothing has been sampled yet
    idx = -1

    # Loop over time-steps t. At each time-step, sample a character from a
    # probability distribution and append its index to "indices". We stop if we
    # reach 50 characters, which helps debugging and prevents an infinite loop.
    counter = 0
    newline_character = char_to_ix['\n']

    while (idx != newline_character and counter != 50):

        # Step 2: one forward RNN step — equations (1), (2) and (3).
        a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)
        z = np.dot(Wya, a) + by
        # softmax turns the scores z into a probability distribution over characters
        y = np.exp(z) / np.sum(np.exp(z))

        # for grading purposes
        np.random.seed(counter + seed)

        # Step 3: sample the index of the next character from the distribution y
        idx = np.random.choice(range(vocab_size), p=y.ravel())

        indices.append(idx)

        # Step 4: overwrite x with the one-hot encoding of the sampled character
        x = np.zeros((vocab_size, 1))
        x[idx] = 1

        # Update "a_prev" to be "a"
        a_prev = a

        # for grading purposes
        seed += 1
        counter += 1

    # Hit the 50-character cap without sampling '\n': terminate the name explicitly
    if (counter == 50):
        indices.append(char_to_ix['\n'])

    return indices
# Smoke-test sample() on seeded random parameters (vocab_size comes from the
# earlier data-loading cell); expected output appears in the markdown below.
np.random.seed(2)
_, n_a = 20, 100
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}

indices = sample(parameters, char_to_ix, 0)
print("Sampling:")
print("list of sampled indices:", indices)
print("list of sampled characters:", [ix_to_char[i] for i in indices])
"""
Explanation: Expected output:
<table>
<tr>
<td>
**gradients["dWaa"][1][2] **
</td>
<td>
10.0
</td>
</tr>
<tr>
<td>
**gradients["dWax"][3][1]**
</td>
<td>
-10.0
</td>
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td>
0.29713815361
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td>
[ 10.]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>
[ 8.45833407]
</td>
</tr>
</table>
2.2 - Sampling
Now assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:
<img src="images/dinos3.png" style="width:500;height:300px;">
<caption><center> Figure 3: In this picture, we assume the model is already trained. We pass in $x^{\langle 1\rangle} = \vec{0}$ at the first time step, and have the network then sample one character at a time. </center></caption>
Exercise: Implement the sample function below to sample characters. You need to carry out 4 steps:
Step 1: Pass the network the first "dummy" input $x^{\langle 1 \rangle} = \vec{0}$ (the vector of zeros). This is the default input before we've generated any characters. We also set $a^{\langle 0 \rangle} = \vec{0}$
Step 2: Run one step of forward propagation to get $a^{\langle 1 \rangle}$ and $\hat{y}^{\langle 1 \rangle}$. Here are the equations:
$$ a^{\langle t+1 \rangle} = \tanh(W_{ax} x^{\langle t \rangle } + W_{aa} a^{\langle t \rangle } + b)\tag{1}$$
$$ z^{\langle t + 1 \rangle } = W_{ya} a^{\langle t + 1 \rangle } + b_y \tag{2}$$
$$ \hat{y}^{\langle t+1 \rangle } = softmax(z^{\langle t + 1 \rangle })\tag{3}$$
Note that $\hat{y}^{\langle t+1 \rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). $\hat{y}^{\langle t+1 \rangle}_i$ represents the probability that the character indexed by "i" is the next character. We have provided a softmax() function that you can use.
Step 3: Carry out sampling: Pick the next character's index according to the probability distribution specified by $\hat{y}^{\langle t+1 \rangle }$. This means that if $\hat{y}^{\langle t+1 \rangle }_i = 0.16$, you will pick the index "i" with 16% probability. To implement it, you can use np.random.choice.
Here is an example of how to use np.random.choice():
python
np.random.seed(0)
p = np.array([0.1, 0.0, 0.7, 0.2])
index = np.random.choice([0, 1, 2, 3], p = p.ravel())
This means that you will pick the index according to the distribution:
$P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.
Step 4: The last step to implement in sample() is to overwrite the variable x, which currently stores $x^{\langle t \rangle }$, with the value of $x^{\langle t + 1 \rangle }$. You will represent $x^{\langle t + 1 \rangle }$ by creating a one-hot vector corresponding to the character you've chosen as your prediction. You will then forward propagate $x^{\langle t + 1 \rangle }$ in Step 1 and keep repeating the process until you get a "\n" character, indicating you've reached the end of the dinosaur name.
End of explanation
"""
# GRADED FUNCTION: optimize

def optimize(X, Y, a_prev, parameters, learning_rate = 0.01):
    """
    Execute one step of the optimization to train the model.

    Arguments:
    X -- list of integers, where each integer is a number that maps to a character in the vocabulary.
    Y -- list of integers, exactly the same as X but shifted one index to the left.
    a_prev -- previous hidden state.
    parameters -- python dictionary containing:
                        Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
                        Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
                        Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
                        b -- Bias, numpy array of shape (n_a, 1)
                        by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
    learning_rate -- learning rate for the model.

    Returns:
    loss -- value of the loss function (cross-entropy)
    gradients -- python dictionary containing:
                        dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
                        dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
                        dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)
                        db -- Gradients of bias vector, of shape (n_a, 1)
                        dby -- Gradients of output bias vector, of shape (n_y, 1)
    a[len(X)-1] -- the last hidden state, of shape (n_a, 1)
    """

    # Forward propagate through time: compute the cross-entropy loss and cache
    # the activations needed by backpropagation (rnn_forward is from utils).
    loss, cache = rnn_forward(X, Y, a_prev, parameters)

    # Backpropagate through time to get the gradients of the loss w.r.t. the parameters
    gradients, a = rnn_backward(X, Y, parameters, cache)

    # Clip gradients element-wise to [-5, 5] to avoid exploding gradients
    gradients = clip(gradients, 5)

    # One gradient-descent update of all parameters
    parameters = update_parameters(parameters, gradients, learning_rate)

    # a[len(X)-1] is the hidden state after the last time-step; the caller feeds
    # it in as a_prev for the next training example.
    return loss, gradients, a[len(X)-1]
# Smoke-test optimize() on seeded random parameters and a short index sequence;
# expected output appears in the markdown cell below.
np.random.seed(1)
vocab_size, n_a = 27, 100
a_prev = np.random.randn(n_a, 1)
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
X = [12,3,5,11,22,3]
Y = [4,14,11,22,25, 26]

loss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
print("Loss =", loss)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("np.argmax(gradients[\"dWax\"]) =", np.argmax(gradients["dWax"]))
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
print("a_last[4] =", a_last[4])
"""
Explanation: Expected output:
<table>
<tr>
<td>
**list of sampled indices:**
</td>
<td>
[18, 2, 26, 0]
</td>
</tr><tr>
<td>
**list of sampled characters:**
</td>
<td>
['r', 'b', 'z', '\n']
</td>
</tr>
</table>
3 - Building the language model
It is time to build the character-level language model for text generation.
3.1 - Gradient descent
In this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients). You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent. As a reminder, here are the steps of a common optimization loop for an RNN:
Forward propagate through the RNN to compute the loss
Backward propagate through time to compute the gradients of the loss with respect to the parameters
Clip the gradients if necessary
Update your parameters using gradient descent
Exercise: Implement this optimization process (one step of stochastic gradient descent).
We provide you with the following functions:
```python
def rnn_forward(X, Y, a_prev, parameters):
""" Performs the forward propagation through the RNN and computes the cross-entropy loss.
It returns the loss' value as well as a "cache" storing values to be used in the backpropagation."""
....
return loss, cache
def rnn_backward(X, Y, parameters, cache):
""" Performs the backward propagation through time to compute the gradients of the loss with respect
to the parameters. It returns also all the hidden states."""
...
return gradients, a
def update_parameters(parameters, gradients, learning_rate):
""" Updates parameters using the Gradient Descent Update Rule."""
...
return parameters
```
End of explanation
"""
# GRADED FUNCTION: model

def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):
    """
    Trains the model and generates dinosaur names.

    Arguments:
    data -- text corpus
    ix_to_char -- dictionary that maps the index to a character
    char_to_ix -- dictionary that maps a character to an index
    num_iterations -- number of iterations to train the model for
    n_a -- number of units of the RNN cell
    dino_names -- number of dinosaur names you want to sample at each iteration.
    vocab_size -- number of unique characters found in the text, size of the vocabulary

    Returns:
    parameters -- learned parameters
    """

    # Retrieve n_x and n_y from vocab_size
    n_x, n_y = vocab_size, vocab_size

    # Initialize parameters
    parameters = initialize_parameters(n_a, n_x, n_y)

    # Initialize loss (this is required because we want to smooth our loss, don't worry about it)
    loss = get_initial_loss(vocab_size, dino_names)

    # Build list of all dinosaur names (training examples).
    with open("dinos.txt") as f:
        examples = f.readlines()
    examples = [x.lower().strip() for x in examples]

    # Shuffle list of all dinosaur names
    shuffle(examples)

    # Initialize the hidden state of your LSTM
    a_prev = np.zeros((n_a, 1))

    # Optimization loop
    for j in range(num_iterations):

        # Cycle through the examples: j % len(examples) keeps the index valid.
        index = j % len(examples)
        # X starts with None, which rnn_forward interprets as the zero vector x<0>;
        # Y is X shifted one step left with '\n' appended to mark the end of the name.
        X = [None] + [char_to_ix[ch] for ch in examples[index]]
        Y = X[1:] + [char_to_ix["\n"]]

        # One SGD step: forward-prop -> backward-prop -> clip -> update (learning rate 0.01)
        curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)

        # Use a latency trick to keep the loss smooth. It happens here to accelerate the training.
        loss = smooth(loss, curr_loss)

        # Every 2000 Iteration, generate "n" characters thanks to sample() to check if the model is learning properly
        if j % 2000 == 0:

            print('Iteration: %d, Loss: %f' % (j, loss) + '\n')

            # The number of dinosaur names to print
            seed = 0
            for name in range(dino_names):

                # Sample indices and print them
                sampled_indices = sample(parameters, char_to_ix, seed)
                print_sample(sampled_indices, ix_to_char)

                seed += 1  # To get the same result for grading purposed, increment the seed by one.

            print('\n')

    return parameters
"""
Explanation: Expected output:
<table>
<tr>
<td>
**Loss **
</td>
<td>
126.503975722
</td>
</tr>
<tr>
<td>
**gradients["dWaa"][1][2]**
</td>
<td>
0.194709315347
</td>
<tr>
<td>
**np.argmax(gradients["dWax"])**
</td>
<td> 93
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td> -0.007773876032
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td> [-0.06809825]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>[ 0.01538192]
</td>
</tr>
<tr>
<td>
**a_last[4]**
</td>
<td> [-1.]
</td>
</tr>
</table>
3.2 - Training the model
Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. Every 100 steps of stochastic gradient descent, you will sample 10 randomly chosen names to see how the algorithm is doing. Remember to shuffle the dataset, so that stochastic gradient descent visits the examples in random order.
Exercise: Follow the instructions and implement model(). When examples[index] contains one dinosaur name (string), to create an example (X, Y), you can use this:
python
index = j % len(examples)
X = [None] + [char_to_ix[ch] for ch in examples[index]]
Y = X[1:] + [char_to_ix["\n"]]
Note that we use: index= j % len(examples), where j = 1....num_iterations, to make sure that examples[index] is always a valid statement (index is smaller than len(examples)).
The first entry of X being None will be interpreted by rnn_forward() as setting $x^{\langle 0 \rangle} = \vec{0}$. Further, this ensures that Y is equal to X but shifted one step to the left, and with an additional "\n" appended to signify the end of the dinosaur name.
End of explanation
"""
# Train the character-level RNN on the dinosaur-name corpus (prints samples every 2000 iterations)
parameters = model(data, ix_to_char, char_to_ix)
"""
Explanation: Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.
End of explanation
"""
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking
from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from shakespeare_utils import *
import sys
import io
"""
Explanation: Conclusion
You can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implementation generated some really cool names like maconucon, marloralus and macingsersaurus. Your model hopefully also learned that dinosaur names tend to end in saurus, don, aura, tor, etc.
If your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, dromaeosauroides is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest!
This assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the English language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name model for quite some time, and so far our favorite name is the great, undefeatable, and fierce: Mangosaurus!
<img src="images/mangosaurus.jpeg" style="width:250;height:300px;">
4 - Writing like Shakespeare
The rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative.
A similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of Dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer term dependencies that span many characters in the text--e.g., where a character appearing somewhere in a sequence can influence what should be a different character much later in the sequence. These long term dependencies were less important with dinosaur names, since the names were quite short.
<img src="images/shakespeare.jpg" style="width:500;height:400px;">
<caption><center> Let's become poets! </center></caption>
We have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. This may take a few minutes.
End of explanation
"""
# Fire the sampling callback at the end of every epoch so we can watch the
# generated text improve as training progresses.
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)

# One extra epoch on top of the pre-trained weights (the model was already
# trained for ~1000 epochs offline, so a single epoch is enough here).
model.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])

# Run this cell to try with different inputs without having to re-train the model
generate_output()
"""
Explanation: To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called "The Sonnets".
Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run generate_output, which will prompt asking you for an input (<40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try "Forsooth this maketh no sense " (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well.
End of explanation
"""
|
planetlabs/notebooks | jupyter-notebooks/in-class-exercises/band-math-generate-ndvi/generate-ndvi-exercise-key.ipynb | apache-2.0 | # To use Planet's CLI from this Notebook, begin your line as follows:
!planet data
# Here is an example of using Planet's CLI to search for a known item id:
# !planet data download --item-type PSScene --asset-type ortho_analytic_4b_sr --dest data --string-in id 20160831_180302_0e26
"""
Explanation: Deriving a vegetation index from 4-band satellite data
A vegetation index is generated by combining two or more spectral bands from a satellite image. There are many different vegetation indices; in this exercise we'll learn about the most commonly-used index.
NDVI
Researchers often use a vegetation index called NDVI to measure the "greenness" or density of vegetation across a landscape. In addition to monitoring vegetation health, NDVI (Normalized Difference Vegetation Index) can be used to track climate change, agricultural production, desertification, and land cover change. Developed by NASA scientist Compton Tucker in 1977, NDVI is derived from satellite imagery and compares reflected near-infrared light to reflected visible red light. It can be expressed as following equation:
In general, healthy and/or dense vegetation reflects a lot of near-infrared light and not as much red visible light. Conversely, when vegetation is sparse or not-so-healthy, its near-infrared reflectance decreases and its red light reflectance increases. You can read more about how NDVI is used to study cyclical, seasonal, and long-term changes to the Earth's physical characteristics from NASA and USGS researchers.
To create this vegetation index, we're going to use PlanetScope's SR (Surface Reflectance) data product. You can learn more about Surface Reflectance (SR) and Planet data here, but for the purposes of this exercise, all you need to know is: SR data is satellite data that has been algorithmically corrected to remove atmospheric interference.
In this exercise, you'll learn how to perform an NDVI calculation on PlanetScope Surface Reflectance data in Python, and generate a colorized NDVI image for visual analysis. Here are the steps to follow:
Download a PlanetScope SR product
Extract data from the red and near-infrared bands
Perform the NDVI calculation
Save the NDVI image
Apply a color scheme to the NDVI image
Generate a histogram to view NDVI values
Requirements
Python 2.7 or 3+
Planet's Python Client
rasterio
numpy
matplotlib
Planet API Key, stored as environment variable $PL_API_KEY.
Planet 4-Band Imagery with the following specifications: item-type: PSScene; asset-type: ortho_analytic_4b_sr
Step 1. Download a PlanetScope SR Product
For this exercise you'll need a 4-band PlanetScope Surface Reflectance product. You can search for & download your own data, or use the demo data provided in-class. If you choose to use the demo data, skip to Step 2.
To search for your own data, you'll first need to define an Area of Interest (AOI). http://geojson.io is a free browser-based tool that makes generating a GeoJSON-formatted AOI easy.
Once that's done, use one of the following methods to search for & download data:
- using Planet's Python CLI to interact with Planet's API from the command line
- using Planet's API directly to search and download
- using the Planet Explorer site to visually search for & download data
With all of the above, you'll want to filter for 4-Band PlanetScope data (item type: PSScene) and download the associated SR product (asset type: ortho_analytic_4b_sr)
Option 1: Searching & Downloading via CLI
If you choose to use Planet's CLI, you might find these search and download quickstart guides useful.
End of explanation
"""
# To use Planet's API, you'll probably begin by importing your favorite HTTP toolkit, e.g.:
import requests
from requests.auth import HTTPBasicAuth

# Your Planet API key is available in this Notebook as an env variable, e.g.:
import os
PLANET_API_KEY = os.getenv('PL_API_KEY')  # None if the variable is not set
"""
Explanation: Option 2: Searching & Downloading via API
If you prefer to use Planet's API directly via Python, this search & download quickstart Notebook may be useful.
End of explanation
"""
import rasterio

filename = "data/20160831_180302_0e26_3B_AnalyticMS_SR.tif"

# Load red and NIR bands - note all PlanetScope 4-band images have band order BGRN.
# Open the GeoTIFF once and read both bands inside the same context, instead of
# re-opening the file for each band.
with rasterio.open(filename) as src:
    band_red = src.read(3)
    band_nir = src.read(4)
"""
Explanation: Option 3: Searching & Downloading via Planet Explorer
If you prefer to browse for images visually, log in to your Planet account and use Planet Explorer to search for PlanetScope imagery. You'll want to make sure to set the Source filter to show only 4-band PlanetScope Scene results.
You can click here for an example search showing 4-band PlanetScope data in California's Central Valley.
Success! Data Obtained
Regardless of the path you chose to obtain data for this exercise, once you have successfully aquired a 4-band PlanetScope analytic_SR-type GeoTIFF, place the file in the data/ directory adjacent to this Notebook.
Step 2. Extract the data from the red and near-infrared bands
For this step, use Rasterio to open the raster image you downloaded (the .tif file). After that, use Rasterio read the data from the red and near-infrared bands: this will load the band data into arrays that you can manipulate using Python's NumPy libary.
Note: in PlanetScope 4-band images, the band order is BGRN: (1) Blue, (2) Green, (3) Red, (4) Near-infrared.
End of explanation
"""
# allow division by zero without throwing a warning
# (pixels where nir + red == 0 will become NaN instead of raising)
import numpy
numpy.seterr(divide='ignore', invalid='ignore')

# Calculate NDVI - remember, bands read via rasterio are just numpy arrays.
# astype(float) promotes the integer band data so the division is true
# floating-point division rather than integer arithmetic.
ndvi = (band_nir.astype(float) - band_red.astype(float)) / (band_nir + band_red)
"""
Explanation: Step 3. Perform the NDVI calculation
Next, you're going to calculate NDVI through subtraction and division of the values stored in the NumPy arrays. This calculation will give you NDVI values that range from -1 to 1. Values closer to 1 indicate a greater density of vegetation or higher level of "greenness."
As a reminder, the NDVI formula is:
\begin{equation}
ndvi = \frac{nir-red}{(nir+red)}
\end{equation}
Where nir is the Near-infrared band, and red is the Red band.
End of explanation
"""
# check range NDVI values, excluding NaN
# Because NDVI is a normalized difference, both values must lie in [-1, 1].
print(numpy.nanmin(ndvi))
print(numpy.nanmax(ndvi))
"""
Explanation: As a quick check of our calculations, let's print the minimum and maximum values in our calculated ndvi. Because we're using the NDVI formula to normalize the input bands, we know that our expected values should fall within -1.0 to +1.0.
(HINT: this is still a numpy array, so use numpy functions here).
End of explanation
"""
# get the metadata of original GeoTIFF:
# NOTE(review): `src` is the dataset opened (and closed) in an earlier cell;
# this assumes rasterio still exposes .meta after the `with` block exits -
# confirm with the rasterio version in use.
meta = src.meta
print(meta)

# get the dtype of our NDVI array:
ndvi_dtype = ndvi.dtype
print(ndvi_dtype)

# set the source metadata as kwargs we'll use to write the new data:
# (note this is an alias, not a copy - the updates below also mutate `meta`)
kwargs = meta

# update the 'dtype' value to match our NDVI array's dtype:
kwargs.update(dtype=ndvi_dtype)

# update the 'count' value since our output will no longer be a 4-band image:
kwargs.update(count=1)

# Finally, use rasterio to write new raster file 'data/ndvi.tif':
with rasterio.open('data/ndvi.tif', 'w', **kwargs) as dst:
    dst.write(ndvi, 1)
"""
Explanation: Assuming your min & max values are in-range -- congratulations! You have performed what is known as raster band math. Well done. This skill has many applications beyond the NDVI you're calculating in this exercise: the relationship of values between different spectral bands is the basis for many kinds of remote sensing analysis.
Step 5. Save the NDVI image
Now that you've calculated NDVI values, you're going to save the results to a new single-band image, making sure the new image file uses the geospatial metadata from the GeoTIFF you originally acquired, and the dtype of the new numpy array you generated above.
End of explanation
"""
from matplotlib import colors

# Credit: Joe Kington
class MidpointNormalize(colors.Normalize):
    """
    Normalize the colorbar so that diverging bars work there way either side from a prescribed midpoint value
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear mapping of [vmin, midpoint, vmax] onto [0, 0.5, 1];
        # NaN pixels are masked out. Masked values and other edge cases are
        # deliberately not handled to keep the example simple.
        anchors = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        interpolated = numpy.interp(value, anchors, targets)
        return numpy.ma.masked_array(interpolated, numpy.isnan(value))
# Begin by pulling in pyplot
import matplotlib.pyplot as plt

# Set min/max values from NDVI range for image
# HINT: refer back to earlier, when we verified our min & max values were within expected range
# (named ndvi_min/ndvi_max rather than min/max to avoid shadowing Python's builtins)
ndvi_min = numpy.nanmin(ndvi)
ndvi_max = numpy.nanmax(ndvi)

# Set our custom midpoint for most effective NDVI analysis
mid = 0.1

# Set your favorite diverging color scheme
# You can use https://matplotlib.org/users/colormaps.html as a reference
colormap = plt.cm.RdYlGn

# Call MidPointNormalize with our min, max, and custom midpoint
norm = MidpointNormalize(vmin=ndvi_min, vmax=ndvi_max, midpoint=mid)

# Create a pyplot figure, in which we'll display our colorized NDVI
fig = plt.figure(figsize=(20, 10))

# Add a subplot to our figure, which will contain the colorbar
ax = fig.add_subplot(111)

# Use 'imshow' to specify the input data, colormap, min, max, and norm for the colorbar
cbar_plot = ax.imshow(ndvi, cmap=colormap, vmin=ndvi_min, vmax=ndvi_max, norm=norm)

# Turn off the display of axis labels
ax.axis('off')

# Set a title
ax.set_title('Normalized Difference Vegetation Index', fontsize=18, fontweight='bold')

# Configure the colorbar
cbar = fig.colorbar(cbar_plot, orientation='horizontal', shrink=0.65)

# Call 'savefig' to save this plot to an image file
fig.savefig("data/ndvi-fig.png", dpi=200, bbox_inches='tight', pad_inches=0.7)

# Finally - let's take a look!
plt.show()
"""
Explanation: Step 6. Apply a color scheme to visualize the NDVI values on the image
Now that you've created ndvi.tif, you may be tempted to open it immediately & take a look at what you've accomplished. If you do, don't be disappointed when ndvi.tif opened in your favorite image viewer doesn't look like much at all. That's normal! Remember that this is not just any .tif but a GeoTIFF - one in which every pixel has a value of 1.0 or less.
At this point, you could open ndvi.tif in a Desktop GIS GUI like QGIS, and define color values for each pixel in order to get meaningful visual information out of the data. But this is a Python exercise, so let's use Matplotlib to do the same thing.
As we verified earlier, we know the values in our NDVI will range from -1 to 1. To best visualize this, we want to use a diverging color scheme, and we want to center the colorbar at a defined midpoint. Interestingly, the best midpoint for NDVI analysis is 0.1 - not 0.0 as you might expect. You can read more about how NDVIs are interpreted here.
To normalize a colorbar against our custom midpoint, we're going to take advantage of the following handy class originally created by Joe Kington:
End of explanation
"""
# Define a new figure
fig2 = plt.figure(figsize=(20,10))

# Give this new figure a subplot, which will contain the histogram itself
ax = fig2.add_subplot(111)

# Add a title & (x,y) labels to the plot
plt.title("NDVI Histogram", fontsize=18, fontweight='bold')
plt.xlabel("NDVI values", fontsize=14)
plt.ylabel("Number of pixels", fontsize=14)

# For the x-axis, we want to count every pixel that is not an empty value
# (NaN pixels were produced where the NDVI denominator was zero)
x = ndvi[~numpy.isnan(ndvi)]

# Define the number of bins to divide the data into
bins = 20

# Define a color for the histogram
# You can use https://matplotlib.org/2.0.0/examples/color/named_colors.html as a reference
color = 'lightgreen'

# call 'hist` with our x-axis, bins, and color details
ax.hist(x,bins,color=color)

# Save the generated figure to an external image file
fig2.savefig("data/ndvi-histogram.png", dpi=200, bbox_inches='tight', pad_inches=0.7)

# Finally - let's take a look!
plt.show()
"""
Explanation: 7. Generate a histogram of NDVI values
Congratulations! You've used band math to apply a well-known vegetation index formula to satellite data, and visualized it for analysis using a diverging color ramp. You're well on your way to getting meaningful information out of satellite imagery using Python.
As one last step, you use pyplot to generate a histogram of values in your NDVI calculation. This can be useful for quick analysis, giving visual insight into the distribution of "healthy" vs "unhealthy" vegetation values in your study area.
End of explanation
"""
|
awsteiner/o2sclpy | doc/static/examples/table.ipynb | gpl-3.0 | import o2sclpy
import matplotlib.pyplot as plot
import sys
plots=True
if 'pytest' in sys.modules:
plots=False
"""
Explanation: O$_2$scl table example for O$_2$sclpy
See the O$_2$sclpy documentation at
https://neutronstars.utk.edu/code/o2sclpy for more information.
End of explanation
"""
# Load the compiled O2scl shared library so its classes are callable from Python.
link=o2sclpy.linker()
link.link_o2scl()
"""
Explanation: Link the o2scl library:
End of explanation
"""
# Open the APR98 equation-of-state data file shipped in O2scl's data directory.
# Note the path is built from bytes, so the filename is a bytes literal.
hf=o2sclpy.hdf_file(link)
hf.open(link.o2scl_settings.get_data_dir()+b'apr98.o2')
"""
Explanation: Create an HDF5 file object and open the table in O$_2$scl's data file for the Akmal, Pandharipande, and Ravenhall equation of state. The open() function for the hdf_file class is documented here.
End of explanation
"""
# An empty name (b'') tells hdf_input_table below to read the first
# table found in the file.
tab=o2sclpy.table(link)
name=b''
"""
Explanation: We create a table object and specify a blank name to indicate
that we just want to read the first table in the file.
End of explanation
"""
# Deserialize the named (here: first) table from the HDF5 file into `tab`.
o2sclpy.hdf_input_table(link,hf,tab,name)
"""
Explanation: Read the table:
End of explanation
"""
# Done reading; release the HDF5 file handle.
hf.close()
"""
Explanation: Close the HDF5 file.
End of explanation
"""
# tab.summary() writes to C++ std::cout; cap_cout redirects that stream
# so the output shows up in the notebook instead of being lost.
cc=o2sclpy.cap_cout()
cc.open()
tab.summary()
cc.close()
"""
Explanation: We use the cap_cout class to capture std::cout to the Jupyter notebook. The summary() function lists the columns in the table.
End of explanation
"""
# Plot nucleonic and neutron-matter columns versus density
# (skipped when running under pytest; see the `plots` flag above).
if plots:
    plot.plot(tab['rho'],tab['nuc'])
    plot.plot(tab['rho'],tab['neut'])
    plot.show()
"""
Explanation: Finally, we use matplotlib to plot the data stored in the table:
End of explanation
"""
|
aaossa/Dear-Notebooks | More/FacebookGraphAPI_ES.ipynb | gpl-3.0 | import json
import requests
BASE = "https://graph.facebook.com"
VERSION = "v2.5"

def print_pretty(jsonstring, indent=4, sort_keys=False):
    """Print a JSON-serializable object to stdout in a human-friendly layout."""
    formatted = json.dumps(jsonstring, indent=indent, sort_keys=sort_keys)
    print(formatted)
"""
Explanation: Facebook Graph API v2.5
En este IPython Notebook se anotarán algunos usos básicos la API que provee Facebook.
End of explanation
"""
# Read the access token from the first line of a local "credentials" file,
# kept outside the notebook so the secret is not committed to source control.
with open("credentials") as f:
    access_token = str(f.read().splitlines()[0])
"""
Explanation: Tomamos el access token temporal creado en Graph API Explorer. Si queremos crear uno que sea permanente podemos usar las instrucciones de esta pregunta de StackOverflow o de esta otra pregunta. Alternativamente, podemos crear un token de acceso con nuestra id y clave:
End of explanation
"""
# GET /me - fetch the current user's id and name.
url = "{}/{}/me".format(BASE, VERSION)
params = {
    "access_token": access_token,
    # The Graph API expects `fields` as a single comma-separated string.
    # Passing a Python list would make `requests` encode it as repeated
    # `fields=...` query parameters, which the API does not combine.
    "fields": ",".join(["id", "name"])
}
req = requests.get(url, params=params)
print_pretty(req.json())

my_id = req.json()["id"]
my_name = req.json()["name"]
"""
Explanation: Partiremos con lo más simple. Una consulta GET para obtener información sobre nosotros mismos.
GET /me
El token de acceso se envía como parámetro, junto con los campos que queremos obtener de la consulta:
End of explanation
"""
# POST /me/feed - publish a status update; the response carries the new post's id.
url = "{}/{}/me/feed".format(BASE, VERSION)
params = {
    "access_token": access_token,
    "message": "Este estado lo publiqué usando la API de Facebook :O"
}
req = requests.post(url, params=params)

status_id = req.json()["id"]
print("status_id = {}".format(status_id))
"""
Explanation: Ahora, publicaremos un estado. Esta request nos retornará la id del post, que será publicado con visibilidad "Solo para mi" (Only me)
POST /me/feed
End of explanation
"""
# DELETE /{status-id} - remove the post we just created.
# The API only allows deleting posts that were created through this same app.
url = "{}/{}/{}".format(BASE, VERSION, status_id)
params = {
    "access_token": access_token
}
req = requests.delete(url, params = params)
print_pretty(req.json())
"""
Explanation: Luego, podemos directamente borrar un estado solo si lo publicamos usando la API:
DELETE /{status-id}
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion | notebooks/kubeflow_pipelines/pipelines/labs/kfp_pipeline.ipynb | apache-2.0 | # Set `PATH` to include the directory containing TFX CLI and skaffold.
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
"""
Explanation: Continuous training pipeline with KFP and Cloud AI Platform
Learning Objectives:
1. Learn how to use KF pre-build components (BiqQuery, CAIP training and predictions)
1. Learn how to use KF lightweight python components
1. Learn how to build a KF pipeline with these components
1. Learn how to compile, upload, and run a KF pipeline with the command line
In this lab, you will build, deploy, and run a KFP pipeline that orchestrates BigQuery and Cloud AI Platform services to train, tune, and deploy a scikit-learn model.
Setup
End of explanation
"""
!grep 'BASE_IMAGE =' -A 5 pipeline/covertype_training_pipeline.py
"""
Explanation: Understanding the pipeline design
The workflow implemented by the pipeline is defined using a Python based Domain Specific Language (DSL). The pipeline's DSL is in the covertype_training_pipeline.py file that we will generate below.
The pipeline's DSL has been designed to avoid hardcoding any environment specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables.
End of explanation
"""
%%writefile ./pipeline/covertype_training_pipeline.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP pipeline orchestrating BigQuery and Cloud AI Platform services."""
import os

import kfp
from helper_components import evaluate_model, retrieve_best_run
from jinja2 import Template
from kfp.components import func_to_container_op
from kfp.gcp import use_gcp_secret

# Defaults and environment settings
# All environment-specific values are injected via env variables so the DSL
# contains no hardcoded image names, versions, or connection strings.
BASE_IMAGE = os.getenv("BASE_IMAGE")
TRAINER_IMAGE = os.getenv("TRAINER_IMAGE")
RUNTIME_VERSION = os.getenv("RUNTIME_VERSION")
PYTHON_VERSION = os.getenv("PYTHON_VERSION")
COMPONENT_URL_SEARCH_PREFIX = os.getenv("COMPONENT_URL_SEARCH_PREFIX")
USE_KFP_SA = os.getenv("USE_KFP_SA")

# Relative GCS paths (appended to the pipeline's gcs_root) for the splits.
TRAINING_FILE_PATH = "datasets/training/data.csv"
VALIDATION_FILE_PATH = "datasets/validation/data.csv"
TESTING_FILE_PATH = "datasets/testing/data.csv"

# Parameter defaults
SPLITS_DATASET_ID = "splits"

# Default hyperparameter-tuning study: 6 trials (3 in parallel) maximizing
# the "accuracy" metric over max_iter and alpha.
# NOTE(review): `True` is not valid JSON (json.loads would reject it); this
# string only works if the consumer parses it as YAML/Python literal syntax -
# confirm the downstream parser before relying on it.
HYPERTUNE_SETTINGS = """
{
    "hyperparameters":  {
        "goal": "MAXIMIZE",
        "maxTrials": 6,
        "maxParallelTrials": 3,
        "hyperparameterMetricTag": "accuracy",
        "enableTrialEarlyStopping": True,
        "params": [
            {
                "parameterName": "max_iter",
                "type": "DISCRETE",
                "discreteValues": [500, 1000]
            },
            {
                "parameterName": "alpha",
                "type": "DOUBLE",
                "minValue": 0.0001,
                "maxValue": 0.001,
                "scaleType": "UNIT_LINEAR_SCALE"
            }
        ]
    }
}
"""

# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
    """Prepares the data sampling query.

    Rows are assigned deterministically to one of `num_lots` lots by
    hashing the entire row (FARM_FINGERPRINT of its JSON encoding); only
    rows whose lot number is in `lots` are selected. This yields stable,
    non-overlapping splits across pipeline runs.

    Args:
        source_table_name: Fully qualified BigQuery table, e.g. "dataset.table".
        num_lots: Total number of lots to partition the table into.
        lots: List of lot numbers to keep, e.g. [1, 2, 3].

    Returns:
        The SQL query string.
    """
    # A plain f-string is sufficient here; the jinja2 Template previously
    # used added a third-party dependency for a static, trusted template.
    # str(lots)[1:-1] turns [1, 2, 3] into "1, 2, 3" for the IN clause.
    query = f"""
         SELECT *
         FROM
           `{source_table_name}` AS cover
         WHERE
           MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {num_lots}) IN ({str(lots)[1:-1]})
         """
    return query
# Create component factories
# (Exercise TODOs: build a ComponentStore pointed at COMPONENT_URL_SEARCH_PREFIX,
# load the pre-built GCP components from it, and wrap the two helper functions
# with func_to_container_op using BASE_IMAGE.)
component_store = # TODO
bigquery_query_op = # TODO - use the pre-built bigquery/query component
mlengine_train_op = # TODO - use the pre-built ml_engine/train
mlengine_deploy_op = # TODO - use the pre-built ml_engine/deploy component
retrieve_best_run_op = # TODO - package the retrieve_best_run function into a lightweight component
evaluate_model_op = # TODO - package the evaluate_model function into a lightweight component


@kfp.dsl.pipeline(
    name="Covertype Classifier Training",
    description=(
        "The pipeline training and deploying the Covertype "
        "classifierpipeline_yaml"
    ),
)
def covertype_train(
    project_id,
    region,
    source_table_name,
    gcs_root,
    dataset_id,
    evaluation_metric_name,
    evaluation_metric_threshold,
    model_id,
    version_id,
    replace_existing_version,
    hypertune_settings=HYPERTUNE_SETTINGS,
    dataset_location="US",
):
    """Orchestrates training and deployment of an sklearn model.

    Splits the source table into train/validation/test via deterministic
    hashing (40%/10%/10% of the lots), tunes hyperparameters on AI Platform,
    retrains with the best trial's settings, evaluates on the test split,
    and deploys only if the evaluation metric beats the threshold.
    """
    # Create the training split (lots 1-4 of 10, i.e. 40% of the data)
    query = generate_sampling_query(
        source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4]
    )

    training_file_path = f"{gcs_root}/{TRAINING_FILE_PATH}"

    create_training_split = bigquery_query_op(
        query=query,
        project_id=project_id,
        dataset_id=dataset_id,
        table_id="",
        output_gcs_path=training_file_path,
        dataset_location=dataset_location,
    )

    # Create the validation split (lot 8, i.e. 10% of the data)
    query = generate_sampling_query(
        source_table_name=source_table_name, num_lots=10, lots=[8]
    )

    validation_file_path = f"{gcs_root}/{VALIDATION_FILE_PATH}"

    create_validation_split = # TODO - use the bigquery_query_op

    # Create the testing split (lot 9, i.e. 10% of the data)
    query = generate_sampling_query(
        source_table_name=source_table_name, num_lots=10, lots=[9])

    testing_file_path = f"{gcs_root}/{TESTING_FILE_PATH}"

    create_testing_split = # TODO - use the bigquery_query_op

    # Tune hyperparameters
    tune_args = [
        "--training_dataset_path",
        create_training_split.outputs["output_gcs_path"],
        "--validation_dataset_path",
        create_validation_split.outputs["output_gcs_path"],
        "--hptune",
        "True",
    ]

    # RUN_ID_PLACEHOLDER is substituted at runtime, giving each run its own jobdir.
    job_dir = f"{gcs_root}/jobdir/hypertune/{kfp.dsl.RUN_ID_PLACEHOLDER}"

    hypertune = # TODO - use the mlengine_train_op

    # Retrieve the best trial
    get_best_trial = retrieve_best_run_op(
        project_id, hypertune.outputs['job_id'])

    # Train the model on a combined training and validation datasets
    job_dir = f"{gcs_root}/jobdir/{kfp.dsl.RUN_ID_PLACEHOLDER}"

    train_args = [
        "--training_dataset_path",
        create_training_split.outputs["output_gcs_path"],
        "--validation_dataset_path",
        create_validation_split.outputs["output_gcs_path"],
        "--alpha",
        get_best_trial.outputs["alpha"],
        "--max_iter",
        get_best_trial.outputs["max_iter"],
        "--hptune",
        "False",
    ]

    train_model = # TODO - use the mlengine_train_op

    # Evaluate the model on the testing split
    eval_model = evaluate_model_op(
        dataset_path=str(create_testing_split.outputs["output_gcs_path"]),
        model_path=str(train_model.outputs["job_dir"]),
        metric_name=evaluation_metric_name,
    )

    # Deploy the model if the primary metric is better than threshold.
    # kfp.dsl.Condition gates the deploy step at pipeline runtime.
    with kfp.dsl.Condition(
        eval_model.outputs["metric_value"] > evaluation_metric_threshold
    ):
        deploy_model = mlengine_deploy_op( # pylint: disable=unused-variable
            model_uri=train_model.outputs["job_dir"],
            project_id=project_id,
            model_id=model_id,
            version_id=version_id,
            runtime_version=RUNTIME_VERSION,
            python_version=PYTHON_VERSION,
            replace_existing_version=replace_existing_version,
        )

    # Configure the pipeline to run using the service account defined
    # in the user-gcp-sa k8s secret
    if USE_KFP_SA == "True":
        kfp.dsl.get_pipeline_conf().add_op_transformer(
            use_gcp_secret("user-gcp-sa")
        )
"""
Explanation: The pipeline uses a mix of custom and pre-build components.
Pre-build components. The pipeline uses the following pre-build components that are included with the KFP distribution:
BigQuery query component
AI Platform Training component
AI Platform Deploy component
Custom components. The pipeline uses two custom helper components that encapsulate functionality not available in any of the pre-build components. The components are implemented using the KFP SDK's Lightweight Python Components mechanism. The code for the components is in the helper_components.py file:
Retrieve Best Run. This component retrieves a tuning metric and hyperparameter values for the best run of a AI Platform Training hyperparameter tuning job.
Evaluate Model. This component evaluates a sklearn trained model using a provided metric and a testing dataset.
Exercise
Complete the TODOs the pipeline file below.
End of explanation
"""
!cat base_image/Dockerfile
"""
Explanation: The custom components execute in a container image defined in base_image/Dockerfile.
End of explanation
"""
!cat trainer_image/Dockerfile
"""
Explanation: The training step in the pipeline employs the AI Platform Training component to schedule an AI Platform Training job in a custom training container. The custom training image is defined in trainer_image/Dockerfile.
End of explanation
"""
# Lab-environment settings: compute region, the AI Platform Pipelines
# endpoint, and the GCS artifact store created at installation time.
REGION = "us-central1"
ENDPOINT = "337dd39580cbcbd2-dot-us-central2.pipelines.googleusercontent.com"
ARTIFACT_STORE_URI = (
    "gs://qwiklabs-gcp-04-406b0039d298-kubeflowpipelines-default"
)

# `!cmd` returns an IPython SList of output lines; keep only the first line.
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
"""
Explanation: Building and deploying the pipeline
Before deploying to AI Platform Pipelines, the pipeline DSL has to be compiled into a pipeline runtime format, also refered to as a pipeline package. The runtime format is based on Argo Workflow, which is expressed in YAML.
Configure environment settings
Update the below constants with the settings reflecting your lab environment.
REGION - the compute region for AI Platform Training and Prediction
ARTIFACT_STORE - the GCS bucket created during installation of AI Platform Pipelines. The bucket name ends with the -kubeflowpipelines-default suffix.
ENDPOINT - set the ENDPOINT constant to the endpoint to your AI Platform Pipelines instance. Then endpoint to the AI Platform Pipelines instance can be found on the AI Platform Pipelines page in the Google Cloud Console.
Open the SETTINGS for your instance
Use the value of the host variable in the Connect to this Kubeflow Pipelines instance from a Python client via Kubeflow Pipelines SKD section of the SETTINGS window.
End of explanation
"""
# Build the custom training container with Cloud Build and push it to
# the project's Container Registry.
IMAGE_NAME = "trainer_image"
TAG = "latest"
TRAINER_IMAGE = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"

!gcloud builds submit --timeout 15m --tag $TRAINER_IMAGE trainer_image
"""
Explanation: Build the trainer image
End of explanation
"""
# Build the base image used by the lightweight Python components,
# mirroring the trainer-image build above.
IMAGE_NAME = "base_image"
TAG = "latest"
BASE_IMAGE = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"

!gcloud builds submit --timeout 15m --tag $BASE_IMAGE base_image
"""
Explanation: Build the base image for custom components
End of explanation
"""
USE_KFP_SA = False
COMPONENT_URL_SEARCH_PREFIX = (
    "https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/"
)
RUNTIME_VERSION = "1.15"
PYTHON_VERSION = "3.7"

# Export the settings as environment variables so the pipeline DSL
# (covertype_training_pipeline.py) can read them at compile time.
%env USE_KFP_SA={USE_KFP_SA}
%env BASE_IMAGE={BASE_IMAGE}
%env TRAINER_IMAGE={TRAINER_IMAGE}
%env COMPONENT_URL_SEARCH_PREFIX={COMPONENT_URL_SEARCH_PREFIX}
%env RUNTIME_VERSION={RUNTIME_VERSION}
%env PYTHON_VERSION={PYTHON_VERSION}
"""
Explanation: Compile the pipeline
You can compile the DSL using an API from the KFP SDK or using the KFP compiler.
To compile the pipeline DSL using the KFP compiler.
Set the pipeline's compile time settings
The pipeline can run using a security context of the GKE default node pool's service account or the service account defined in the user-gcp-sa secret of the Kubernetes namespace hosting Kubeflow Pipelines. If you want to use the user-gcp-sa service account you change the value of USE_KFP_SA to True.
Note that the default AI Platform Pipelines configuration does not define the user-gcp-sa secret.
End of explanation
"""
# TODO
"""
Explanation: Use the CLI compiler to compile the pipeline
Exercise
Compile the covertype_training_pipeline.py with the dsl-compile command line:
End of explanation
"""
!head covertype_training_pipeline.yaml
"""
Explanation: The result is the covertype_training_pipeline.yaml file.
End of explanation
"""
# Name under which the compiled pipeline package will be uploaded.
PIPELINE_NAME = "covertype_continuous_training"

# TODO
"""
Explanation: Deploy the pipeline package
Exercise
Upload the pipeline to the Kubeflow cluster using the kfp command line:
End of explanation
"""
!kfp --endpoint $ENDPOINT pipeline list
"""
Explanation: Submitting pipeline runs
You can trigger pipeline runs using an API from the KFP SDK or using KFP CLI. To submit the run using KFP CLI, execute the following commands. Notice how the pipeline's parameters are passed to the pipeline run.
List the pipelines in AI Platform Pipelines
End of explanation
"""
# Update PIPELINE_ID with the id reported by `kfp pipeline list` for the
# covertype_continuous_training pipeline uploaded in the previous step.
PIPELINE_ID = "0918568d-758c-46cf-9752-e04a4403cd84"

EXPERIMENT_NAME = "Covertype_Classifier_Training"
RUN_ID = "Run_001"
SOURCE_TABLE = "covertype_dataset.covertype"
DATASET_ID = "splits"
EVALUATION_METRIC = "accuracy"
# The pipeline deploys the model only if test accuracy exceeds this threshold.
EVALUATION_METRIC_THRESHOLD = "0.69"
MODEL_ID = "covertype_classifier"
VERSION_ID = "v01"
REPLACE_EXISTING_VERSION = "True"

GCS_STAGING_PATH = f"{ARTIFACT_STORE_URI}/staging"
"""
Explanation: Submit a run
Find the ID of the covertype_continuous_training pipeline you uploaded in the previous step and update the value of PIPELINE_ID .
End of explanation
"""
# TODO
"""
Explanation: Exercise
Create BigQuery Dataset with DATASET_ID variable by using the bq mk --force command.
Run the pipeline using the kfp command line. Here are some of the variable
you will have to use to pass to the pipeline:
EXPERIMENT_NAME is set to the experiment used to run the pipeline. You can choose any name you want. If the experiment does not exist it will be created by the command
RUN_ID is the name of the run. You can use an arbitrary name
PIPELINE_ID is the id of your pipeline. Use the value retrieved by the kfp pipeline list command
GCS_STAGING_PATH is the URI to the GCS location used by the pipeline to store intermediate files. By default, it is set to the staging folder in your artifact store.
REGION is a compute region for AI Platform Training and Prediction.
End of explanation
"""
|
mayank-johri/LearnSeleniumUsingPython | Section 1 - Core Python/Chapter 05 - Data Types/String.ipynb | gpl-3.0 | #### Standard String Examples:
friend = 'Chandu\tNalluri'
print(friend)
manager_details = "# Roshan Musheer:\nExcellent Manager and human being."
print(manager_details)
"""
Explanation: String
Strings are Python's built-in datatype for handling text. They are immutable, so you cannot add, remove, or update any character in the string. If you wish to perform these operations, then you need to create a new string and assign the existing/new variable name to it.
String is a sequence of characters.
characters
Escape Characters
an escape character is a character which invokes an alternative interpretation on subsequent characters in a character sequence. An escape character is a particular case of metacharacters.
Table: Escape Characters
| Escape sequence | Hex value in ASCII | Character represented |
|------------------|--------------------|------------------------------------------------------------------------------------|
| \a | 07 | Alert (Beep, Bell) (added in C89)[1] |
| \b | 08 | Backspace |
| \f | 0C | Formfeed |
| \n | 0A | Newline (Line Feed); see notes below |
| \r | 0D | Carriage Return |
| \t | 09 | Horizontal Tab |
| \v | 0B | Vertical Tab |
| \ | 5C | Backslash |
| \' | 27 | Single quotation mark |
| \" | 22 | Double quotation mark |
| \? | 3F | Question mark (used to avoid trigraphs) |
| \nnnnote 1 | any | The byte whose numerical value is given by nnn interpreted as an octal number |
| \xhh… | any | The byte whose numerical value is given by hh… interpreted as a hexadecimal number |
| \enote 2 | 1B | escape character (some character sets) |
| \Uhhhhhhhhnote 3 | none | Unicode code point where h is a hexadecimal digit |
| \uhhhhnote 4 | none | Unicode code point below 10000 hexadecimal |
String Types
Strings can be classified in 2 categories.
- Standard String: Standard string is one which executed the escape characters
- Raw String: Raw Strings on the other hand handle escape characters as normal characters and do not process them
Standard String
End of explanation
"""
+ Raw String: `a = r'Roshan\tMusheer'` # Roshan\tMusheer
+ Unicode String: `u = u'Björk'`
"""
Explanation: encoding and decoding
Standard String
End of explanation
"""
a = r'Roshan\tMusheer'
print(a)
path = "C:\new_data\technical_jargons"
print(path)
path = R"C:\new_data\technical_jargons"
print(path)
"""
Explanation: Since Python 3, strings are by default unicode string.
The standard string can be converted to unicode by using the function unicode().
String can be initialized using:
With single or double quotes ('', "").
On several consecutive lines, provided that it's between three single or double quotes (''' ''', """ """).
Without expansion characters (example: s = r '\ n', where s will contain the characters \ and n).
End of explanation
"""
a = 'Roshan\tMusheer'
print(a)
"""
Explanation: NOTE: both r and R work the same way
End of explanation
"""
s = 'Camel'
print(id(s))
"""
Explanation: String Operations:
Creation
End of explanation
"""
st_the = "The "
st_action = " ran away !!!"
st = st_the + s + st_action
print(st)
print(s)
print(st_the)
print(st_action)
print(id(st))
print(id(st_the))
print(id(s))
print(id(st_action))
print(dir(s))
"""
Explanation: Concatenation
String concatenation is a process of joining two or more strings into a single string. As we have already discussed that string is an immutable datatype thus we have to create a new string for concatenation, what that means is the original strings will still remain the same and new one will be created using the texts from the originals.
There are multiple ways in which we can achieve the concatenation. The most common method is to use the + operator.
Let's take an example where we have three strings, and try to concatenate them using it.
End of explanation
"""
print( 'Size of %s => %d' % (s, len(s)))
print(dir(s))
print( 'Size of %s => %d' % (s, s.__len__()))
def size(strdata):
    """Count the characters of strdata by explicit iteration.

    Equivalent to len(strdata); written out only to illustrate
    looping over a string character by character.
    """
    count = 0
    for _ in strdata:
        count += 1
    return count
print(size("Anshu"))
"""
Explanation: Interpolation
string interpolation (or variable interpolation, variable substitution, or variable expansion) is the process of evaluating a string literal containing one or more placeholders, yielding a result in which the placeholders are replaced with their corresponding values.
End of explanation
"""
# name = 'World'
# program = 'Python'
# print(f'Hello {name}! This is {program}')
# String processed as a sequence
s = "Murthy "
for ch in s: print(ch , end=',') # This
# print(help(print))
print("\b.")
print("~"*79)
# Strings are objects
if s.startswith('M'): print(s.upper())
print(s.lower())
print("~"*79)
# what will happen?
print(3*s)
# print(dir(s))
s = " Murthy "
age = 5
print(s + str(age))
print(s.strip(), age)
# print(s + age)
st = " Mayank Johri "
print(len(st))
s = st.strip()
print(len(s))
print(st.rstrip())
print(st.lstrip())
m = "Mohan Shah"
x = ["mon", "tues", "wed"]
y = ","
a = "On Leave"
print(y.join(x)) # -> mon,tues,wed
print(m.join(y))
print(a.join(y))
print(y.join(a))
print(a.join(m))
"""
Explanation: %-formatting
Str.format()
Template Strings
Literal String
It is the new Interpolation method as it is implemented in Python 3.6.
End of explanation
"""
" ".join(x)
book_desc = ["This", "book", "is good"]
" ".join(book_desc)
"""
Explanation: Create a string from a list of string items
End of explanation
"""
# Zeros left
print ('Now is %02d:%02d.' % (6, 30))
# Real (The number after the decimal point specifies how many decimal digits )
print ('Percent: %.1f%%, Exponencial:%.2e' % (5.333, 0.00314))
# Octal and hexadecimal
print ('Decimal: %d, Octal: %o, Hexadecimal: %x' % (10, 10, 10))
"""
Explanation: The operator % is used for string interpolation. The interpolation is more efficient in use of memory than the conventional concatenation.
Symbols used in the interpolation:
%s: string.
%d: integer.
%o: octal.
%x: hexacimal.
%f: real.
%e: real exponential.
%%: percent sign.
Symbols can be used to display numbers in various formats.
Example:
End of explanation
"""
peoples = [('Mayank', 'friend', 'Manish'),
('Mayank', 'reportee', 'Roshan Musheer')]
# Parameters are identified by order
msg = '{0} is {1} of {2}'
for name, relationship, friend in peoples:
print(msg.format(name, relationship, friend))
# Parameters are identified by name
msg = '{greeting}, it is {hour:02d}:{minute:02d}'
print(msg.format(greeting='Good Morning', hour=9, minute=30))
print(msg)
# Builtin function format()
print ('Pi =', format(3.14159, '.3e'))
print ('Pi =', format(3.14159, '.1e'))
"""
Explanation: format
In addition to interpolation operator %, the string method and function format() is available.
The function format() can be used only to format one piece of data each time.
Examples:
End of explanation
"""
'{} {}'.format('सूर्य', 'नमस्कार')
'{1} {0}'.format('सूर्य', 'नमस्कार')
'{:>10}'.format('सूर्य नमस्कार')
'{:20}'.format('सूर्य नमस्कार')
'{:4}'.format('Bonjour')
'{:_<5}'.format('Ja')
'{:^7}'.format('こんにちは')
'{:.5}'.format('Bonjour')
'{:10.5}'.format('Bonjour')
'{:{align}{width}}'.format('Bonjour', align='^', width='9')
'{:.{prec}} = {:.{prec}f}'.format('Bonjour', 2.22, prec=4)
'{:d}'.format(1980)
'{:f}'.format(3.141592653589793)
'{:4f}'.format(3.141592653589793)
'{:04d}'.format(119)
'{:06.2f}'.format(3.141592653589793)
'{:+d}'.format(119)
'{:+d}'.format(-119)
### Need to find for complex & boolean numbers
## '{:+d+d}'.format(-3 + 2j)
'{:=5d}'.format((- 111))
'{: d}'.format(101)
'{name} {surname}'.format(name='Mayank', surname='Johri')
user = dict(name='Mayank', surname='Johri')
'{u[name]} {u[surname]}'.format(u=user)
lst = list(range(10))
'{l[2]} {l[7]}'.format(l=lst)
from datetime import datetime
'{:%Y-%m-%d %H:%M}'.format(datetime(2017, 12, 23, 14, 15))
class Yoga:
    """Demonstration class whose repr is a non-ASCII (Devanagari) phrase.

    Used to show the difference between the {!r} (repr) and {!a} (ascii)
    conversions in str.format().
    """

    def __repr__(self):
        return 'सूर्य नमस्कार'
"""
Explanation: >>> TODO !!!
Explain the below examples
End of explanation
"""
myStr = "maya Deploy, version: 0.0.3 "
print(myStr.capitalize())
print(myStr.center(60))
print(myStr.center(60, "*"))
print(myStr.center(10, "*"))
print(myStr.count('a'))
print(myStr.count('e'))
print(myStr.endswith('all'))
print(myStr.endswith('.0.3'))
print(myStr.endswith('.0.3 '))
print(myStr.find("g"))
print(myStr.find("e"))
"""
Explanation: str in-build module
Strings implement all of the common sequence operations, along with the additional methods described below.
End of explanation
"""
print("m" in myStr)
print("M" in myStr)
c = "one"
print(c.isalpha())
c = "1"
print(c.isalpha())
superscripts = "\u00B2"
five = "\u0A6B"
#str.isdecimal() (Only Decimal Numbers)
print(five)
print(c.isdecimal())
print(five.isdecimal())
print("10 ->", "10".isdecimal())
print("10.001".isdecimal())
str = u"this 2009";
print(str.isdecimal())
str = u"23443434";
print(str.isdecimal())
print(fractions.isdecimal())
# str.isdigit() (Decimals, Subscripts, Superscripts)
fractions = "\u00BC"
print(fractions)
print(c.isdigit())
print(fractions.isdigit())
print(five.isdigit())
print("10".isdigit())
str = u"this 2009";
print(str.isdigit())
str = u"23443.434";
print(str.isdigit())
print(superscripts)
print(superscripts.isdigit())
print(superscripts.isdecimal())
print(superscripts+superscripts)
print(fractions+fractions)
# str.isnumeric() (Digits, Fractions, Subscripts, Superscripts, Roman Numerals, Currency Numerators)
print(fractions)
print(fractions.isnumeric())
print(five.isnumeric())
print(myStr.isalnum())
print("one".isalnum())
print("thirteen".isalnum())
"""
Explanation: Note: The find() method should be used only if you need to know the position of sub. To check if sub is a substring or not, use the in operator:
checking: substring in main_string : returns true or false
End of explanation
"""
import string
# the alphabet
print(dir(string))
a = string.ascii_letters
print(a)
# Shifting left the alphabet
b = a[1:] + a[0]
print(b)
print(b.__doc__)
print(string.digits)
print(string.hexdigits)
print(help(string.printable))
"""
Explanation: String Module
Various functions for dealing with text are implemented in the module string.
End of explanation
"""
import string
# Creates a template string
st = string.Template('$warning occurred in $when $$what')
# Fills the model with a dictionary
s = st.substitute({'warning': 'Lack of electricity',
'when': 'April 3, 2002'})
# Shows:
# Lack of electricity occurred in April 3, 2002
print(s)
# Unicode String
u = u'Hüsker Dü'
# Convert to str
s = u.encode('latin1')
print (s, '=>', type(s))
# String str
s = 'Hüsker Dü'
# u = s.decode('latin1')
print (repr(u), '=>', type(u))
"""
Explanation: Template
The module also implements a type called Template, which is a model string that can be filled through a dictionary. Identifiers are initialized by a dollar sign ($) and may be surrounded by curly braces, to avoid confusion.
Example:
End of explanation
"""
|
pbeens/ICS-Computer-Studies | Python/Class Demos/Demo Notebook (work in progress).ipynb | mit | import numpy as np
nums1 = np.random.randint(1,11, 15)
nums1
"""
Explanation: Class Python Demos
I will be using this Notebook for class demos. To use at home, load Anaconda (https://www.continuum.io/downloads) or WinPython (https://winpython.github.io/)
Set() demo
First let's create a random list using the numpy library.
End of explanation
"""
set1 = set(nums1)
set1
"""
Explanation: Let's look at what set() does!
End of explanation
"""
nums2 = np.random.randint(1,11, 12)
nums2
set2 = set(nums2)
set2
"""
Explanation: Let's create a 2nd list and set.
End of explanation
"""
set2.difference(set1)
set1.difference(set2)
"""
Explanation: ...and look at the differences!
End of explanation
"""
# Intersection
set1 & set2
# Union
set1 | set2
# Difference
(set1 - set2) | (set2 - set1)
# Difference method 2
(set1 | set2) - (set1 & set2)
"""
Explanation: See https://en.wikibooks.org/wiki/Python_Programming/Sets for more information about sets!
End of explanation
"""
|
Danghor/Algorithms | Python/Chapter-06/Ordered-Binary-Tree.ipynb | gpl-2.0 | class OrderedBinaryTree:
def __init__(self):
self.mKey = None
self.mValue = None
self.mLeft = None
self.mRight = None
"""
Explanation: Ordered Binary Trees
This notebook implements ordered binary trees. In order to define this notion, we first have to define
the concept of ordered binary trees. In the following, assume a set $\texttt{Key}$ and a set $\texttt{Value}$ are given. Then, the
set $\mathcal{B}$ of all ordered binary trees is defined inductively.
- $\texttt{Nil} \in \mathcal{B}$
- $\texttt{Node}(k, v, l, r) \in \mathcal{B}$ iff the following conditions hold:
- $k \in\texttt{Key}$,
- $v \in\texttt{Value}$,
- $l \in\mathcal{B}$,
- $r \in\mathcal{B}$,
- all keys that occur in the left subtree $l$ are smaller than $k$,
- all keys that occur in the right subtree $r$ are bigger than $k$,
therefore $l < k < r$.
The class OrderedBinaryTree represents the nodes of an ordered binary tree.
- $\texttt{Nil}$ is created as $\texttt{OrderedBinaryTree}()$.
- $\texttt{Node}(k,v,l,r)$ is created as follows:
t = OrderedBinaryTree()
t.mKey = k
t.mValue = v
t.mLeft = l
t.mRight = r
The constructor below creates the empty tree.
End of explanation
"""
def isEmpty(self):
    """Return True iff this tree is the empty tree Nil.

    The empty tree is encoded by a node whose mKey attribute is None,
    so the identity test `is None` (PEP 8) is the correct check.
    """
    return self.mKey is None
OrderedBinaryTree.isEmpty = isEmpty
del isEmpty
"""
Explanation: Given an ordered binary tree $t$, the expression $t.\texttt{isEmpty}()$ checks whether $t$ is the empty tree.
End of explanation
"""
def find(self, key):
    """Look up *key* and return the associated value, or None if absent.

    Walks down from the root, exploiting the ordering invariant:
    smaller keys live in the left subtree, bigger keys in the right.
    """
    node = self
    while not node.isEmpty():
        if key == node.mKey:
            return node.mValue
        node = node.mLeft if key < node.mKey else node.mRight
    return None
OrderedBinaryTree.find = find
del find
"""
Explanation: Given an ordered binary tree $t$ and a key $k$, the expression $t.\texttt{find}(k)$ returns the value stored unter the key $k$.
The method find is defined inductively as follows:
- $\texttt{Nil}.\texttt{find}(k) = \Omega$,
because the empty tree is interpreted as the empty map.
$\texttt{Node}(k, v, l, r).\texttt{find}(k) = v$,
because the node $\texttt{Node}(k,v,l,r)$ stores the assignment $k \mapsto v$.
- $k_1 < k_2 \rightarrow \texttt{Node}(k_2, v, l, r).\texttt{find}(k_1) = l.\texttt{find}(k_1)$,
because if $k_1$ is less than $k_2$, then any mapping for $k_1$ has to be stored in the left subtree $l$.
- $k_1 > k_2 \rightarrow \texttt{Node}(k_2, v, l, r).\texttt{find}(k_1) = r.\texttt{find}(k_1)$,
because if $k_1$ is greater than $k_2$, then any mapping for $k_1$ has to be stored in the right subtree $r$.
End of explanation
"""
def insert(self, key, value):
    """Associate *key* with *value*, overwriting any previous value.

    Descends to the place where the key belongs; the Nil node reached
    at the end is turned in place into a leaf holding the new pair.
    """
    node = self
    while True:
        if node.isEmpty():
            # Turn this Nil node into a leaf carrying the new binding.
            node.mKey = key
            node.mValue = value
            node.mLeft = OrderedBinaryTree()
            node.mRight = OrderedBinaryTree()
            return
        if key == node.mKey:
            # Key already present: only overwrite the stored value.
            node.mValue = value
            return
        node = node.mLeft if key < node.mKey else node.mRight
OrderedBinaryTree.insert = insert
del insert
"""
Explanation: Given an ordered binary tree $t$, a key $k$ and a value $v$, the expression $t.\texttt{insert}(k, v)$ updates the tree $t$ such that the key $k$ is associated with the value $v$.
The method insert is defined inductively as follows:
- $\texttt{Nil}.\texttt{insert}(k,v) = \texttt{Node}(k,v, \texttt{Nil}, \texttt{Nil})$,
If the tree is empty, the information to be stored is stored at the root.
$\texttt{Node}(k,v_2,l,r).\texttt{insert}(k,v_1) = \texttt{Node}(k, v_1, l, r)$,
If the key $k$ is located at the root, we overwrite the old information.
- $k_1 < k_2 \rightarrow
\texttt{Node}(k_2, v_2, l, r).\texttt{insert}(k_1, v_1) = \texttt{Node}\bigl(k_2, v_2, l.\texttt{insert}(k_1, v_1), r\bigr)$,
If the key $k_1$, which is the key for which we want to store a value, is less than the key
$k_2$ at the root, then we have to insert the information in the left subtree.
- $k_1 > k_2 \rightarrow
\texttt{Node}(k_2, v_2, l, r).\texttt{insert}(k_1, v_1) =
\texttt{Node}\bigl(k_2, v_2, l, r.\texttt{insert}(k_1, v_1)\bigr)$,
If the key $k_1$, which is the key for which we want to store a value, is bigger than the key
$k_2$ at the root, then we have to insert the information in the right subtree.
End of explanation
"""
def delete(self, key):
    """Remove *key* (and its associated value) from this tree, in place.

    If the key is absent, the tree is left unchanged.  When the node to
    be removed has two non-empty subtrees, it is replaced by the
    smallest key of the right subtree (obtained via _delMin), which
    preserves the ordering invariant mLeft < mKey < mRight.
    """
    if self.isEmpty():
        return
    if key == self.mKey:
        if self.mLeft.isEmpty():
            # Overwrite this node's attributes with the right subtree;
            # rebinding 'self' would only change the local name.
            self._update(self.mRight) # the following would not work: self = self.mRight
        elif self.mRight.isEmpty():
            self._update(self.mLeft)
        else:
            # Both children present: pull up the minimum of the right
            # subtree to replace the deleted key.
            rs, km, vm = self.mRight._delMin()
            self.mKey = km
            self.mValue = vm
            self.mRight = rs
    elif key < self.mKey:
        self.mLeft.delete(key)
    else:
        self.mRight.delete(key)
OrderedBinaryTree.delete = delete
del delete
"""
Explanation: Given an ordered binary tree $t$ and a key $k$, the expression $t.\texttt{delete}(k)$ removes the key $k$ and its associated value from $t$. The method delete is defined inductively.
- $\texttt{Nil}.\texttt{delete}(k) = \texttt{Nil}$.
- $\texttt{Node}(k,v,\texttt{Nil},r).\texttt{delete}\bigl(k\bigr) = r$.
- $\texttt{Node}(k,v,l,\texttt{Nil}).\texttt{delete}(k) = l$.
- If $l \not= \texttt{Nil} \,\wedge\, r \not= \texttt{Nil} \,\wedge\, r.\texttt{delMin}() = [r',k_{min}, v_{min}]$,
then
$$\texttt{Node}(k,v,l,r).\texttt{delete}(k) = \texttt{Node}(k_{min},v_{min},l,r').$$
If the key to be removed is found at the root of the tree and neither of its subtrees is
empty, the call $r\mathtt{.}\texttt{delMin}()$ removes the smallest key together with its
associated value from the subtree $r$ yielding the subtree $r'$.
The smallest key from $r$ is then stored at the root of the new tree.
$k_1 < k_2 \rightarrow \texttt{Node}(k_2,v_2,l,r).\texttt{delete}\bigl(k_1) =
\texttt{Node}(k_2,v_2,l.\texttt{delete}(k_1),r)$.
If the key that is to be removed is less than the key stored at the root, the key $k$ can only be
located in the left subtree $l$. Hence, $k$ is removed from the left subtree $l$ recursively.
- $k_1 > k_2 \rightarrow \texttt{Node}(k_2,v_2,l,r).\texttt{delete}(k_1) =
\texttt{Node}(k_2,v_2,l,r.\texttt{delete}(k_1))$.
If the key that is to be removed is greater than the key stored at the root, the key $k$ can only be
located in the right subtree $r$. Hence, $k$ is removed from the right subtree $r$ recursively.
End of explanation
"""
def _delMin(self):
if self.mLeft.isEmpty():
return self.mRight, self.mKey, self.mValue
else:
ls, km, vm = self.mLeft._delMin()
self.mLeft = ls
return self, km, vm
OrderedBinaryTree._delMin = _delMin
del _delMin
"""
Explanation: Given a non-empty ordered binary tree $t$, the expression $t.\texttt{delMin}()$ removes the smallest key $k_m$ and its associated value $v_m$ from $t$ and returns the triple
$$(r,k_m,v_m),$$
where $r$ is the tree that results from removing $k_m$ and $v_m$ from $t$. The function is defined via the following equations:
- $\texttt{Node}(k, v, \texttt{Nil}, r).\texttt{delMin}() = (r, k, v)$
If the left subtree is empty, $k$ has to be the smallest key in the tree
$\texttt{Node}(k, v, \texttt{Nil}, r)$. If $k$ is removed, we are left with the subtree $r$.
$l\not= \texttt{Nil} \wedge l.\texttt{delMin}() = (l',k_{min}, v_{min}) \;\rightarrow
\texttt{Node}(k, v, l, r).\texttt{delMin}() = \bigl(\texttt{Node}(k, v, l', r), k_{min}, v_{min}\bigr)$.
If the left subtree $l$ in the binary tree $t = \texttt{Node}(k, v, l, r)$
is not empty, then the smallest key of $t$ is located inside the left subtree $l$.
This smallest key is recursively removed from $l$. This yields the tree
$l'$. Next, $l$ is replaced by $l'$ in $t$. The resulting tree is
$t' = \texttt{Node}(k, v, l', r)$.
End of explanation
"""
def _update(self, t):
self.mKey = t.mKey
self.mValue = t.mValue
self.mLeft = t.mLeft
self.mRight = t.mRight
OrderedBinaryTree._update = _update
del _update
"""
Explanation: Given two ordered binary trees s and t, the expression s._update(t) overwrites the attributes of s with the corresponding attributes of t.
End of explanation
"""
def keyList(self):
    """Return all keys of this tree as a list, sorted ascendingly."""
    keys = []
    stack = []
    node = self
    # Iterative in-order traversal: left subtree, node, right subtree.
    while stack or not node.isEmpty():
        while not node.isEmpty():
            stack.append(node)
            node = node.mLeft
        node = stack.pop()
        keys.append(node.mKey)
        node = node.mRight
    return keys
OrderedBinaryTree.keyList = keyList
del keyList
import graphviz as gv
"""
Explanation: Given an ordered binary tree $b$, the method $b.\texttt{keyList}()$ returns the list of all keys occurring in $b$.
Note that this list has to be sorted ascendingly.
End of explanation
"""
def toDot(self):
    """Render this tree as a graphviz Digraph.

    Every node (including the Nil leaves, drawn as points) receives a
    unique identifier via _assignIDs; edges connect each node to its
    left and right children.
    """
    OrderedBinaryTree.sNodeCount = 0 # static variable of class OrderedBinaryTree
    dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'})
    NodeDict = {}
    self._assignIDs(NodeDict)
    for n, t in NodeDict.items():
        if t.mValue is not None:
            # Inner node storing both a key and a value.
            dot.node(str(n), label='{' + str(t.mKey) + '|' + str(t.mValue) + '}')
        elif t.mKey is not None:
            # Inner node storing only a key.
            dot.node(str(n), label=str(t.mKey))
        else:
            # Empty subtree Nil: drawn as a small point.
            dot.node(str(n), label='', shape='point')
    for n, t in NodeDict.items():
        if t.mLeft is not None:
            dot.edge(str(n), str(t.mLeft.mID))
        if t.mRight is not None:
            dot.edge(str(n), str(t.mRight.mID))
    return dot
OrderedBinaryTree.toDot = toDot
del toDot
"""
Explanation: Given an ordered binary tree $t$, the function $t.\texttt{toDot}()$ renders the tree graphically using graphviz.
End of explanation
"""
def _assignIDs(self, NodeDict):
    """Assign a unique identifier mID to every node, including Nil leaves.

    Identifiers are taken from the class attribute sNodeCount; each node
    is recorded in NodeDict under its identifier.
    """
    OrderedBinaryTree.sNodeCount += 1
    self.mID = OrderedBinaryTree.sNodeCount
    NodeDict[self.mID] = self
    if not self.isEmpty():
        for child in (self.mLeft, self.mRight):
            child._assignIDs(NodeDict)
OrderedBinaryTree._assignIDs = _assignIDs
del _assignIDs
"""
Explanation: Given a binary tree t the method t._assignIDs(NodeDict) assigns a unique identifier with each node. The dictionary NodeDict maps these identifiers to the nodes where they occur.
End of explanation
"""
def demo():
    """Create and return a small ordered binary tree of sample data."""
    entries = [('anton', 123), ('hugo', 345), ('gustav', 789),
               ('jens', 234), ('hubert', 432), ('andre', 342),
               ('philipp', 342), ('rene', 345), ('ans', 123),
               ('alfa', 123), ('algo', 345)]
    tree = OrderedBinaryTree()
    # Insertion order matters for the resulting tree shape, so it is
    # kept identical to the original sequence of insert calls.
    for name, number in entries:
        tree.insert(name, number)
    return tree
t = demo()
t.toDot()
t.delete('anton')
t.toDot()
t.delete('gustav')
t.toDot()
t.delete('hubert')
t.toDot()
"""
Explanation: The function $\texttt{demo}()$ creates a small ordered binary tree.
End of explanation
"""
import random as rnd
t = OrderedBinaryTree()
for k in range(30):
k = rnd.randrange(100)
t.insert(k, None)
t.toDot()
"""
Explanation: Let's generate an ordered binary tree with random keys.
End of explanation
"""
t = OrderedBinaryTree()
for k in range(30):
t.insert(k, None)
t.toDot()
"""
Explanation: This tree looks more or less balanced. Lets us create a tree where things do not work out that well.
End of explanation
"""
for k in range(30):
t.delete(k)
t.toDot()
"""
Explanation: In order to check whether the method delete works as expected, we try the following:
End of explanation
"""
n = 100
S = OrderedBinaryTree()
L = list(range(2, n + 1))
rnd.shuffle(L) # avoid worst case performance
for x in L:
S.insert(x, None)
for p in range(2, n // 2 + 1):
for q in range(p, n // p + 1):
S.delete(p * q)
S.toDot()
print(S.keyList())
"""
Explanation: Let us compute the set $S$ of prime numbers up to some given number $n\in\mathbb{N}$. Mathematically, this set can be defined as
$$ S = { 2, \cdots, n } - \bigl{ p \cdot q \;\big|\; p, q \in {2, \cdots, n } \bigr}. $$
End of explanation
"""
|
NYUDataBootcamp/Projects | UG_S16/Acosta-NHL-GRIT.ipynb | mit | import pandas as pd #PandasPandas
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
print('PandaPandaPanda ', pd.__version__)
df=pd.read_csv('NHLQUANT.csv')
"""
Explanation: Data BootCamp Project
End of explanation
"""
plt.plot(df.index,df['Grit'])
"""
Explanation: Who has Grit?
Hockey has always been a eloquent yet brutal sport, in this analysis I'm finding the player that embodies the brutality.
End of explanation
"""
df.head(10)
"""
Explanation: AHHH
The graph above is the simplest display of my data. Out of roughly 900 NHL players, only a few are recognizeable from the mass.
End of explanation
"""
df.mean()
"""
Explanation: This is the way my quantitative data looks. Most of the column headers are self explanatory, but i'll go into further detail later.
End of explanation
"""
pd.to_numeric(df, errors='ignore')
y=df["Age"]
z=df["Grit"]
plt.plot(y,z)
df['Grit']>130
df.ix[df['Grit']>130]
"""
Explanation: Above is the quantitative means of the data i've acquired. Grit is a weighted compilation of penalty minutes, hits, blocked shots, and fights (making it somewhat subjective).
End of explanation
"""
df.ix[df['Grit']>300]
df.ix[df['Grit']>400]
Best=df.ix[df['Grit']>400]
Best.sort("Age").plot.barh('Age',"Grit")
"""
Explanation: Since i'm primarily interested in players with the most Grit, i'm going to limit my searches to a higher percentile.
End of explanation
"""
QL=pd.read_csv("NHLQUAL.csv")
QL.head(5)
"""
Explanation: Of the original 900, these are the 10 players with the most Grit.
End of explanation
"""
print(QL.at[61, "First Name"]+QL.at[61, 'Last Name'],QL.at[94, 'First Name']+QL.at[94, 'Last Name'],
QL.at[712, "First Name"]+QL.at[712, "Last Name"],QL.at[209, 'First Name']+QL.at[209, 'Last Name'],QL.at[306, "First Name"]+QL.at[306, 'Last Name'],QL.at[497, 'First Name']+QL.at[497, 'Last Name'],QL.at[524, 'First Name']+QL.at[524, 'Last Name'],QL.at[565, 'First Name']+QL.at[565, 'Last Name'],QL.at[641, 'First Name']+QL.at[641, 'Last Name'],QL.at[877, 'First Name']+QL.at[877, 'Last Name'])
"""
Explanation: Above is how my qualitative data is structured. I've seperated the datasets for ease of manipulation.
End of explanation
"""
Best.sort("Age").plot.barh('Age',"HitF")
Best.sort("Age").plot.barh('Age',"HitA")
"""
Explanation: Above are the hardiest players in the NHL, but how do they perform?
End of explanation
"""
Best.plot(Best.index ,'GP')
plt.ylim([60,85])
"""
Explanation: The two graphs above represent hits given and hits received respectively.
End of explanation
"""
fig, ax=plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True)
Best['G'].plot(ax=ax[0],color='green')
Best['A'].plot(ax=ax[1],color='red')
"""
Explanation: The above graph reflects the amount of games played during the season. The x-axis is simply the index value attributed to the player.
End of explanation
"""
|
scheema/Machine-Learning | Datascience_Lab0.ipynb | mit | import numpy as np
from io import BytesIO
import matplotlib
import matplotlib.pyplot as plt
import random
from mpl_toolkits.mplot3d import Axes3D
from bs4 import BeautifulSoup
import urllib.request
%matplotlib inline
"""
Explanation: Solution Implementation by Srinivas Cheemalapati
CS 109A/AC 209A/STAT 121A Data Science: Lab 0
Harvard University<br>
Fall 2016<br>
Instructors: W. Pan, P. Protopapas, K. Rader
Import libraries
End of explanation
"""
#create a variable for the file dataset_HW0.txt
fname = 'dataset_HW0.txt'
#fname
# Option 1: Open the file and load the data into the numpy array; skip the headers
with open(fname) as f:
lines = (line for line in f if not line.startswith('#'))
data = np.loadtxt(lines, delimiter=',', skiprows=1)
# What is the shape of the data
data.shape
#Option 2: Open the file and load the data into the numpy array; skip the headers
data = np.loadtxt('dataset_HW0.txt', delimiter=',', skiprows=1)
data.shape
# print the first 3 rows of the data
data[0:3]
#data[:,0]
# show the range of values for birth weight
fig = plt.figure()
axes = fig.add_subplot(111)
plt.xlabel("birth weight")
axes.hist(data[:,0])
# show the range of values for the femur length
fig = plt.figure()
axes = fig.add_subplot(111)
plt.xlabel("femur length")
axes.hist(data[:,1])
"""
Explanation: Problem 1: Processing Tabular Data from File
In this problem, we practice reading csv formatted data and doing some very simple data exploration.
Part (a): Reading CSV Data with Numpy
Open the file $\mathtt{dataset}$_$\mathtt{HW0.txt}$, containing birth biometrics as well as maternal data for a number of U.S. births, and inspect the csv formatting of the data. Load the data, without the column headers, into an numpy array.
Do some preliminary explorations of the data by printing out the dimensions as well as the first three rows of the array. Finally, for each column, print out the range of the values.
<b>Prettify your output</b>, add in some text and formatting to make sure your outputs are readable (e.g. "36x4" is less readable than "array dimensions: 36x4").
End of explanation
"""
#calculate the overall means
birth_weight_mean = data[:,0].mean()
birth_weight_mean
#calculagte the overall mean for Femur Length
femur_length_mean = data[:,1].mean()
femur_length_mean
# Capture the birth weight
birth_weight = data[:,0]
#Capture the Femur length
femur_length = data[:,1]
# Capture the maternal age
maternal_age = data[:,2]
maternal_age.shape
# Create indexes for the different maternal age groups
#group_1
group_1 = maternal_age <= 17
#group_2
group_2 = [(maternal_age >= 18) & (maternal_age <= 34)]
#group_3
group_3 = [(maternal_age >= 35) & (maternal_age <= 50)]
bw_g1 = data[:, 0][group_1]
age0_17 = data[:, 2][group_1]
bw_g1.mean()
fl_g1 = data[:, 1][group_1]
fl_g1.mean()
bw_g2 = data[:, 0][group_2]
age18_34 = data[:, 2][group_2]
bw_g2.mean()
fl_g2 = data[:, 1][group_2]
fl_g2.mean()
bw_g3 = data[:, 0][group_3]
age35_50 = data[:, 2][group_3]
bw_g3.mean()
fl_g3 = data[:, 1][group_3]
fl_g3.mean()
"""
Explanation: Part (b): Simple Data Statistics
Compute the mean birth weight and mean femur length for the entire dataset. Now, we want to split the birth data into three groups based on the mother's age:
Group I: ages 0-17
Group II: ages 18-34
Group III: ages 35-50
For each maternal age group, compute the mean birth weight and mean femur length.
<b>Prettify your output.</b>
Compare the group means with each other and with the overall mean, what can you conclude?
End of explanation
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for c, m in [('r', 'o')]:
ax.scatter(bw_g1, fl_g1, age0_17, edgecolor=c,facecolors=(0,0,0,0), marker=m, s=40)
for c, m in [('b', 's')]:
ax.scatter(bw_g2, fl_g2, age18_34, edgecolor=c,facecolors=(0,0,0,0), marker=m, s=40)
for c, m in [('g', '^')]:
ax.scatter(bw_g3, fl_g3, age35_50, edgecolor=c,facecolors=(0,0,0,0), marker=m, s=40)
fig.suptitle('3D Data Visualization', fontsize=14, fontweight='bold')
ax.set_title('Birth Weigth vs Femur Length vs Weight Plot')
ax.set_xlabel('birth_weight')
ax.set_ylabel('femur_length')
ax.set_zlabel('maternal_age')
plt.show()
"""
Explanation: Part (c): Simple Data Visualization
Visualize the data using a 3-D scatter plot (label the axes and title your plot). How does your visual analysis compare with the stats you've computed in Part (b)?
End of explanation
"""
plt.scatter(maternal_age,birth_weight, color='r', marker='o')
plt.xlabel("maternal age")
plt.ylabel("birth weight")
plt.show()
plt.scatter(maternal_age,femur_length, color='b', marker='s')
plt.xlabel("maternal age")
plt.ylabel("femur length")
plt.show()
plt.scatter(birth_weight,femur_length, color='g', marker='^')
plt.xlabel("birth weight")
plt.ylabel("femur length")
plt.show()
"""
Explanation: Part (d): Simple Data Visualization (Continued)
Visualize two data attributes at a time,
maternal age against birth weight
maternal age against femur length
birth weight against femur length
using 2-D scatter plots.
Compare your visual analysis with your analysis from Part (b) and (c).
End of explanation
"""
# Download Kafka's "The Metamorphosis" and parse the HTML into a
# BeautifulSoup tree.
url = "http://www.gutenberg.org/files/5200/5200-h/5200-h.htm"
page = urllib.request.urlopen(url).read()
soup = BeautifulSoup(page, "html.parser")
# Sanity check: show the first 1000 characters of the pretty-printed tree.
print(soup.prettify()[0:1000])
"""
Explanation: Problem 2: Processing Web Data
In this problem we practice some basic web-scrapping using Beautiful Soup.
Part (a): Opening and Reading Webpages
Open and load the page (Kafka's The Metamorphosis) at
$\mathtt{http://www.gutenberg.org/files/5200/5200-h/5200-h.htm}$
into a BeautifulSoup object.
The object we obtain is a parse tree (a data structure representing all tags and relationship between tags) of the html file. To concretely visualize this object, print out the first 1000 characters of a representation of the parse tree using the $\mathtt{prettify()}$ function.
End of explanation
"""
# Display the full content of the <head> tag
soup.head
# Display the <title> tag nested inside <head>
soup.head.title
# Display the first <meta> child of the <head> tag
soup.head.meta
# Print the string inside the <title> tag
soup.head.title.string
# print the string inside the pre-formatted text (pre) tag
print(soup.body.pre.string)
# print the string inside first paragraph (p) tag
print(soup.body.p.string)
"""
Explanation: Part (b): Exploring the Parsed HTML
Explore the nested data structure you obtain in Part (a) by printing out the following:
the content of the head tag
the string inside the head tag
each child of the head tag
the string inside the title tag
the string inside the preformatted text (pre) tag
the string inside the first paragraph (p) tag
Make your output readable.
End of explanation
"""
print(soup.get_text()[1:1000])
"""
Explanation: Part (c): Extracting Text
Now we want to extract the text of The Metamorphosis and do some simple analysis. Beautiful Soup provides a way to extract all text from a webpage via the get_text() function.
Print the first and last 1000 characters of the text returned by get_text(). Is this the content of the novella? Where is the content of The Metamorphosis stored in the BeautifulSoup object?
End of explanation
"""
# Collect the novella's text: the text nodes of every <p> tag,
# concatenated into one string.  The original queried the <p> tags twice
# (once into an unused variable) and grew a string with += in a loop
# (quadratic); query once and join instead.
paragraphs = soup.find_all('p')
combined_text = ''.join(
    "".join(node.findAll(text=True)) for node in paragraphs)
print(combined_text[0:1000])
"""
Explanation: Part (d): Extracting Text (Continued)
Using the find_all() function, extract the text of The Metamorphosis and concatenate the result into a single string. Print out the first 1000 characters of the string as a sanity check.
End of explanation
"""
# Strip punctuation in one pass with str.translate, then lower-case and
# split into words (the original chained eight .replace() calls).
_punct_table = str.maketrans('', '', ':.,"!?;')
word_list = combined_text.lower().translate(_punct_table).split()

word_length = [len(n) for n in word_list]
print(word_length[0:100])

total_word_length = sum(word_length)
print("The total word length: ", total_word_length)

wordcount = len(word_list)
print("The total number of words: ", wordcount)

avg_word_length = total_word_length / wordcount
print("The average word length is: ", avg_word_length)

# Histogram of word lengths, one integer-wide bin per length.
fig = plt.figure()
axes = fig.add_subplot(111)
plt.xlabel("Word Lengths")
# Bug fix: the second label belongs on the y axis -- the original called
# plt.xlabel twice, overwriting "Word Lengths" with "Count".
plt.ylabel("Count")
plt.hist(word_length, bins=np.arange(min(word_length), max(word_length) + 1, 1))
"""
Explanation: Part (e): Word Count
Count the number of words in The Metamorphosis. Compute the average word length and plot a histogram of word lengths.
You'll need to adjust the number of bins for each histogram.
Hint: You'll need to pre-process the text in order to obtain the correct word/sentence length and count.
End of explanation
"""
|
zizouvb/deeplearning | gan_mnist/Intro_to_GANs_Exercises.ipynb | mit | %matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
"""
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator: it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
End of explanation
"""
def model_inputs(real_dim, z_dim):
    """Create the graph's two input placeholders.

    real_dim: flattened size of a real image fed to the discriminator.
    z_dim: length of the latent vector fed to the generator.
    Returns the (real-image, latent-vector) placeholder pair, each with a
    free batch dimension.
    """
    real_placeholder = tf.placeholder(tf.float32, [None, real_dim])
    z_placeholder = tf.placeholder(tf.float32, [None, z_dim])
    return real_placeholder, z_placeholder
"""
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
Exercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.
End of explanation
"""
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    ''' Build the generator network.

        Arguments
        ---------
        z : Input tensor for the generator (latent vectors)
        out_dim : Shape of the generator output
        n_units : Number of units in hidden layer
        reuse : Reuse the variables with tf.variable_scope
        alpha : leak parameter for leaky ReLU

        Returns
        -------
        out : tanh-activated generator output in [-1, 1]
    '''
    # All variables live under the 'generator' scope so the optimizer can
    # select them by name prefix later.
    with tf.variable_scope('generator', reuse=reuse):
        # Fully connected hidden layer followed by a leaky ReLU:
        # f(x) = max(alpha * x, x)
        hidden = tf.layers.dense(z, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)
        # Linear output layer squashed to [-1, 1] with tanh.
        logits = tf.layers.dense(hidden, out_dim, activation=None)
        out = tf.tanh(logits)
    return out
"""
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can just take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
Exercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.
End of explanation
"""
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    ''' Build the discriminator network.

        Arguments
        ---------
        x : Input tensor for the discriminator (real or generated images)
        n_units : Number of units in hidden layer
        reuse : Reuse the variables with tf.variable_scope
        alpha : leak parameter for leaky ReLU

        Returns
        -------
        out : sigmoid probability that the input is real
        logits : pre-sigmoid output (used by the loss functions)
    '''
    # All variables live under the 'discriminator' scope; reuse=True
    # shares weights between the real-image and fake-image copies.
    with tf.variable_scope('discriminator', reuse=reuse):
        # Fully connected hidden layer with a leaky ReLU:
        # f(x) = max(alpha * x, x)
        hidden = tf.layers.dense(x, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)
        # Single logit per image, squashed to (0, 1) with a sigmoid.
        logits = tf.layers.dense(hidden, 1, activation=None)
        out = tf.sigmoid(logits)
    return out, logits
"""
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
Exercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.
End of explanation
"""
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU (slope applied to negative inputs)
alpha = 0.01
# Label smoothing: real-image labels become 1 - smooth = 0.9
smooth = 0.1
"""
Explanation: Hyperparameters
End of explanation
"""
# Start from a fresh graph so repeated notebook runs don't accumulate nodes
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)

# Generator network; g_model is the generator's output tensor
g_model = generator(input_z, input_size, n_units=g_hidden_size, reuse=False, alpha=alpha)

# Discriminator network: one copy for real images, and one with
# reuse=True (shared weights) for the generator's fake images
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, n_units=d_hidden_size, reuse=True, alpha=alpha)
"""
Explanation: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
Exercise: Build the network from the functions you defined earlier.
End of explanation
"""
# Calculate losses.
# Real images: labels are ones, smoothed down to (1 - smooth) to help the
# discriminator generalize.
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_real,
        labels=tf.ones_like(d_logits_real) * (1 - smooth)))
# Fake images: labels are all zeros (use zeros_like directly instead of
# the original's ones_like(...) * 0).
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake,
        labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake

# Generator loss: same fake logits, but labels are all ones -- the
# generator wants the discriminator to call its images real.  Label
# smoothing applies only to the discriminator's real-image loss, so no
# (1 - smooth) factor here (the original smoothed these labels too).
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake,
        labels=tf.ones_like(d_logits_fake)))
"""
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.
Exercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.
End of explanation
"""
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts.  The variable
# names are prefixed by the tf.variable_scope set inside generator() /
# discriminator(), so the prefix identifies which network owns each one.
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
# var_list restricts each optimizer to updating only its own network
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
"""
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to the var_list keyword argument of the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
Exercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that updates the network variables separately.
End of explanation
"""
batch_size = 100
epochs = 100
samples = []
losses = []
# Only the generator variables are checkpointed; they're all that's
# needed to sample new images after training.
saver = tf.train.Saver(var_list=g_vars)
# Build the sampling op ONCE, before the session.  The original called
# generator(...) inside the epoch loop, which added a fresh set of graph
# nodes every epoch (variables were reused, but the graph grew without
# bound and each sess.run got slower).
g_sampler = generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch = mnist.train.next_batch(batch_size)

            # Get images, reshape and rescale from [0, 1] to [-1, 1] to
            # match the generator's tanh output range
            batch_images = batch[0].reshape((batch_size, 784))
            batch_images = batch_images*2 - 1

            # Sample random noise for G
            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))

            # Run optimizers: one D step, then one G step
            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})

        # At the end of each epoch, get the losses (on the last batch)
        # and print them out
        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
        train_loss_g = g_loss.eval({input_z: batch_z})

        print("Epoch {}/{}...".format(e+1, epochs),
              "Discriminator Loss: {:.4f}...".format(train_loss_d),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Save losses to view after training
        losses.append((train_loss_d, train_loss_g))

        # Sample from the generator as we train, for viewing afterwards
        sample_z = np.random.uniform(-1, 1, size=(16, z_size))
        gen_samples = sess.run(g_sampler, feed_dict={input_z: sample_z})
        samples.append(gen_samples)
        saver.save(sess, './checkpoints/generator.ckpt')

# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)
"""
Explanation: Training
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
"""
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
"""
def view_samples(epoch, samples):
    """Draw a 4x4 grid of generated digits from one saved epoch.

    epoch indexes into samples (a list of per-epoch sample batches);
    returns the created (fig, axes) pair.
    """
    fig, axes = plt.subplots(figsize=(7, 7), nrows=4, ncols=4,
                             sharey=True, sharex=True)
    for axis, image in zip(axes.flatten(), samples[epoch]):
        axis.xaxis.set_visible(False)
        axis.yaxis.set_visible(False)
        axis.imshow(image.reshape((28, 28)), cmap='Greys_r')
    return fig, axes
# Load samples from generator taken while training (one batch of 16
# images per epoch, pickled at the end of the training loop)
with open('train_samples.pkl', 'rb') as f:
    samples = pkl.load(f)
"""
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
"""
_ = view_samples(-1, samples)
"""
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
"""
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)

# Step through the saved epochs (rows) and the 16 images per epoch
# (columns) with integer strides so the grid shows rows x cols images
# evenly spread through training.
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
"""
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
"""
# Restore the generator variables from the latest checkpoint and sample
# brand-new images from a fresh latent batch
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    # reuse=True shares the (just-restored) trained generator variables
    gen_samples = sess.run(
                   generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
                   feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
"""
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation
"""
|
Autodesk/molecular-design-toolkit | moldesign/_notebooks/Tutorial 3. Quantum Chemistry.ipynb | apache-2.0 | %matplotlib inline
import numpy as np
from matplotlib.pylab import *
try: import seaborn #optional, makes plots look nicer
except ImportError: pass
import moldesign as mdt
from moldesign import units as u
"""
Explanation: <span style="float:right"><a href="http://moldesign.bionano.autodesk.com/" target="_blank" title="About">About</a> <a href="https://github.com/autodesk/molecular-design-toolkit/issues" target="_blank" title="Issues">Issues</a> <a href="http://bionano.autodesk.com/MolecularDesignToolkit/explore.html" target="_blank" title="Tutorials">Tutorials</a> <a href="http://autodesk.github.io/molecular-design-toolkit/" target="_blank" title="Documentation">Documentation</a></span>
</span>
<br>
<center><h1>Tutorial 3: Intro to Quantum Chemistry </h1> </center>
This tutorial shows how to select a quantum chemistry method, visualize orbitals, and analyze the electronic wavefunction.
End of explanation
"""
# Build an H2 molecule by hand: two hydrogens, bonded, 2 angstroms apart.
hydrogen_a = mdt.Atom('H')
hydrogen_b = mdt.Atom('H')
hydrogen_a.bond_to(hydrogen_b, 1)
hydrogen_b.x = 2.0 * u.angstrom
h2 = mdt.Molecule([hydrogen_a, hydrogen_b], name='H2', charge=0)
h2.draw(height=300)
"""
Explanation: I. Build and minimize minimal basis set hydrogen
A. Build the molecule
This cell builds H<sub>2</sub> by creating the two atoms, and explicitly setting their positions.
Try editing this cell to:
* Create HeH<sup>+</sup>
* Create H<sub>3</sub><sup>+</sup>
* Change the atoms' initial positions
End of explanation
"""
# Attach a restricted Hartree-Fock energy model with the 3-21G basis set,
# then run a single-point calculation
h2.set_energy_model(mdt.models.RHF, basis='3-21g')
h2.calculate()
print('Calculated properties:', h2.properties.keys())
print('Potential energy:', h2.potential_energy)
"""
Explanation: B. Run a hartree-fock calculation
The next cell adds the RHF energy model to our molecule, then triggers a calculation.
Try editing this cell to:
* Change the atomic basis
* Get a list of other available energy models (type mdt.models. and then hit the [tab] key)
End of explanation
"""
h2.draw_orbitals()
"""
Explanation: C. Visualize the orbitals
After running the calculation, we have enough information to visualize the molecular orbitals.
End of explanation
"""
# Minimize for 10 steps, keeping every frame (frame_interval=1) so the
# orbitals can be viewed along the whole trajectory
minimization = h2.minimize(frame_interval=1, nsteps=10)
minimization.draw_orbitals()
"""
Explanation: D. Minimize the energy
Here, we'll run a quick energy minimization then visualize how the hydrogen nuclei AND the atomic wavefunctions changed.
End of explanation
"""
# The electronic wavefunction object produced by the RHF calculation
wfn = h2.wfn
wfn
"""
Explanation: II. Analyzing the wavefunction
The wavefunction created during QM calculations will be stored as an easy-to-analyze python object:
End of explanation
"""
# The canonical (Hartree-Fock) molecular orbitals
mos = wfn.orbitals.canonical
mos
"""
Explanation: A. Molecular orbital data
First, let's examine the molecular orbitals. The overlaps, fock matrix, coefficents, and density matrix are all available as 2D numpy arrays (with units where applicable).
We'll specifically look at the "canonical" orbitals that result from Hartree-Fock calculations.
End of explanation
"""
mos.coeffs
"""
Explanation: MOs are, of course, a linear combination of AOs:
\begin{equation} \left| \text{MO}i \right \rangle = \sum_j c{ij} \left| \text{AO}_j \right\rangle \end{equation}
The coefficient $c_{ij}$ is stored at mos.coeffs[i,j]
End of explanation
"""
mos.overlaps
"""
Explanation: Most MO sets are orthogonal; their overlaps will often be the identity matrix (plus some small numerical noise)
End of explanation
"""
# Heat map of the Fock matrix (in eV) in the MO basis; the orbital
# energies lie along its diagonal.  Diverging colormap centers on zero.
matshow(mos.fock.value_in(u.eV), cmap=cm.seismic)
colorbar(label='fock element/eV')
title('Fock matrix')
"""
Explanation: In the canonical MO basis the Fock matrix is diagonal by construction; the orbital energies lie along its diagonal.
End of explanation
"""
# Cross-basis overlap matrix: olap[i, j] = <MO_i | AO_j>
olap = mos.overlap(wfn.aobasis)
olap
"""
Explanation: The MolecularOrbitals class also offers several methods to transform operators into different bases. For instance, the overlap method creates an overlap matrix between the AOs and MOs, where olap[i,j] is the overlap between MO i and AO j:
\begin{equation}
\text{olap[i,j]} = \left\langle MO_i \middle| AO_j \right \rangle
\end{equation}
End of explanation
"""
# Heat map of the two-electron Hamiltonian terms (in eV) in the MO basis
matshow(mos.h2e.value_in(u.eV), cmap=cm.inferno)
colorbar(label='2-electron hamiltonian term / eV')
title('2-electron hamiltonian')
"""
Explanation: Various other matrices are available from this this object, such as the two-electron Hamiltonian terms:
End of explanation
"""
# The atomic orbitals (i.e., the basis functions); slice to display them all
aos = wfn.orbitals.atomic
aos[:]
"""
Explanation: B. Individual orbitals
You can work with individual orbitals as well. For instance, to get a list (in order) of our four atomic orbitals (i.e., the basis functions):
End of explanation
"""
# Inspect the first (lowest) atomic orbital's name and energy
orb = aos[0]
print('Name:', orb.name)
print('Energy:', orb.energy)
"""
Explanation: Let's grab the lowest orbital and examine some of its properties:
End of explanation
"""
# Matrix elements between the two hydrogens' 1s orbitals
# (indices 0 and 3 are the first AO on each atom)
ha_1s = aos[0]
hb_1s = aos[3]
print('Overlap between 1s orbitals: ', ha_1s.overlap(hb_1s))
print('Fock element between 1s orbitals', ha_1s.fock_element(hb_1s))
"""
Explanation: Orbital objects also give you access to various matrix elements:
End of explanation
"""
wfn.aobasis
"""
Explanation: C. Basis functions
An object representing the wavefunction's basis functions is available at wfn.aobasis
End of explanation
"""
# Inspect the first basis function's quantum numbers and center.
basis_function = wfn.aobasis[0]
print('Name:', basis_function.name)
# l is the angular-momentum (azimuthal) quantum number; m is the magnetic
# quantum number.  The original labeled m "azimuthal", which is the
# conventional name for l.
print('Angular quantum number:', basis_function.l)
print('Magnetic quantum number:', basis_function.m)
print('Centered at:', basis_function.center)
"""
Explanation: It stores a list of AtomicBasisFunction objects:
End of explanation
"""
basis_function.primitives
"""
Explanation: Each basis function is defined as a linear combination of "primitive" 3D Gaussian functions:
End of explanation
"""
# Contraction coefficient and Gaussian exponent of the first primitive
primitive = basis_function.primitives[0]
print(primitive)
print("Coeff:", primitive.coeff)
print("Alpha:", primitive.alpha)
"""
Explanation: And these primitives can themselves be examined:
End of explanation
"""
|
pysal/pysal | notebooks/explore/pointpats/distance_statistics.ipynb | bsd-3-clause | import scipy.spatial
import pysal.lib as ps
import numpy as np
from pysal.explore.pointpats import PointPattern, PoissonPointProcess, as_window, G, F, J, K, L, Genv, Fenv, Jenv, Kenv, Lenv
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Distance Based Statistical Method for Planar Point Patterns
Authors: Serge Rey sjsrey@gmail.com and Wei Kang weikang9009@gmail.com
Introduction
Distance based methods for point patterns are of three types:
Mean Nearest Neighbor Distance Statistics
Nearest Neighbor Distance Functions
Interevent Distance Functions
In addition, we are going to introduce a computational technique Simulation Envelopes to aid in making inferences about the data generating process. An example is used to demonstrate how to use and interpret simulation envelopes.
End of explanation
"""
# Twelve event locations (x, y) used as the running example point pattern.
points = [[66.22, 32.54], [22.52, 22.39], [31.01, 81.21],
          [9.47, 31.02], [30.78, 60.10], [75.21, 58.93],
          [79.26, 7.68], [8.23, 39.93], [98.73, 77.17],
          [89.78, 42.53], [65.19, 92.08], [54.46, 8.48]]
pp = PointPattern(points)
pp.summary()  # basic descriptive statistics of the pattern
"""
Explanation: Mean Nearest Neighbor Distance Statistics
The nearest neighbor(s) for a point $u$ is the point(s) $N(u)$ which meet the condition
$$d_{u,N(u)} \leq d_{u,j} \forall j \in S - u$$
The distance between the nearest neighbor(s) $N(u)$ and the point $u$ is nearest neighbor distance for $u$. After searching for nearest neighbor(s) for all the points and calculating the corresponding distances, we are able to calculate mean nearest neighbor distance by averaging these distances.
It was demonstrated by Clark and Evans(1954) that mean nearest neighbor distance statistics distribution is a normal distribution under null hypothesis (underlying spatial process is CSR). We can utilize the test statistics to determine whether the point pattern is the outcome of CSR. If not, is it the outcome of cluster or regular
spatial process?
Mean nearest neighbor distance statistic
$$\bar{d}_{min}=\frac{1}{n} \sum_{i=1}^n d_{min}(s_i)$$
End of explanation
"""
# one nearest neighbor (default)
# Returns (ids of each point's nearest neighbor, corresponding distances).
pp.knn()
"""
Explanation: We may call the method knn in PointPattern class to find $k$ nearest neighbors for each point in the point pattern pp.
End of explanation
"""
# two nearest neighbors: ids and distances of the 2 closest events
pp.knn(2)
pp.max_nnd  # maximum nearest neighbor distance
pp.min_nnd  # minimum nearest neighbor distance
pp.mean_nnd  # mean nearest neighbor distance
pp.nnd  # nearest neighbor distances, one per event
pp.nnd.sum()/pp.n  # same as pp.mean_nnd
pp.plot()  # scatter plot of the event locations
"""
Explanation: The first array is the ids of the most nearest neighbor for each point, the second array is the distance between each point and its most nearest neighbor.
End of explanation
"""
# Empirical G function (event-to-event nearest neighbor CDF) estimated
# over 20 distance intervals, plotted against the CSR expectation.
gp1 = G(pp, intervals=20)
gp1.plot()
"""
Explanation: Nearest Neighbor Distance Functions
Nearest neighbour distance distribution functions (including the nearest “event-to-event” and “point-event” distance distribution functions) of a point process are cumulative distribution functions of several kinds -- $G, F, J$. By comparing the distance function of the observed point pattern with that of the point pattern from a CSR process, we are able to infer whether the underlying spatial process of the observed point pattern is CSR or not for a given confidence level.
$G$ function - event-to-event
The $G$ function is defined as follows: for a given distance $d$, $G(d)$ is the proportion of nearest neighbor distances that are less than $d$.
$$G(d) = \sum_{i=1}^n \frac{ \phi_i^d}{n}$$
$$
\phi_i^d =
\begin{cases}
1 & \quad \text{if } d_{min}(s_i)<d \
0 & \quad \text{otherwise } \
\end{cases}
$$
If the underlying point process is a CSR process, $G$ function has an expectation of:
$$
G(d) = 1-e^{-\lambda \pi d^2}
$$
However, if the $G$ function plot is above the expectation this reflects clustering, while departures below expectation reflect dispersion.
End of explanation
"""
gp1.plot(qq=True)  # quantile-quantile view: CSR becomes the diagonal line
"""
Explanation: A slightly different visualization of the empirical function is the quantile-quantile plot:
End of explanation
"""
gp1.d  # distance domain sequence (corresponding to the x-axis)
gp1.G  # cumulative nearest neighbor distance distribution over d (the y-axis)
"""
Explanation: in the q-q plot the csr function is now a diagonal line which serves to make accessment of departures from csr visually easier.
It is obvious that the above $G$ increases very slowly at small distances and the line is below the expected value for a CSR process (green line). We might think that the underlying spatial process is regular point process. However, this visual inspection is not enough for a final conclusion. In Simulation Envelopes, we are going to demonstrate how to simulate data under CSR many times and construct the $95\%$ simulation envelope for $G$.
End of explanation
"""
# Empirical F function ("point-event" CDF) based on random test points.
fp1 = F(pp, intervals=20)  # The default is to randomly generate 100 points.
fp1.plot()
fp1.plot(qq=True)
"""
Explanation: $F$ function - "point-event"
When the number of events in a point pattern is small, $G$ function is rough (see the $G$ function plot for the 12 size point pattern above). One way to get around this is to turn to $F$ function where a given number of randomly distributed points are generated in the domain and the nearest event neighbor distance is calculated for each point. The cumulative distribution of all nearest event neighbor distances is called $F$ function.
End of explanation
"""
# Re-estimate F on a finer grid of 50 intervals for a smoother curve.
fp1 = F(pp, intervals=50)
fp1.plot()
fp1.plot(qq=True)
"""
Explanation: We can increase the number of intervals to make $F$ more smooth.
End of explanation
"""
# J function: the ratio (1 - G(d)) / (1 - F(d)); equals 1 under CSR.
jp1 = J(pp, intervals=20)
jp1.plot()
"""
Explanation: $F$ function is more smooth than $G$ function.
$J$ function - a combination of "event-event" and "point-event"
$J$ function is defined as follows:
$$J(d) = \frac{1-G(d)}{1-F(d)}$$
If $J(d)<1$, the underlying point process is a cluster point process; if $J(d)=1$, the underlying point process is a random point process; otherwise, it is a regular point process.
End of explanation
"""
# Ripley's K function (interevent distances); CSR expectation is pi * d**2.
kp1 = K(pp)
kp1.plot()
"""
Explanation: From the above figure, we can observe that $J$ function is obviously above the $J(d)=1$ horizontal line. It is approaching infinity with nearest neighbor distance increasing. We might tend to conclude that the underlying point process is a regular one.
Interevent Distance Functions
Nearest neighbor distance functions consider only the nearest neighbor distances, "event-event", "point-event" or the combination. Thus, distances to higher order neighbors are ignored, which might reveal important information regarding the point process. Interevent distance functions, including $K$ and $L$ functions, are proposed to consider distances between all pairs of event points. Similar to $G$, $F$ and $J$ functions, $K$ and $L$ functions are also cumulative distribution function.
$K$ function - "interevent"
Given distance $d$, $K(d)$ is defined as:
$$K(d) = \frac{\sum_{i=1}^n \sum_{j=1}^n \psi_{ij}(d)}{n \hat{\lambda}}$$
where
$$
\psi_{ij}(d) =
\begin{cases}
1 & \quad \text{if } d_{ij}<d \
0 & \quad \text{otherwise } \
\end{cases}
$$
$\sum_{j=1}^n \psi_{ij}(d)$ is the number of events within a circle of radius $d$ centered on event $s_i$ .
Still, we use CSR as the benchmark (null hypothesis) and see how the $K$ function estimated from the observed point pattern deviate from that under CSR, which is $K(d)=\pi d^2$. $K(d)<\pi d^2$ indicates that the underlying point process is a regular point process. $K(d)>\pi d^2$ indicates that the underlying point process is a cluster point process.
End of explanation
"""
# L function: variance-stabilized transform of K; equals 0 under CSR.
lp1 = L(pp)
lp1.plot()
"""
Explanation: $L$ function - "interevent"
$L$ function is a scaled version of $K$ function, defined as:
$$L(d) = \sqrt{\frac{K(d)}{\pi}}-d$$
End of explanation
"""
realizations = PoissonPointProcess(pp.window, pp.n, 100, asPP=True)  # simulate CSR 100 times
genv = Genv(pp, intervals=20, realizations=realizations)  # call Genv to generate simulation envelope
genv
genv.observed  # G values estimated from the observed point pattern
genv.plot()  # observed G with the 95% envelope (LB/UB) and CSR mean
"""
Explanation: Simulation Envelopes
A Simulation envelope is a computer intensive technique for inferring whether an observed pattern significantly deviates from what would be expected under a specific process. Here, we always use CSR as the benchmark. In order to construct a simulation envelope for a given function, we need to simulate CSR a lot of times, say $1000$ times. Then, we can calculate the function for each simulated point pattern. For every distance $d$, we sort the function values of the $1000$ simulated point patterns. Given a confidence level, say $95\%$, we can acquire the $25$th and $975$th value for every distance $d$. Thus, a simulation envelope is constructed.
Simulation Envelope for G function
Genv class in pysal.
End of explanation
"""
# Simulation envelope for the F function from the same CSR realizations.
fenv = Fenv(pp, intervals=20, realizations=realizations)
fenv.plot()
"""
Explanation: In the above figure, LB and UB comprise the simulation envelope. CSR is the mean function calculated from the simulated data. G is the function estimated from the observed point pattern. It is well below the simulation envelope. We can infer that the underlying point process is a regular one.
Simulation Envelope for F function
Fenv class in pysal.
End of explanation
"""
# Simulation envelope for the J function.
jenv = Jenv(pp, intervals=20, realizations=realizations)
jenv.plot()
"""
Explanation: Simulation Envelope for J function
Jenv class in pysal.
End of explanation
"""
# Simulation envelope for the K function.
kenv = Kenv(pp, intervals=20, realizations=realizations)
kenv.plot()
"""
Explanation: Simulation Envelope for K function
Kenv class in pysal.
End of explanation
"""
# Simulation envelope for the L function.
lenv = Lenv(pp, intervals=20, realizations=realizations)
lenv.plot()
"""
Explanation: Simulation Envelope for L function
Lenv class in pysal.
End of explanation
"""
from pysal.lib.cg import shapely_ext
from pysal.explore.pointpats import Window
import pysal.lib as ps
# Build the study window: read the Virginia county polygons and dissolve
# them into a single state outline.
# NOTE(review): shapely's cascaded_union is deprecated in favor of
# unary_union -- confirm which name this pysal wrapper exposes.
va = ps.io.open(ps.examples.get_path("vautm17n.shp"))
polys = [shp for shp in va]
state = shapely_ext.cascaded_union(polys)
"""
Explanation: CSR Example
In this example, we are going to generate a point pattern as the "observed" point pattern. The data generating process is CSR. Then, we will simulate CSR in the same domain for 100 times and construct a simulation envelope for each function.
End of explanation
"""
# Ragged nested sequences must be wrapped explicitly: since NumPy 1.24,
# np.asarray on ragged input without dtype=object raises a ValueError.
a = [[1], [1, 2]]
np.asarray(a, dtype=object)
# Generate one realization of 100 CSR points inside the state window;
# this acts as the "observed" point pattern for the rest of the example.
n = 100
samples = 1
pp = PoissonPointProcess(Window(state.parts), n, samples, asPP=True)
pp.realizations[0]
pp.n
"""
Explanation: Generate the point pattern pp (size 100) from CSR as the "observed" point pattern.
End of explanation
"""
# 100 CSR realizations in the same window; these build the null-hypothesis
# simulation envelopes below.
csrs = PoissonPointProcess(pp.window, 100, 100, asPP=True)
csrs
"""
Explanation: Simulate CSR in the same domain for 100 times which would be used for constructing simulation envelope under the null hypothesis of CSR.
End of explanation
"""
# G-function envelope for the "observed" (itself CSR-generated) pattern.
genv = Genv(pp.realizations[0], realizations=csrs)
genv.plot()
"""
Explanation: Construct the simulation envelope for $G$ function.
End of explanation
"""
# Envelope bounds as arrays over the distance domain.
genv.low  # lower bound of the simulation envelope for G
genv.high  # higher bound of the simulation envelope for G
"""
Explanation: Since the "observed" $G$ is well contained by the simulation envelope, we infer that the underlying point process is a random process.
End of explanation
"""
# F-function envelope for the CSR-generated pattern.
fenv = Fenv(pp.realizations[0], realizations=csrs)
fenv.plot()
"""
Explanation: Construct the simulation envelope for $F$ function.
End of explanation
"""
# J-function envelope for the CSR-generated pattern.
jenv = Jenv(pp.realizations[0], realizations=csrs)
jenv.plot()
"""
Explanation: Construct the simulation envelope for $J$ function.
End of explanation
"""
# K-function envelope for the CSR-generated pattern.
kenv = Kenv(pp.realizations[0], realizations=csrs)
kenv.plot()
"""
Explanation: Construct the simulation envelope for $K$ function.
End of explanation
"""
# L-function envelope for the CSR-generated pattern.
lenv = Lenv(pp.realizations[0], realizations=csrs)
lenv.plot()
"""
Explanation: Construct the simulation envelope for $L$ function.
End of explanation
"""
|
tclaudioe/Scientific-Computing | SC1/Bonus_Newton_Rn.ipynb | bsd-3-clause | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact
"""
Explanation: <center>
<h1> ILI285 - Computación Científica I / INF285 - Computación Científica </h1>
<h2> Newton's Method in $\mathbb{R}^n$ </h2>
<h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2>
<h2> Version: 1.01</h2>
</center>
Table of Contents
Newton's method
Python Modules and Functions
Acknowledgements
End of explanation
"""
# Nonlinear system: intersection of the unit circle with the parabola y = x**2.
def f1(x, y):
    """Residual of the unit-circle constraint x**2 + y**2 = 1."""
    return x ** 2 + y ** 2 - 1


def f2(x, y):
    """Residual of the parabola constraint y = x**2."""
    return y - x ** 2


def J(x, y):
    """Jacobian matrix of (f1, f2) with respect to (x, y)."""
    return np.array([[2 * x, 2 * y],
                     [-2 * x, 1]])


def Newton(x, y):
    """Perform one Newton-Raphson step: solve J * delta = F, then subtract."""
    current = np.array([[x], [y]])
    residual = np.array([[f1(x, y)], [f2(x, y)]])
    return current - np.linalg.solve(J(x, y), residual)
# Evaluate both residual surfaces on a grid and draw their zero level sets
# (the unit circle and the parabola); the system's roots are the
# intersections of the two curves.
delta = 0.025
x = np.arange(-1.5, 1.5, delta)
y = np.arange(-1.5, 1.5, delta)
X, Y = np.meshgrid(x, y)
Z1 = f1(X,Y)
Z2 = f2(X,Y)
plt.figure()
CS1 = plt.contour(X, Y, Z1,levels=[0])
CS2 = plt.contour(X, Y, Z2,levels=[0])
#plt.clabel(CS1, inline=1, fontsize=10)
#plt.clabel(CS2, inline=1, fontsize=10)
plt.grid()
plt.axis('equal')
plt.title(r'Newton $\mathbb{R}^n$')
plt.show()
def Show_Newton(x0=1.2, y0=0.3, n=0):
    """Plot the first `n` Newton-Raphson steps starting from (x0, y0).

    Parameters
    ----------
    x0, y0 : float
        Starting point of the iteration.
    n : int
        Number of Newton steps to draw; each iterate is printed as well.
    """
    plt.figure()
    CS1 = plt.contour(X, Y, Z1, levels=[0])
    CS2 = plt.contour(X, Y, Z2, levels=[0])
    plt.grid()
    plt.axis('equal')
    plt.title(r'Newton $\mathbb{R}^n$')
    plt.plot(x0, y0, 'rx')
    print(x0, y0)
    for i in range(int(n)):
        xout = Newton(x0, y0)
        # Extract scalars by explicit indexing: float() on a size-1
        # ndarray is deprecated (NumPy >= 1.25) and will raise in future.
        x1 = xout[0, 0]
        y1 = xout[1, 0]
        plt.plot(x1, y1, 'rx')
        plt.plot([x0, x1], [y0, y1], 'r')
        x0, y0 = x1, y1
        print(x0, y0)
    plt.show()

interact(Show_Newton, x0=(-1.4, 1.4, 0.1), y0=(-1.4, 1.4, 0.1), n=(0, 100, 1))
"""
Explanation: <div id='newton' />
Newton's method
End of explanation
"""
|
tensorflow/docs-l10n | site/en-snapshot/probability/examples/Eight_Schools.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Probability Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
import warnings
# Enable TF2 eager semantics and silence noisy warnings for the notebook.
tf.enable_v2_behavior()
plt.style.use("ggplot")
warnings.filterwarnings('ignore')
"""
Explanation: Eight schools
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/probability/examples/Eight_Schools"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Eight_Schools.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Eight_Schools.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Eight_Schools.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
The eight schools problem (Rubin 1981) considers the effectiveness of SAT coaching programs conducted in parallel at eight schools. It has become a classic problem (Bayesian Data Analysis, Stan) that illustrates the usefulness of hierarchical modeling for sharing information between exchangeable groups.
The implementation below is an adaptation of an Edward 1.0 tutorial.
Imports
End of explanation
"""
# Observed data (Rubin 1981): per-school estimated treatment effect and
# its standard error, in SAT-V points.
num_schools = 8  # number of schools
treatment_effects = np.array(
    [28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float32)  # treatment effects
treatment_stddevs = np.array(
    [15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32)  # treatment SE
# Bar chart of the observed effects with +/- 1 SE error bars.
fig, ax = plt.subplots()
plt.bar(range(num_schools), treatment_effects, yerr=treatment_stddevs)
plt.title("8 Schools treatment effects")
plt.xlabel("School")
plt.ylabel("Treatment effect")
fig.set_size_inches(10, 8)
plt.show()
"""
Explanation: The Data
From Bayesian Data Analysis, section 5.5 (Gelman et al. 2013):
A study was performed for the Educational Testing Service to analyze the effects of special coaching programs for SAT-V (Scholastic Aptitude Test-Verbal) in each of eight high schools. The outcome variable in each study was the score on a special administration of the SAT-V, a standardized multiple choice test administered by the Educational Testing Service and used to help colleges make admissions decisions; the scores can vary between 200 and 800, with mean about 500 and standard deviation about 100. The SAT examinations are designed to be resistant to short-term efforts directed specifically toward improving performance on the test; instead they are designed to reflect knowledge acquired and abilities developed over many years of education. Nevertheless, each of the eight schools in this study considered its short-term coaching program to be very successful at increasing SAT scores. Also, there was no prior reason to believe that any of the eight programs was more effective than any other or that some were more similar in effect to each other than to any other.
For each of the eight schools ($J = 8$), we have an estimated treatment effect $y_j$ and a standard error of the effect estimate $\sigma_j$. The treatment effects in the study were obtained by a linear regression on the treatment group using PSAT-M and PSAT-V scores as control variables. As there was no prior belief that any of the schools were more or less similar or that any of the coaching programs would be more effective, we can consider the treatment effects as exchangeable.
End of explanation
"""
# Non-centered hierarchical normal model as a joint distribution.
# Draw order: avg_effect (mu), avg_stddev (log tau), standardized school
# effects (theta'), then the observed effects y whose location is
# mu + exp(log tau) * theta' and whose scale is the known per-school SE.
model = tfd.JointDistributionSequential([
    tfd.Normal(loc=0., scale=10., name="avg_effect"),  # `mu` above
    tfd.Normal(loc=5., scale=1., name="avg_stddev"),  # `log(tau)` above
    tfd.Independent(tfd.Normal(loc=tf.zeros(num_schools),
                               scale=tf.ones(num_schools),
                               name="school_effects_standard"),  # `theta_prime`
                    reinterpreted_batch_ndims=1),
    # Later distributions receive earlier draws as arguments (most recent
    # first), hence the reversed parameter order in the lambda.
    lambda school_effects_standard, avg_stddev, avg_effect: (
        tfd.Independent(tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
                                        tf.exp(avg_stddev[..., tf.newaxis]) *
                                        school_effects_standard),  # `theta` above
                                   scale=treatment_stddevs),
                        name="treatment_effects",  # `y` above
                        reinterpreted_batch_ndims=1))
])

def target_log_prob_fn(avg_effect, avg_stddev, school_effects_standard):
    """Unnormalized target density as a function of states.

    Evaluates the joint log-probability with the observed treatment
    effects pinned to the data.
    """
    return model.log_prob((
        avg_effect, avg_stddev, school_effects_standard, treatment_effects))
"""
Explanation: Model
To capture the data, we use a hierarchical normal model. It follows the generative process,
$$
\begin{align}
\mu &\sim \text{Normal}(\text{loc}{=}0,\ \text{scale}{=}10) \
\log\tau &\sim \text{Normal}(\text{loc}{=}5,\ \text{scale}{=}1) \
\text{for } & i=1\ldots 8:\
& \theta_i \sim \text{Normal}\left(\text{loc}{=}\mu,\ \text{scale}{=}\tau \right) \
& y_i \sim \text{Normal}\left(\text{loc}{=}\theta_i,\ \text{scale}{=}\sigma_i \right)
\end{align}
$$
where $\mu$ represents the prior average treatment effect and $\tau$ controls how much variance there is between schools. The $y_i$ and $\sigma_i$ are observed. As $\tau \rightarrow \infty$, the model approaches the no-pooling model, i.e., each of the school treatment effect estimates are allowed to be more independent. As $\tau \rightarrow 0$, the model approaches the complete-pooling model, i.e., all of the school treatment effects are closer to the group average $\mu$. To restrict the standard deviation to be positive, we draw $\tau$ from a lognormal distribution (which is equivalent to drawing $log(\tau)$ from a normal distribution).
Following Diagnosing Biased Inference with Divergences, we transform the model above into an equivalent non-centered model:
$$
\begin{align}
\mu &\sim \text{Normal}(\text{loc}{=}0,\ \text{scale}{=}10) \
\log\tau &\sim \text{Normal}(\text{loc}{=}5,\ \text{scale}{=}1) \
\text{for } & i=1\ldots 8:\
& \theta_i' \sim \text{Normal}\left(\text{loc}{=}0,\ \text{scale}{=}1 \right) \
& \theta_i = \mu + \tau \theta_i' \
& y_i \sim \text{Normal}\left(\text{loc}{=}\theta_i,\ \text{scale}{=}\sigma_i \right)
\end{align}
$$
We reify this model as a JointDistributionSequential instance:
End of explanation
"""
num_results = 5000  # number of posterior draws to keep
num_burnin_steps = 3000  # warm-up draws that are discarded
# Improve performance by tracing the sampler using `tf.function`
# and compiling it using XLA.
@tf.function(autograph=False, jit_compile=True)
def do_sampling():
    """Run HMC and return (chain states, kernel diagnostics)."""
    return tfp.mcmc.sample_chain(
        num_results=num_results,
        num_burnin_steps=num_burnin_steps,
        current_state=[
            tf.zeros([], name='init_avg_effect'),
            tf.zeros([], name='init_avg_stddev'),
            tf.ones([num_schools], name='init_school_effects_standard'),
        ],
        kernel=tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=target_log_prob_fn,
            step_size=0.4,
            num_leapfrog_steps=3))

states, kernel_results = do_sampling()
avg_effect, avg_stddev, school_effects_standard = states
# Undo the non-centered parameterization: theta = mu + exp(log tau) * theta'.
school_effects_samples = (
    avg_effect[:, np.newaxis] +
    np.exp(avg_stddev)[:, np.newaxis] * school_effects_standard)
num_accepted = np.sum(kernel_results.is_accepted)
print('Acceptance rate: {}'.format(num_accepted / num_results))
# Trace plot (left) and kernel density (right) of each school's posterior.
fig, axes = plt.subplots(8, 2, sharex='col', sharey='col')
fig.set_size_inches(12, 10)
for i in range(num_schools):
    axes[i][0].plot(school_effects_samples[:,i].numpy())
    axes[i][0].title.set_text("School {} treatment effect chain".format(i))
    sns.kdeplot(school_effects_samples[:,i].numpy(), ax=axes[i][1], shade=True)
    axes[i][1].title.set_text("School {} treatment effect distribution".format(i))
axes[num_schools - 1][0].set_xlabel("Iteration")
axes[num_schools - 1][1].set_xlabel("School effect")
fig.tight_layout()
plt.show()
# Posterior means of the top-level parameters and the per-school effects.
print("E[avg_effect] = {}".format(np.mean(avg_effect)))
print("E[avg_stddev] = {}".format(np.mean(avg_stddev)))
print("E[school_effects_standard] =")
print(np.mean(school_effects_standard[:, ]))
print("E[school_effects] =")
print(np.mean(school_effects_samples[:, ], axis=0))
# Compute the posterior median and central 95% interval of each school's
# treatment effect. One vectorized np.percentile call (axis 0 = MCMC
# draws) replaces three per-school list comprehensions; the result has
# shape (3, num_schools) and unpacks row by row.
school_effects_low, school_effects_med, school_effects_hi = np.percentile(
    school_effects_samples, [2.5, 50, 97.5], axis=0)
# Posterior medians with 95% intervals (red) against the observed effects
# (blue); the dashed line marks the posterior mean of the group effect.
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
ax.scatter(np.array(range(num_schools)), school_effects_med, color='red', s=60)
ax.scatter(
    np.array(range(num_schools)) + 0.1, treatment_effects, color='blue', s=60)
plt.plot([-0.2, 7.4], [np.mean(avg_effect),
                       np.mean(avg_effect)], 'k', linestyle='--')
ax.errorbar(
    np.array(range(8)),
    school_effects_med,
    yerr=[
        school_effects_med - school_effects_low,
        school_effects_hi - school_effects_med
    ],
    fmt='none')
ax.legend(('avg_effect', 'HMC', 'Observed effect'), fontsize=14)
plt.xlabel('School')
plt.ylabel('Treatment effect')
plt.title('HMC estimated school treatment effects vs. observed data')
fig.set_size_inches(10, 8)
plt.show()
"""
Explanation: Bayesian Inference
Given data, we perform Hamiltonian Monte Carlo (HMC) to calculate the posterior distribution over the model's parameters.
End of explanation
"""
# Overall posterior mean and spread, pooled across schools and draws.
print("Inferred posterior mean: {0:.2f}".format(
    np.mean(school_effects_samples[:,])))
print("Inferred posterior mean se: {0:.2f}".format(
    np.std(school_effects_samples[:,])))
"""
Explanation: We can observe the shrinkage toward the group avg_effect above.
End of explanation
"""
# Posterior predictive: pin the latent parameters to their posterior
# means (broadcast over the sample dimension) and draw new treatment
# effects from the model; the `None` slot samples treatment_effects.
sample_shape = [5000]
_, _, _, predictive_treatment_effects = model.sample(
    value=(tf.broadcast_to(np.mean(avg_effect, 0), sample_shape),
           tf.broadcast_to(np.mean(avg_stddev, 0), sample_shape),
           tf.broadcast_to(np.mean(school_effects_standard, 0),
                           sample_shape + [num_schools]),
           None))
# Kernel densities of the predictive draws, two schools per figure row.
fig, axes = plt.subplots(4, 2, sharex=True, sharey=True)
fig.set_size_inches(12, 10)
fig.tight_layout()
for i, ax in enumerate(axes):
    sns.kdeplot(predictive_treatment_effects[:, 2*i].numpy(),
                ax=ax[0], shade=True)
    ax[0].title.set_text(
        "School {} treatment effect posterior predictive".format(2*i))
    sns.kdeplot(predictive_treatment_effects[:, 2*i + 1].numpy(),
                ax=ax[1], shade=True)
    ax[1].title.set_text(
        "School {} treatment effect posterior predictive".format(2*i + 1))
plt.show()
# The mean predicted treatment effects for each of the eight schools.
prediction = np.mean(predictive_treatment_effects, axis=0)
"""
Explanation: Criticism
To get the posterior predictive distribution, i.e., a model of new data $y^*$ given the observed data $y$:
$$ p(y^*|y) \propto \int_\theta p(y^* | \theta)p(\theta |y)d\theta$$
we override the values of the random variables in the model to set them to the mean of the posterior distribution, and sample from that model to generate new data $y^*$.
End of explanation
"""
# Residuals: observed effects minus the posterior-predictive means.
treatment_effects - prediction
"""
Explanation: We can look at the residuals between the treatment effects data and the predictions of the model posterior. These correspond with the plot above which shows the shrinkage of the estimated effects toward the population average.
End of explanation
"""
# Distribution of residuals per school across all predictive draws.
residuals = treatment_effects - predictive_treatment_effects
fig, axes = plt.subplots(4, 2, sharex=True, sharey=True)
fig.set_size_inches(12, 10)
fig.tight_layout()
for i, ax in enumerate(axes):
    sns.kdeplot(residuals[:, 2*i].numpy(), ax=ax[0], shade=True)
    ax[0].title.set_text(
        "School {} treatment effect residuals".format(2*i))
    sns.kdeplot(residuals[:, 2*i + 1].numpy(), ax=ax[1], shade=True)
    ax[1].title.set_text(
        "School {} treatment effect residuals".format(2*i + 1))
plt.show()
"""
Explanation: Because we have a distribution of predictions for each school, we can consider the distribution of residuals as well.
End of explanation
"""
|
zzsza/Datascience_School | 09. 기초 확률론2 - 확률 변수/05. 누적 분포 함수와 확률 밀도 함수.ipynb | mit | %%tikz
\filldraw [fill=white] (0,0) circle [radius=1cm];
\foreach \angle in {60,30,...,-270} {
\draw[line width=1pt] (\angle:0.9cm) -- (\angle:1cm);
}
\draw (0,0) -- (90:0.8cm);
"""
Explanation: 누적 분포 함수와 확률 밀도 함수
누적 분포 함수(cumulative distribution function)와 확률 밀도 함수(probability density function)는 확률 변수의 분포 즉, 확률 분포를 수학적으로 정의하기 위한 수식이다.
확률 분포의 묘사
확률의 정의에서 확률은 사건(event)이라는 표본의 집합에 대해 할당된 숫자라고 하였다. 데이터 분석을 하려면 확률이 구체적으로 어떻게 할당되었는지를 묘사(describe)하거나 전달(communicate)해야 할 필요가 있다. 어떤 사건에 어느 정도의 확률이 할당되었는지를 묘사한 것을 확률 분포(distribution)라고 한다.
확률 분포를 묘사하기 위해서는 모든 사건(event)들을 하나 하나 제시하고 거기에 할당된 숫자를 보여야 하기 때문에 확률 분포의 묘사는 결코 쉽지 않은 작업이다. 그러나 확률 변수를 이용하면 이러한 묘사 작업이 좀 더 쉬워진다. 왜냐하면 사건(event)이 구간(interval)이 되고 이 구간을 지정하는 데는 시작점과 끝점이라는 두 개의 숫자만 있으면 되기 때문이다.
[[school_notebook:4bcfe70a64de40ec945639236b0e911d]]
그러나 사건(event) 즉, 구간(interval) 하나를 정의하기 위해 숫자가 하나가 아닌 두 개가 필요하다는 점은 아무래도 불편하다. 숫자 하나만으로 사건 즉, 구간을 정의할 수 있는 방법은 없을까? 이를 해결하기 위한 아이디어 중 하나는 구간의 시작을 나타내는 숫자를 모두 같은 숫자인 음수 무한대($-\infty$)로 통일하는 것이다. 여러가지 구간들 중에서 시작점이 음수 무한대인 구간만 사용하는 것이라고 볼 수 있다.
$${ -\infty \leq X < -1 } $$
$${ -\infty \leq X < 0 } $$
$${ -\infty \leq X < 1 } $$
$${ -\infty \leq X < 2 } $$
$$ \vdots $$
$$ { -\infty \leq X < x } $$
$$ \vdots $$
물론 이러한 구간들은 시그마 필드를 구성하는 전체 사건(event)들 중 일부에 지나지 않는다. 그러나 확률 공간과 시그마 필드의 정의를 이용하면 이러한 구간들로부터 시작점이 음수 무한대가 아닌 다른 구간들을 생성할 수 있다. 또한 새로 생성된 구간들에 대한 확률값도 확률의 정의에 따라 계산할 수 있다.
누적 확률 분포
위와 같은 방법으로 서술된 확률 분포를 누적 분포 함수 (cumulative distribution function) 또는 누적 확률 분포라고 하고 약자로 cdf라고 쓴다. 일반적으로 cdf는 대문자를 사용하여 $F(x)$와 같은 기호로 표시하며 이 때 독립 변수 $x$는 범위의 끝을 뜻한다. 범위의 시작은 음의 무한대(negative infinity, $-\infty$)이다.
확률 변수 $X$에 대한 누적 확률 분포 $F(x)$의 수학적 정의는 다음과 같다.
$$ F(x) = P({X < x}) = P(X < x)$$
몇가지 누적 확률 분포 표시의 예를 들면 다음과 같다.
$$ \vdots $$
* $F(-1)$ : 확률 변수가 $-\infty$이상 -1 미만인 구간 내에 존재할 확률 즉, $P( { -\infty \leq X < -1 })$
* $F(0)$ : 확률 변수가 $-\infty$이상 0 미만인 구간 내에 존재할 확률 즉, $P( { -\infty \leq X < 0 })$
* $F(1)$ : 확률 변수가 $-\infty$이상 1 미만인 구간 내에 존재할 확률 즉, $P( { -\infty \leq X < 1 })$
$$ \vdots $$
* $F(10)$ : 확률 변수가 $-\infty$이상 10 미만인 구간 내에 존재할 확률 즉, $P( { -\infty \leq X < 10 })$
$$ \vdots $$
End of explanation
"""
# CDF of the uniformly distributed clock-hand angle: 0 below 0 degrees,
# rising linearly to 1 at 360 degrees, constant afterwards.
t = np.linspace(-100, 500, 100)
F = t / 360
F[t < 0] = 0  # no probability mass below 0 degrees
F[t > 360] = 1  # all mass accumulated past 360 degrees
plt.plot(t, F)
plt.ylim(-0.1, 1.1)
plt.xticks([0, 180, 360]);
plt.title("Cumulative Distribution Function");
plt.xlabel("$x$ (deg.)");
plt.ylabel("$F(x)$");
"""
Explanation: 시계 바늘 확률 문제의 경우를 예로 들어보자. 이 경우에는 각도가 0도부터 360까지이지만 음의 무한대를 시작점으로 해도 상관없다.
$$ F(0) = P({ -\infty {}^{\circ} \leq \theta < 0 {}^{\circ} }) = 0 $$
$$ F(10) = P({ -\infty {}^{\circ} \leq \theta < 10 {}^{\circ} }) = \dfrac{1}{36} $$
$$ F(20) = P({ -\infty {}^{\circ} \leq \theta < 20 {}^{\circ} }) = \dfrac{2}{36} $$
$$ \vdots $$
$$ F(350) = P({ -\infty {}^{\circ} \leq \theta < 350 {}^{\circ} }) = \dfrac{35}{36} $$
$$ F(360) = P({ -\infty {}^{\circ} \leq \theta < 360 {}^{\circ} }) = 1 $$
$$ F(370) = P({ -\infty {}^{\circ} \leq \theta < 370 {}^{\circ} }) = 1 $$
$$ F(380) = P({ -\infty {}^{\circ} \leq \theta < 380 {}^{\circ} }) = 1 $$
$$ \vdots $$
이를 NumPy와 matplotlib를 사용하여 그래프로 그래면 다음과 같다.
End of explanation
"""
# PDF obtained by numerically differentiating the CDF.
t = np.linspace(-100, 500, 1000)
F = t / 360
F[t < 0] = 0
F[t > 360] = 1
f = np.gradient(F)  # numerical differentiation of the CDF
# NOTE(review): np.gradient(F) differentiates with respect to the sample
# index, not t, so the curve shows only the relative shape of the density,
# not values that integrate to 1 -- np.gradient(F, t) would normalize it.
plt.plot(t, f)
plt.ylim(-0.0001, f.max()*1.1)
plt.xticks([0, 180, 360]);
plt.title("Probability Density Function");
plt.xlabel("$x$ (deg.)");
plt.ylabel("$f(x)$");
"""
Explanation: 누적 밀도 함수 즉 cdf는 다음과 같은 특징을 가진다.
$F(-\infty) = 0$
$F(+\infty) = 1$
$F(x) \geq F(y) \;\; \text{ if } \;\; x > y $
확률 밀도 함수
누적 분포 함수는 확률 분포를 함수라는 편리한 상태로 바꾸어 주었다. 누적 분포 함수는 확률이 어느 사건(event)에 어느 정도 분포되어 있는지 수학적으로 명확하게 표현해 준다.
그러나 누적 분포 함수가 표현하는 사건이 음수 무한대를 시작점으로 하고 변수 $x$를 끝점으로 하는 구간이다보니 분포의 형상을 직관적으로 이해하기는 힘든 단점이 있다. 다시 말해서 어떤 확률 변수 값이 더 자주 나오는지에 대한 정보를 알기 힘들다는 점이다.
이를 알기 위해서는 확률 변수가 나올 수 있는 전체 구간 ($-\infty$ ~ $\infty$)을 아주 작은 폭을 가지는 구간들로 나눈 다음 각 구간의 확률을 살펴보는 것이 편리하다. 다만 이렇게 되면 구간의 폭(width)을 어느 정도로 정의해야 하는지에 대한 추가적인 약속이 필요하기 때문에 실효성이 떨어진다.
이러한 단점을 보완하기 위해 생각한 것이 절대적인 확률이 아닌 상대적인 확률 분포 형태만을 보기 위한 확률 밀도 함수(probability density function)이다.
누적 확률 분포 그래프의 x축의 오른쪽으로 이동하면서 크기의 변화를 살펴보자.만약 특정한 $x$값 근처의 구간에 확률이 배정되지 않았다면 누적 분포 함수는 그 구간을 지나도 증가하지 않는다. 즉, 기울기가 0이다. 왜냐하면 $x$ 값이 커졌다(x축의 오른쪽으로 이동하였다)는 것은 앞의 구간을 포함하는 더 큰 구간(사건)에 대한 확률을 묘사하고 있는 것인데 추가적으로 포함된 신규 구간에 확률이 없다면 그 신규 구간을 포함한 구간이나 포함하지 않은 구간이나 배정된 확률이 같기 때문이다.
누적 분포 함수의 기울기가 0이 아닌 경우는 추가적으로 포함된 구간에 0이 아닌 확률이 할당되어 있는 경우이다. 만약 더 많은 확률이 할당되었다면 누적 분포 함수는 그 구간을 지나면서 더 빠른 속도로 증가할 것이다. 다시 말해서 함수의 기울기가 커진다. 이러한 방식으로 누적 분포의 기울기의 크기를 보면 각 위치에 배정된 확률의 상대적 크기를 알 수 있다.
기울기를 구하는 수학적 연산이 미분(differentiation)이므로 확률 밀도 함수는 누적 분포 함수의 미분으로 정의한다.
$$ \dfrac{dF(x)}{dx} = f(x) $$
이를 적분으로 나타내면 다음과 같다.
$$ F(x) = \int_{-\infty}^{x} f(u) du $$
확률 밀도 함수는 특정 확률 변수 구간의 확률이 다른 구간에 비해 상대적으로 얼마나 높은가를 나타내는 것이며 그 값 자체가 확률은 아니다라는 점을 명심해야 한다.
확률 밀도 함수는 다음과 같은 특징을 가진다.
$-\infty$ 부터 $\infty$ 까지 적분하면 그 값은 1이 된다.
$$ \int_{-\infty}^{\infty} f(u)du = 1$$
확률 밀도 함수는 0보다 같거나 크다.
$$ f(x) \geq 0 $$
앞서 보인 시계 바늘 문제에서 확률 밀도함수를 구하면 다음과 같다.
End of explanation
"""
# Probability mass function of a loaded six-sided die: face 1 never
# occurs and face 6 is drawn with probability 0.4.
x = np.arange(1,7)
y = np.array([0.0, 0.1, 0.1, 0.2, 0.2, 0.4])
plt.stem(x, y);
plt.xlim(0, 7);
plt.ylim(-0.01, 0.5);
"""
Explanation: 확률 질량 함수
이산 확률 분포는 확률 밀도 함수를 정의할 수 없는 대신 확률 질량 함수가 존재한다. 확률 질량 함수(probability mass funtion)는 이산 확률 변수의 가능한 값 하나 하나에 대해 확률을 정의한 함수이다. 예를 들어 6면체인 주사위를 던져서 나올 수 있는 값은 1부터 6까지의 이산적인 값을 가지는데 이러한 이산 확률 변수는 예를 들어 다음과 같은 확률 질량 함수를 가질 수 있다. 이 경우에는 공정하지 않은(unfair) 주사위의 확률 분포를 보이고 있다.
End of explanation
"""
# CDF of the discrete distribution: cumulative sum of the pmf, drawn as a
# right-continuous step function.
x = np.arange(1,7)
y = np.array([0.0, 0.1, 0.1, 0.2, 0.2, 0.4])
z = np.cumsum(y)
plt.step(x, z, where="post");
plt.xlim(0, 7);
plt.ylim(-0.01, 1.1)
plt.show()
"""
Explanation: 위의 확률 질량 함수는 주사위 눈금 1이 나오지 않고 6이 비정상적으로 많이 나오게 만든 비정상적인 주사위(unfair dice)를 묘사한다.
이 확률 변수에 대해 각 값을 누적하여 더하면 이산 확률 변수의 누적 분포 함수(cumulative distribution function)를 구할 수 있다.
End of explanation
"""
|
Pybonacci/notebooks | tutormagic.ipynb | bsd-2-clause | %load_ext tutormagic
"""
Explanation: Esta será una microentrada para presentar una extensión para el notebook que estoy usando en un curso interno que estoy dando en mi empresa.
Si a alguno más os puede valer para mostrar cosas básicas de Python (2 y 3, además de Java y Javascript) para muy principiantes me alegro.
Nombre en clave: tutormagic
Esta extensión lo único que hace es embeber dentro de un IFrame la página de pythontutor usando el código que hayamos definido en una celda de código precedida de la cell magic %%tutor.
Como he comentado anteriormente, se puede escribir código Python2, Python3, Java y Javascript, que son los lenguajes soportados por pythontutor.
Ejemplo
Primero deberemos instalar la extensión. Está disponible en pypi por lo que la podéis instalar usando pip install tutormagic. Una vez instalada, dentro de un notebook de IPython la deberías cargar usando:
End of explanation
"""
%%tutor --lang python3
# Minimal Python 3 example visualized step by step in pythontutor.
a = 1
b = 2
def add(x, y):
    return x + y
c = add(a, b)
"""
Explanation: Una vez hecho esto ya deberiamos tener disponible la cell magic para ser usada:
End of explanation
"""
%%tutor --lang javascript
var a = 1;
var b = 1;
console.log(a + b);
"""
Explanation: Ahora un ejemplo con javascript:
End of explanation
"""
|
phoebe-project/phoebe2-docs | 2.0/tutorials/backend.ipynb | gpl-3.0 | %matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b['q'] = 0.8
b['ecc'] = 0.05
"""
Explanation: IPython Notebook | Python Script
Advanced: Digging into the Backend
Setup
As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
End of explanation
"""
b.add_dataset(phoebe.dataset.orb, times=np.linspace(0,40,1000), dataset='orb01', component=['primary', 'secondary'])
times, fluxes, sigmas = np.loadtxt('test.lc.in', unpack=True)
b.add_dataset(phoebe.dataset.lc, times=times, fluxes=fluxes, sigmas=sigmas, dataset='lc01')
"""
Explanation: And we'll attach some dummy datasets. See Datasets for more details.
End of explanation
"""
b.add_compute(compute='default_options')
ts, xs, ys, zs, vxs, vys, vzs =\
phoebe.dynamics.nbody.dynamics_from_bundle(b, np.linspace(0, 20, 500), compute='default_options')
"""
Explanation: From the Bundle
There is a middle-layer that translates the system in the Bundle into
the parameters (with correct units) required by the backend functions
themselves and returns the results (again with units).
Interaction with this middle-layer is much nicer than manually building
the system hierarchy, but doesn't avoid the overhead introduced by the frontend.
Dynamics
Let's say that you want to access (without adding a dataset) the dynamics
packages for the system you have setup in the Bundle.
First we'll look at Nbody dynamics, and later show the same for Keplerian.
End of explanation
"""
artist, = plt.plot(xs[0], zs[0], 'b-') # primary
artist, = plt.plot(xs[1], zs[1], 'r-') # secondary
"""
Explanation: The first returned item is an array of times - not surprisingly these match the times we just sent.
The rest of the returned items (xs, ys, etc) are each a list of arrays. The list contains an entry for each component in the system. So in this case, since we have a binary, each is a list of two arrays.
Now we can plot any of this information by pulling the correct array from these lists.
End of explanation
"""
ts, xs, ys, zs, vxs, vys, vzs, ethetas, elongans, eincls =\
phoebe.dynamics.keplerian.dynamics_from_bundle(b, np.linspace(0, 6, 500), compute='default_options', return_euler=True)
plt.cla()
artist, = plt.plot(xs[0], zs[0], 'b-') # primary
artist, = plt.plot(xs[1], zs[1], 'r-') # secondary
"""
Explanation: For Keplerian dynamics, there is an additional option whether to return the Euler angles as well. Since we'll later be using these to place the mesh in orbit, we'll set this to True.
End of explanation
"""
system = phoebe.backend.universe.System.from_bundle(b, compute='default_options')
"""
Explanation: Meshing
Similarly, we can create a backend "system" directly from the Bundle
End of explanation
"""
# List the bodies contained in the backend "system" container.
# Parenthesized single-argument print works identically on Python 2 and 3,
# whereas the bare statement form is a SyntaxError on Python 3.
print(system.bodies)
"""
Explanation: This "system" object is just a container of multiple "bodies" which are also accessible
End of explanation
"""
system.initialize_meshes()
"""
Explanation: First we need to initialize the mesh "standard". By default this is done at periastron and stores a copy of each unscaled mesh at periastron. This mesh is later scaled, placed in orbit, and reprojected if necessary (for volume conservation in eccentric orbits, for example).
End of explanation
"""
system.bodies[0].get_standard_mesh(scaled=False)
system.bodies[0].get_standard_mesh(scaled=True)
"""
Explanation: These standards can then be accessed:
End of explanation
"""
xi, yi, zi, vxi, vyi, vzi, ethetai, elongani, eincli = phoebe.dynamics.dynamics_at_i(xs, ys, zs, vxs, vys, vzs, ethetas, elongans, eincls, i=0)
system.update_positions(ts[0], xi, yi, zi,
vxi, vyi, vzi,
ethetai, eincli, elongani)
"""
Explanation: We then simply need to pass positions, velocities, and euler angles at a given time to have the meshes scaled and placed in orbit for us.
From the dynamics, we have lists of arrays. So for the first time, we want ts[0] and xs[:,0], etc.
End of explanation
"""
system.meshes
"""
Explanation: The meshes are now placed in orbit at the current time. These meshes are accessible via:
End of explanation
"""
system.meshes.keys()
"""
Explanation: which acts like a dictionary to get a mesh from a single component (if desired)
End of explanation
"""
system.meshes.get_column('teffs')
"""
Explanation: We can pull any column out of the mesh in a similar dictionary style (with an entry for each component)
End of explanation
"""
system.meshes.get_column_flat('teffs')
"""
Explanation: or as a single flat array
End of explanation
"""
system.meshes['primary'].keys()
"""
Explanation: Note that the column names are not identical to those exposed in the frontend (since tags do not exist in the backend). To access the list of available keys in the mesh, we can directly inspect the mesh.
End of explanation
"""
system.handle_eclipses(eclipse_alg='native')
"""
Explanation: update_positions places the meshes in their orbits, handles volume conservation (for eccentric orbits), and computes instantaneous physical quantities (teff, logg, etc), but does NOT handle eclipse detection or subdivision.
We can now update the 'visibility' column by running eclipse detection.
End of explanation
"""
# Ratio of currently-visible triangles per component; a value below 1.0
# confirms that the eclipse algorithm hid part of a star.
# str.format keeps these prints compatible with both Python 2 and 3
# (the old multi-argument print statement is invalid Python 3 syntax).
visibilities = system.meshes.get_column('visibilities')
print("primary visibility: {0}".format(visibilities['primary'].sum() / len(visibilities['primary'])))
print("secondary visibility: {0}".format(visibilities['secondary'].sum() / len(visibilities['secondary'])))
"""
Explanation: We can easily check to make sure an eclipse happened by computing the ratio of triangles for each component that are currently visible.
End of explanation
"""
# N-body dynamics without the Bundle: every input must be supplied
# directly in the backend's native units (days, solar masses, radians).
times = np.linspace(0,10,500) # days
masses = [1.0, 0.8] # solar masses
smas = [1.0, 1.0] # AU
eccs = [0.8, 0.8] # unitless
incls = [np.pi/2, np.pi/2] # radians
per0s = [0, 0] # radians
long_ans = [0, 0] # radians
mean_anoms = [0, 0]  # radians
t0 = 0.0 # days
ts, xs, ys, zs, vxs, vys, vzs =\
phoebe.dynamics.nbody.dynamics(times, masses, smas,
eccs, incls, per0s,
long_ans, mean_anoms,
t0, stepsize=0.01,
gr=False,
ltte=False)
# NOTE(review): ethetas/elongans/eincls below were produced by the earlier
# *Keplerian* dynamics call; nbody.dynamics above does not return Euler
# angles, so stale angles are reused here -- confirm this is intended.
xi, yi, zi, vxi, vyi, vzi, ethetai, elongani, eincli = phoebe.dynamics.dynamics_at_i(xs, ys, zs, vxs, vys, vzs, ethetas, elongans, eincls, i=0)
artist, = plt.plot(xs[0], zs[0], 'b-') # primary
artist, = plt.plot(xs[1], zs[1], 'r-') # secondary
"""
Explanation: Without the Bundle
Now let's look at how we would setup the backend without a bundle. Once created, all of the capabilities described above are also available.
Dynamics
Dynamics without the bundle is nearly as simple, but special care needs to be made with passing values in the right units.
Once again, let's look at Nbody first and then Keplerian.
End of explanation
"""
times = np.linspace(0,10,500) # days
periods = [3.0, 3.0] # days
eccs = [0.0, 0.0]
smas = [1.0, 0.8] # Rsol (sma of the STAR about the CENTER OF MASS)
t0_perpasses = [0.0, 0.0] # days
per0s = [0.0, 0.0] # radians
long_ans = [0.0, 0.0] # radians
incls = [np.pi/2, np.pi/2] # radians
dpdts = [0.0, 0.0] # days/day
deccdts = [0.0, 0.0] # 1/day
dperdts = [0.0, 0.0] # radians/day
components = ['primary', 'secondary']
t0 = 0.0 # days
vgamma = 0.0 # solRad/day (TODO: double check!!!)
ts, xs, ys, zs, vxs, vys, vzs, ethetas, elongans, eincls =\
phoebe.dynamics.keplerian.dynamics(times, periods,
eccs, smas, t0_perpasses,
per0s, long_ans, incls,
dpdts, deccdts, dperdts,
components,
t0, vgamma=vgamma,
ltte=False,
return_euler=True)
plt.cla()
artist, = plt.plot(xs[0], ys[0], 'b-') # primary
artist, = plt.plot(xs[1], ys[1], 'r-') # secondary
"""
Explanation: Now let's look at Keplerian dynamics. Here we need to provide information for the parent orbit of each star. In the case of a simple binary, this can seem quite redundant.
End of explanation
"""
F = 1.0
Phi = 9.5
masses = [1.0, 0.8] # solar Masses
sma = 8.0 # solar radii
ecc = 0.0
freq_rot = 2*np.pi/1 # radians/day
abun = 0.0
alb_ref = 0.0
teff = 6000 # K
gravb_bol = 0.2
gravb_law = 'zeipel'
primarymesh = phoebe.backend.universe.Star(F, Phi, masses, sma, ecc,
freq_rot, teff, gravb_bol,
abun, alb_ref,
delta=0.1, maxpoints=1e5,
ind_self=0,
ind_sibling=1,
comp_no=1)
secondarymesh = primarymesh.copy()
system = phoebe.backend.universe.System({'primary': primarymesh, 'secondary': secondarymesh})
"""
Explanation: Meshing
End of explanation
"""
system.initialize_meshes()
system.update_positions(ts, xi, yi, zi,
vxi, vyi, vzi,
ethetai, eincli, elongani)
system.handle_eclipses(eclipse_alg='native')
"""
Explanation: Now that the "system" object is created, see the section in the "From the Bundle" section for details on initializing the meshes, placing them in orbit, eclipse detection, and accessing columns.
End of explanation
"""
|
borja876/Thinkful-DataScience-Borja | Housing Prices.ipynb | mit | %matplotlib inline
import numpy as np
import pandas as pd
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import math
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error
from sklearn.model_selection import train_test_split,cross_val_score, KFold, cross_val_predict, GridSearchCV
from sklearn.decomposition import PCA as sklearn_pca
from sklearn.decomposition import PCA
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.utils import resample
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn import preprocessing, decomposition
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
# Read and import data
housing = pd.read_csv('Melbourne_housing_FULL.csv')
housing.head()
#Check the columns in the raw data
housing.columns
#Check the kind of variables in the raw data
housing.info()
print(housing.isnull().sum())
print(len(housing))
#Eliminate the rows that have no info regarding price
housing1 = housing[np.isfinite(housing['Price'])]
#Transform the data into datetime
housing1['Date'] = pd.to_datetime(housing['Date'])
#Transform the date into months and year
housing1['Month'] = housing1['Date'].dt.month
housing1['Year'] = housing1['Date'].dt.year
#Delete the property count
del housing1['Propertycount']
#Understand the time span of the data
housing1.Date.min(), housing1.Date.max()
#Consider only the sales that have taken place within the last year of data (12 months)
#mask = (housing1['Date'] > '2016-12-08')
#housing2 = housing1.loc[mask]
#Check the Nan values that are still remaining
print(housing1.isnull().sum())
print(len(housing1))
#Drop the rows with NaN in 'Regionname'/'CouncilArea', then fill the
#remaining missing numeric values with each column's median.
#BUG FIX: ``subset`` expects a flat list of column labels; the previous
#nested list ``[['Regionname','CouncilArea']]`` is not a valid label list.
housing3 = housing1.dropna(subset=['Regionname', 'CouncilArea'])
housing4 = housing3.fillna(housing1.median())
#Check that there is no Nan value
print(housing4.isnull().sum())
print(len(housing4))
#Check the type of info regarding each variable
housing4.info()
#Plot the relationship between the numeric variables
housing4.hist(bins=50, figsize=(20,15))
plt.savefig("attribute_histogram_plots")
plt.show()
#Plot the longitituda and lattitude to improve the nderstanding of the area
housing4.plot(kind="scatter", x="Longtitude", y="Lattitude", alpha=0.2, figsize=(10,5))
#Plot the prices agasint longtitude and lattitude. Understand the concentration of high prices within regions in Melbourne
housing4.plot(kind="scatter", x="Longtitude", y="Lattitude",
alpha=0.4, figsize=(12.5,5), c="Price", cmap=plt.get_cmap("jet"), colorbar=True, sharex=False)
#Create new features: One that is the price per built_area and the other one to eliminate the skweness of price
housing4['Price_per_buildingarea'] = housing4['Price']/(housing4['BuildingArea']+1)
housing4['logPrice'] = np.log(housing4['Price'])
housing4['logdistance'] = np.log(housing4['Distance']+1)
#Understand the number of Regions and Suburbs to create a categorical features that classifies
#the most convenient one according to the price
print('Number of Suburbs:', len(housing4['Suburb'].value_counts()))
print('Number of Regions: ' , len(housing4['Regionname'].value_counts()))
#Create the clusters of neighbouhoods/suburbs as it will give the adequate level of granularity
#Consider prices and frequency of purchase in each area
freq = housing4.groupby('Suburb').count()['Postcode']
mean = housing4.groupby('Suburb').median()['Price_per_buildingarea']
cluster = pd.concat([freq, mean], axis=1)
cluster['Suburb'] = cluster.index
cluster.columns = ['freq', 'Price_per_buildingarea','Suburb']
cluster.describe()
#Divide into 2 clusters: Moderate and Expensive according to price. Create the moderate cluster
cluster1 = cluster[cluster.Price_per_buildingarea <= 6131]
cluster1.index
#Create the Expensive cluster
cluster2 = cluster[cluster.Price_per_buildingarea > 6131]
cluster2.index
#Add the clusters into the dataset
def get_group(suburb):
    """Return 'Moderate' for suburbs in the cheaper cluster, else 'Expensive'."""
    return 'Moderate' if suburb in cluster1.index else 'Expensive'
housing4['Neighbourhood'] = housing4.Suburb.apply(get_group)
#Check the features in place after all the data manipulation
housing4.head()
#Check the values for the new features
housing4.Neighbourhood.unique()
#Change the values of the new categorical feature to numerical values.
#A named mapping avoids shadowing the builtin ``dict``.
neighbourhood_map = {'Expensive': 0, 'Moderate': 1}
housing4['Neighbourhood'] = housing4['Neighbourhood'].map(neighbourhood_map)
#Check the relationship between logprice and the rest of the features
corr_matrix = housing4.corr()
corr_matrix["Price"].sort_values(ascending=False)
#Plot the relationship between all the numercial features
from pandas.plotting import scatter_matrix
attributes = ["Price", "Rooms", "Bedroom2", "Bathroom",'Car','BuildingArea']
scatter_matrix(housing4[attributes], figsize=(12, 8))
plt.savefig('matrix.png')
#Visualize the relationship between log Price and the most correlated variables.
plt.figure(figsize=(20, 5))
sns.set_style("whitegrid")
plt.subplot(1, 4, 1)
ax = sns.barplot(x="Rooms", y="Price", data=housing4, ci=None)
plt.ylabel("Price")
plt.title('Price vs Rooms')
plt.subplot(1, 4, 2)
ax = sns.barplot(x="Bedroom2", y="Price", data=housing4, ci=None)
plt.ylabel("Price")
plt.title('Price vs Bedroom2')
plt.subplot(1, 4, 3)
ax = sns.barplot(x="Bathroom", y="Price", data=housing4, ci=None)
plt.ylabel("Price")
plt.title('Price vs Bathroom')
plt.subplot(1, 4, 4)
ax = sns.barplot(x="Car", y="Price", data=housing4, ci=None)
plt.ylabel("Price")
plt.title('Price vs Car')
plt.tight_layout()
plt.show()
#Check all the columns in the manipulated dataframe
housing4.columns
#Create a new dataframe that incorporates the new variable Neghibourhood and eliminates the data that is not useful
housing5 = housing4.drop(['Suburb', 'Address', 'Method', 'SellerG', 'Date',
'Postcode', 'CouncilArea',
'Lattitude','Longtitude','Month','Year',#'Regionname'
],axis=1)
housing5.describe()
#Check values of the feature Regionname to transform into numercial values
housing5.Regionname.unique()
#Transform categorical feature Regionname into numerical values.
#A named mapping avoids shadowing the builtin ``dict``.
region_map = {'Northern Metropolitan': 1,
              'Western Metropolitan': 2,
              'Southern Metropolitan': 3,
              'Eastern Metropolitan': 4,
              'South-Eastern Metropolitan': 5,
              'Eastern Victoria': 6,
              'Northern Victoria': 7,
              'Western Victoria': 8}
housing5["Regionname"] = housing5["Regionname"].map(region_map)
#Build a new feature house Age based on the YearBuilt
housing5 = housing5[housing5.YearBuilt >1970].reset_index(drop=True)
housing5['Age'] = np.log(1+(2018 - housing5['YearBuilt']))
#Drop YearBuilt
housing5.drop('YearBuilt', inplace=True, axis=1)
housing.Type.unique()
#Change the categorical feature Type into numerical values
#(h = house, u = unit, t = townhouse) without shadowing the builtin ``dict``.
type_map = {'h': 1, 'u': 2, 't': 3}
housing5['Type'] = housing5['Type'].map(type_map)
#Eliminate outliers
housing5 = housing5[housing5.Rooms < 16].reset_index(drop=True)
#Describe the dataset
housing5.describe()
housing5.columns
#Pre-process the data
names = housing5.columns
X = pd.DataFrame(preprocessing.scale(housing5), columns = names)
#Create the predicted and predictors for the model
X1 = X.drop(['logPrice','Price'],axis=1)
Y = X['logPrice']
# Build up the correlation mtrix
Z = X1
correlation_matrix = Z.corr()
plt.figure(figsize=(20, 10))
ax = sns.heatmap(correlation_matrix, annot=True)
plt.show()
"""
Explanation: Using this Kaggle data create a model to predict a house's value.
End of explanation
"""
#Eigenvectores & Eigenvalues
eig_vals, eig_vecs = np.linalg.eig(correlation_matrix)
sklearn_pca = PCA(n_components=len(Z.columns))
Y_sklearn = sklearn_pca.fit_transform(correlation_matrix)
print(
'The percentage of total variance in the dataset explained by each',
'component from Sklearn PCA.\n',
sklearn_pca.explained_variance_ratio_
)
#From the Scree plot.
plt.plot(eig_vals)
plt.show()
#PCA Analysis
# Create a scaler object
sc = StandardScaler()
# Fit the scaler to the features and transform
X_std = sc.fit_transform(X1)
# Create a PCA object
pca = decomposition.PCA(n_components=6)
# Fit the PCA and transform the data
X_std_pca = pca.fit_transform(X_std)
# View the new feature data's shape
X_std_pca.shape
# Create a new dataframe with the new features
XPCA = pd.DataFrame(X_std_pca)
XPCA.head()
"""
Explanation: PCA Analysis
End of explanation
"""
# create the RFE model and select features
lr = LinearRegression()
nfeatures = 4
rfe = RFE(lr,nfeatures)
fit = rfe.fit(X1,Y)
# summarize the selection of the features
result_RFE = pd.DataFrame(list(zip(X1.head(0), rfe.ranking_, rfe.support_)),columns=['Features','Ranking','Support'] )
result_RFE.sort_values('Ranking')
"""
Explanation: Recursive Feature Elimination & PCA
End of explanation
"""
#Calculate Feature Importance using Random Forest
rf = RandomForestRegressor()
rf.fit(X1, Y)
#Define feature importance
feature_importance = rf.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(20, 5))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X1.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Features')
plt.show()
#Feature Selection: univariate scores for the most relevant features.
from sklearn.feature_selection import SelectKBest, f_regression
#BUG FIX: the target Y is continuous (log price), so the score function
#must be f_regression; the default f_classif assumes a classification task.
selector = SelectKBest(score_func=f_regression)
fit = selector.fit(X1, Y)
#Identify features with highest score from a predictive perspective
names2 = X1.columns
Bestfeatures = pd.DataFrame(fit.scores_, index=names2)
Bestfeatures.columns = ['Best Features']
Bestfeatures.sort_values(by=['Best Features'], ascending=False)
#Build new set of features
#Initial features
names = ['Rooms', 'Type', 'Distance', 'Bedroom2', 'Bathroom', 'Car',
'Landsize', 'BuildingArea', 'Regionname', 'Price_per_buildingarea',
'Neighbourhood', 'Age']
X0 = X1[names]
#Recursive Feature Elimination
namesRFE = ['Rooms','Bathroom','Car', 'Type', 'Distance', 'Regionname', 'Neighbourhood','Age']
XRFE = X1[namesRFE]
#Best Select Features
namesBS = ['Rooms', 'Bathroom', 'Price_per_buildingarea', 'Neighbourhood']
XBS = X1[namesBS]
#Feature Importance (Random Forest)
namesFIRF = ['Rooms', 'Type', 'BuildingArea', 'Price_per_buildingarea', 'Neighbourhood']
XFIRF = X1[namesFIRF]
"""
Explanation: Feature Importance with Random Forest
End of explanation
"""
#Feature Selection using Lasso
plt.figure(figsize=(20, 5))
sns.set_style("whitegrid")
plt.subplot(1, 4, 1)
for lambd in [x * 0.01 for x in range(1, 100)]:
lasso = Lasso(alpha=lambd)
lasso_coef = lasso.fit(X0, Y).coef_
plt.xticks(range(len(names)), names, rotation=90)
plt.ylabel('Coefficients')
plt.plot(range(len(names)), lasso_coef)
plt.title('Lasso (All features)')
plt.subplot(1, 4, 2)
for lambd in [x * 0.01 for x in range(1, 100)]:
lasso = Lasso(alpha=lambd)
lasso_coef = lasso.fit(XRFE, Y).coef_
plt.xticks(range(len(namesRFE)), namesRFE, rotation=90)
plt.ylabel('Coefficients')
plt.plot(range(len(namesRFE)), lasso_coef)
plt.title('Lasso RFE')
plt.subplot(1, 4, 3)
for lambd in [x * 0.01 for x in range(1, 100)]:
lasso = Lasso(alpha=lambd)
lasso_coef = lasso.fit(XBS, Y).coef_
plt.xticks(range(len(namesBS)), namesBS, rotation=90)
plt.ylabel('Coefficients')
plt.plot(range(len(namesBS)), lasso_coef)
plt.title('Lasso (Best Selector)')
plt.subplot(1, 4, 4)
for lambd in [x * 0.01 for x in range(1, 100)]:
lasso = Lasso(alpha=lambd)
lasso_coef = lasso.fit(XFIRF, Y).coef_
plt.xticks(range(len(namesFIRF)), namesFIRF, rotation=90)
plt.ylabel('Coefficients')
plt.plot(range(len(namesFIRF)), lasso_coef)
plt.title('Lasso on Random Forest')
plt.tight_layout()
plt.show()
#New set of features based on results obtained in with Lasso, Random Forest and RFE
Xl = X1[['Rooms', 'BuildingArea', 'Regionname','Distance', 'Neighbourhood']]
#Split the data into training and testing datasets. Split: 70/30; train/test
X_train, X_test, y_train, y_test = train_test_split(Xl,Y, test_size=0.3, random_state=0)
#Initiating the cross validation generator, N splits = 5
kf = KFold(5)
"""
Explanation: Feature Selection using Lasso
End of explanation
"""
#Fit the Linear Regression model with the PCA features
lr = LinearRegression()
lr.fit(X_train, y_train)
print(lr.coef_)
#Predict the value of y
y_pred = lr.predict(X_test)
#Mean squared error of the regressor
lin_mse = mean_squared_error(y_pred, y_test)
lin_rmse = np.sqrt(lin_mse)
#Accuracy of the regressor
accln = cross_val_score(lr, X_test, y_test, cv=kf).mean()
acclnpca = cross_val_score(lr, XPCA, Y, cv=kf).mean()
print('RMSE: %.4f' % lin_rmse)
print('Linear Regression Accuracy: %.4f' % accln)
print('Linear Regression Accuracy PCA: %.4f' % acclnpca)
"""
Explanation: Linear Regression
End of explanation
"""
# Testing the Ridge Regression with alpha tuned by grid search.
ridgeregr = Ridge()
k1 = np.arange(50)+1
parameters = {'alpha': k1}
#Fit parameters via cross-validated grid search
ridgeregr1 = GridSearchCV(ridgeregr, param_grid=parameters, cv=kf)
#Fit the tuned regressor on the TRAINING data only
ridgeregr1.fit(X_train, y_train)
#Print the best parameters
print(ridgeregr1.best_params_)
#BUG FIX: the model must not be refit on the test set (data leakage);
#the predictions below come from the estimator fitted on X_train.
#Predict the value of y
y_pred = ridgeregr1.predict(X_test)
#Mean squared error of the regressor
rid_mse = mean_squared_error(y_pred, y_test)
rid_rmse = np.sqrt(rid_mse)
#Accuracy of the regressor
accridg = cross_val_score(ridgeregr1, X_test, y_test, cv=kf).mean()
accridgpca = cross_val_score(ridgeregr1, XPCA, Y, cv=kf).mean()
print('RMSE: %.4f' % rid_rmse)
print('Ridge Regression Accuracy: %.4f' % accridg)
print('Ridge Regression Accuracy PCA: %.4f' % accridgpca)
"""
Explanation: Ridge Regression
End of explanation
"""
#Initialize and fit the model
abc = RandomForestRegressor(n_jobs=-1)
#Create range of values to fit parameters
k2 = ['mse','mae']
k4 = ['auto','log2']
k3 = [150, 200,250, 300]
parameters = { 'criterion': k2,
'n_estimators': k3,
'max_features': k4
}
#Fit parameters
clf = GridSearchCV(abc, param_grid=parameters, cv=kf)
clf.fit(X_train,y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", clf.best_params_)
#Predict the value of y
y_pred = clf.predict(X_test)
#Mean squared error of the regressor
rf_mse = mean_squared_error(y_pred, y_test)
rf_rmse = np.sqrt(rf_mse)
#Accuracy of the regressor
accrf = cross_val_score(clf, X_test, y_test, cv=kf).mean()
accrfpca = cross_val_score(clf, XPCA, Y, cv=kf).mean()
print('RMSE: %.4f' % rf_rmse)
print('Random Forest Accuracy: %.4f' % accrf)
print('Random Forest Accuracy PCA: %.4f' % accrfpca)
"""
Explanation: Random Forest
End of explanation
"""
#Initialize and fit the model
rf = DecisionTreeRegressor()
rfA = AdaBoostRegressor()
#Create range of values to fit parameters
k1 =['best']
k2 = ['mse','friedman_mse']
k3 = [100, 150, 200]
k4 = ['auto','sqrt']
parameters = {'criterion': k2,
'splitter': k1,
'max_features': k4
}
parameters2 = {'n_estimators':k3}
#Fit parameters
rf1 = GridSearchCV(rf, param_grid=parameters, cv=kf)
rf2 = GridSearchCV(rfA, param_grid=parameters2, cv=kf)
rf1.fit(X_train,y_train)
rf2.fit(X_train,y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", rf1.best_params_)
print("Best Hyper Parameters:", rf2.best_params_)
#Predict the value of y
y_pred = rf1.predict(X_test)
y_pred2 = rf2.predict(X_test)
#Mean squared error of the regressor
dt_mse = mean_squared_error(y_pred, y_test)
dt_rmse = np.sqrt(dt_mse)
ada_mse2 = mean_squared_error(y_pred2, y_test)
ada_rmse2 = np.sqrt(ada_mse2)
#Accuracy of the regressor
accdt = cross_val_score(rf1, X_test, y_test, cv=kf).mean()
accada = cross_val_score(rf2, X_test, y_test, cv=kf).mean()
#Accuracy of the regressor
accdtpca = cross_val_score(rf1, XPCA, Y, cv=kf).mean()
accadapca = cross_val_score(rf2, XPCA, Y, cv=kf).mean()
print('RMSE: %.4f' % dt_rmse)
print('RMSE: %.4f' % ada_rmse2)
print('Decision Tree Accuracy: %.4f' % accdt)
print('Decision Tree Accuracy PCA: %.4f' % accdtpca)
print('Ada Boost Accuracy: %.4f' % accada)
print('Ada Boost Accuracy PCA: %.4f' % accadapca)
"""
Explanation: Decision Tree & Ada Boost Regressor
End of explanation
"""
# Train model
GBR = GradientBoostingRegressor()
k2 = ['ls','huber']
k4 = [200,300, 350]
parameters = { 'loss': k2,
'n_estimators': k4}
#Fit parameters
GBR1 = GridSearchCV(GBR, param_grid=parameters, cv=kf)
GBR1.fit(X_train, y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", GBR1.best_params_)
#Predict the value of y
y_pred = GBR1.predict(X_test)
#Mean squared error of the regressor
gb_mse = mean_squared_error(y_pred, y_test)
gb_rmse = np.sqrt(gb_mse)
#Accuracy of the regressor
accgd = cross_val_score(GBR1, X_test, y_test, cv=kf).mean()
accgdpca = cross_val_score(GBR1, XPCA, Y, cv=kf).mean()
print('RMSE: %.4f' % gb_rmse)
print('Gradient Boosting Regressor Accuracy: %.4f' % accgd)
print('Gradient Boosting Regressor Accuracy PCA: %.4f' % accgdpca)
"""
Explanation: Gradient Boosting
End of explanation
"""
#Initialize and fit the model
KNR = KNeighborsRegressor(n_jobs=-1)
#Create range of values to fit parameters
#Create range of values to fit parameters
k1 = [17,19,21,23,25]
k3 = ['distance']
parameters = {'n_neighbors': k1, 'weights':k3}
#Fit parameters
KNR1 = GridSearchCV(KNR, param_grid=parameters, cv=kf)
KNR1.fit(X_train,y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", KNR1.best_params_)
#Predict the value of y
y_pred = KNR1.predict(X_test)
#Mean squred error of the regressor
KNR_mse = mean_squared_error(y_pred, y_test)
KNR_rmse = np.sqrt(KNR_mse)
#Accuracy of the regressor
accKNR1 = cross_val_score(KNR1, X_test, y_test, cv=kf).mean()
accKNR1pca = cross_val_score(KNR1, XPCA, Y, cv=kf).mean()
print('RMSE: %.4f' % KNR_rmse)
print('KN Regressor Accuracy: %.4f' % accKNR1)
print('KN Regressor Accuracy PCA: %.4f' % accKNR1pca)
"""
Explanation: KNN Regressor
End of explanation
"""
#Initialize and fit the model
SVR0 = SVR()
#Create range of values to fit parameters
k1 = [1, 10, 100,]
k3 = ['linear', 'poly', 'rbf', 'sigmoid']
parameters = {'C': k1,
'kernel':k3}
#Fit parameters
SVR1 = GridSearchCV(SVR0, param_grid=parameters, cv=kf)
SVR1.fit(X_train,y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", SVR1.best_params_)
#Predict the value of y
y_pred = SVR1.predict(X_test)
#Mean squared error of the regressor
svr_mse = mean_squared_error(y_pred, y_test)
svr_rmse = np.sqrt(svr_mse)
#Accuracy of the regressor
accsvr1 = cross_val_score(SVR1, X_test, y_test, cv=kf).mean()
accsvr1pca = cross_val_score(SVR1, XPCA, Y, cv=kf).mean()
print('RMSE: %.4f' % svr_rmse)
print('SVR Accuracy: %.4f' % accsvr1)
print('SVR Accuracy: %.4f' % accsvr1pca)
"""
Explanation: SVR
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_source_alignment.ipynb | bsd-3-clause | import os.path as op
import numpy as np
from mayavi import mlab
import mne
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
raw = mne.io.read_raw_fif(raw_fname)
trans = mne.read_trans(trans_fname)
src = mne.read_source_spaces(op.join(subjects_dir, 'sample', 'bem',
'sample-oct-6-src.fif'))
"""
Explanation: Source alignment and coordinate frames
The aim of this tutorial is to show how to visually assess that the data are
well aligned in space for computing the forward solution, and understand
the different coordinate frames involved in this process.
:depth: 2
Let's start out by loading some data.
End of explanation
"""
mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
subjects_dir=subjects_dir, surfaces='head-dense',
show_axes=True, dig=True, eeg=[], meg='sensors',
coord_frame='meg')
mlab.view(45, 90, distance=0.6, focalpoint=(0., 0., 0.))
print('Distance from head origin to MEG origin: %0.1f mm'
% (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3])))
print('Distance from head origin to MRI origin: %0.1f mm'
% (1000 * np.linalg.norm(trans['trans'][:3, 3])))
"""
Explanation: Understanding coordinate frames
For M/EEG source imaging, there are three coordinate frames that we must
bring into alignment using two 3D transformation matrices <trans_matrices>_
that define how to rotate and translate points in one coordinate frame
to their equivalent locations in another.
:func:mne.viz.plot_alignment is a very useful function for inspecting
these transformations, and the resulting alignment of EEG sensors, MEG
sensors, brain sources, and conductor models. If the subjects_dir and
subject parameters are provided, the function automatically looks for the
Freesurfer MRI surfaces to show from the subject's folder.
We can use the show_axes argument to see the various coordinate frames
given our transformation matrices. These are shown by axis arrows for each
coordinate frame:
shortest arrow is (R)ight/X
medium is forward/(A)nterior/Y
longest is up/(S)uperior/Z
i.e., a RAS coordinate system in each case. We can also set
the coord_frame argument to choose which coordinate
frame the camera should initially be aligned with.
Let's take a look:
End of explanation
"""
mne.viz.plot_alignment(raw.info, trans=None, subject='sample', src=src,
subjects_dir=subjects_dir, dig=True,
surfaces=['head-dense', 'white'], coord_frame='meg')
"""
Explanation: Coordinate frame definitions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. raw:: html
<style>
.pink {color:DarkSalmon; font-weight:bold}
.blue {color:DeepSkyBlue; font-weight:bold}
.gray {color:Gray; font-weight:bold}
.magenta {color:Magenta; font-weight:bold}
.purple {color:Indigo; font-weight:bold}
.green {color:LimeGreen; font-weight:bold}
.red {color:Red; font-weight:bold}
</style>
.. role:: pink
.. role:: blue
.. role:: gray
.. role:: magenta
.. role:: purple
.. role:: green
.. role:: red
Neuromag head coordinate frame ("head", :pink:pink axes)
Defined by the intersection of 1) the line between the LPA
(:red:red sphere) and RPA (:purple:purple sphere), and
2) the line perpendicular to this LPA-RPA line one that goes through
the Nasion (:green:green sphere).
The axes are oriented as X origin→RPA, Y origin→Nasion,
Z origin→upward (orthogonal to X and Y).
.. note:: This gets defined during the head digitization stage during
acquisition, often by use of a Polhemus or other digitizer.
MEG device coordinate frame ("meg", :blue:blue axes)
This is defined by the MEG manufacturers. From the Elekta user manual:
The origin of the device coordinate system is located at the center
of the posterior spherical section of the helmet with the x axis going
from left to right and the y axis pointing front. The z axis is, again,
normal to the x-y plane with positive direction up.
.. note:: The device is coregistered with the head coordinate frame
during acquisition via emission of sinusoidal currents in
head position indicator (HPI) coils
(:magenta:magenta spheres) at the beginning of the
recording. This is stored in raw.info['dev_head_t'].
MRI coordinate frame ("mri", :gray:gray axes)
Defined by Freesurfer, the MRI (surface RAS) origin is at the
center of a 256×256×256 1mm anisotropic volume (may not be in the center
of the head).
.. note:: This is aligned to the head coordinate frame that we
typically refer to in MNE as trans.
A bad example
Let's try using trans=None, which (incorrectly!) equates the MRI
and head coordinate frames.
End of explanation
"""
# same plot, but with the head<->MRI transform properly defined via the
# precomputed 'trans' matrix -- the "good example"
mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
                       src=src, subjects_dir=subjects_dir, dig=True,
                       surfaces=['head-dense', 'white'], coord_frame='meg')
"""
Explanation: It is quite clear that the MRI surfaces (head, brain) are not well aligned
to the head digitization points (dots).
A good example
Here is the same plot, this time with the trans properly defined
(using a precomputed matrix).
End of explanation
"""
# mne.gui.coregistration(subject='sample', subjects_dir=subjects_dir)
"""
Explanation: Defining the head↔MRI trans using the GUI
You can try creating the head↔MRI transform yourself using
:func:mne.gui.coregistration.
First you must load the digitization data from the raw file
(Head Shape Source). The MRI data is already loaded if you provide the
subject and subjects_dir. Toggle Always Show Head Points to see
the digitization points.
To set the landmarks, toggle Edit radio button in MRI Fiducials.
Set the landmarks by clicking the radio button (LPA, Nasion, RPA) and then
clicking the corresponding point in the image.
After doing this for all the landmarks, toggle Lock radio button. You
can omit outlier points, so that they don't interfere with the finetuning.
.. note:: You can save the fiducials to a file and pass
mri_fiducials=True to plot them in
:func:mne.viz.plot_alignment. The fiducials are saved to the
subject's bem folder by default.
* Click Fit Head Shape. This will align the digitization points to the
head surface. Sometimes the fitting algorithm doesn't find the correct
alignment immediately. You can try first fitting using LPA/RPA or fiducials
and then align according to the digitization. You can also finetune
manually with the controls on the right side of the panel.
* Click Save As... (lower right corner of the panel), set the filename
and read it with :func:mne.read_trans.
For more information, see step by step instructions
in these slides
<http://www.slideshare.net/mne-python/mnepython-coregistration>_.
Uncomment the following line to align the data yourself.
End of explanation
"""
# alignment without Freesurfer MRI surfaces: fit a spherical conductor
# model (BEM substitute) from the digitization points in raw.info
sphere = mne.make_sphere_model(info=raw.info, r0='auto', head_radius='auto')
# volume source space constrained to the sphere, 10 mm grid spacing
src = mne.setup_volume_source_space(sphere=sphere, pos=10.)
mne.viz.plot_alignment(
    raw.info, eeg='projected', bem=sphere, src=src, dig=True,
    surfaces=['brain', 'outer_skin'], coord_frame='meg', show_axes=True)
"""
Explanation: Alignment without MRI
The surface alignments above are possible if you have the surfaces available
from Freesurfer. :func:mne.viz.plot_alignment automatically searches for
the correct surfaces from the provided subjects_dir. Another option is
to use a spherical conductor model. It is passed through bem parameter.
End of explanation
"""
|
tuwien-musicir/rp_extract | RP_extract_Tutorial.ipynb | gpl-3.0 | # to install iPython notebook on your computer, use this in Terminal
sudo pip install "ipython[notebook]"
"""
Explanation: <center><h1>Rhythm and Timbre Analysis from Music</h1></center>
<center><h2>Rhythm Pattern Music Features</h2></center>
<center><h2>Extraction and Application Tutorial</h2></center>
<br>
<center><h3>Thomas Lidy and Alexander Schindler</h3>
<h3>lidy@ifs.tuwien.ac.at</h3>
<br>
<b>Institute of Software Technology and Interactive Systems</b><br>TU Wien
<br>
<center><h3>http://www.ifs.tuwien.ac.at/mir</h3></center>
<br>
<br>
Table of Contents
<a href="#requirements">Requirements</a>
<a href="#processing">Audio Processing</a>
<a href="#extraction">Audio Feature Extraction</a>
<a href="#application">Application Scenarios</a><br>
4.1 <a href="#getsoundcloud">Getting Songs from Soundcloud</a><br>
4.2. <a href="#similar">Finding Similar Sounding Songs</a>
<a name="requirements"><font color="#0404B4">1. Requirements</font></a>
This Tutorial uses iPython Notebook for interactive coding. If you use iPython Notebook, you can interactively execute your code (and the code here in the tutorial) directly in the Web browser. Otherwise you can copy & paste code from here to your prefered Python editor.
End of explanation
"""
# in Terminal
git clone https://github.com/tuwien-musicir/rp_extract.git
"""
Explanation: RP Extract Library
This is our mean library for rhythmic and timbral audio feature analysis:
<ul>
<li><a href="https://github.com/tuwien-musicir/rp_extract">RP_extract</a> Rhythm Patterns Audio Feature Extraction Library (includes <a href="https://github.com/WarrenWeckesser/wavio">Wavio</a> for reading wav files (incl. 24 bit)) </li>
</ul>
download <a href="https://github.com/tuwien-musicir/rp_extract/archive/master.zip">ZIP</a> or check out from GitHub:
End of explanation
"""
# in Terminal
sudo pip install numpy scipy matplotlib
"""
Explanation: Python Libraries
RP_extract depends on the following libraries. If not already included in your Python installation,
please install these Python libraries using pip or easy_install:
<ul>
<li><a href="http://www.numpy.org/">Numpy</a>: the fundamental package for scientific computing with Python. It implements a wide range of fast and powerful algebraic functions.</li>
<li><a href="http://www.scipy.org/install.html">Scipy</a>: Scientific Python library</li>
<li><a href="http://matplotlib.org">matplotlib</a>: only needed for plotting (if you skipt the plots below, you are fine without) </li>
</ul>
They can usually be installed via Python PIP installer on command line:
End of explanation
"""
# in Terminal
sudo pip install soundcloud urllib unicsv scikit-learn
git clone https://github.com/tuwien-musicir/mir_utils.git
"""
Explanation: Additional Libraries
These libraries are used in the later tutorial steps, but not necessarily needed if you want to use the RP_extract library alone:
<ul>
<li><a href="https://github.com/tuwien-musicir/mir_utils">mir_utils</a>: these are additional functions used for the Soundcloud Demo data set in the tutorial below</li>
<li><a href="https://developers.soundcloud.com">Soundcloud API</a>: used to retrieve and analyze music from Soundcloud.com</li>
<li>urllib: for downloading content from the web (may be pre-installed already, then you can skip it)</li>
<li><a href="https://pypi.python.org/pypi/unicsv/1.0.0">unicsv</a>: used in rp_extract_files.py for batch iteration over many wav or mp3 files, and storing features in CSV (only needed when you want to do batch feature extraction to CSV)</li>
<li><a href="http://scikit-learn.org/stable/">sklearn</a>: Scikit-Learn machine learning package - used in later tutorial steps for finding similar songs and/or using machine learning / classification
</ul>
End of explanation
"""
import os

# Append the MP3 decoder's install location (ffmpeg/lame/mpg123) to the
# system PATH so the decoder binary can be found when it is invoked.
path = '/path/to/ffmpeg/'
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + path
"""
Explanation: MP3 Decoder
If you want to use MP3 files as input, you need to have one of the following MP3 decoders installed in your system:
<ul>
<li>Windows: FFMpeg (ffmpeg.exe is included in RP_extract library on Github above, nothing to install)</li>
<li>Mac: <a href="http://www.thalictrum.com/en/products/lame.html">Lame for Mac</a> or <a href="http://ffmpegmac.net">FFMPeg for Mac</a></li>
<li>Linux: please install mpg123, lame or ffmpeg from your Software Install Center or Package Repository</li>
</ul>
Note: If you don't install it to a path which can be found by the operating system, use this to add path where you installed the MP3 decoder binary to your system PATH so Python can call it:
End of explanation
"""
%pylab inline
import warnings
warnings.filterwarnings('ignore')
%load_ext autoreload
%autoreload 2
# numerical processing and scientific libraries
import numpy as np
# plotting
import matplotlib.pyplot as plt
# reading wav and mp3 files
from audiofile_read import * # included in the rp_extract git package
# Rhythm Pattern Audio Extraction Library
from rp_extract import rp_extract
from rp_plot import * # can be skipped if you don't want to do any plots
# misc
from urllib import urlopen
import urllib2
import gzip
import StringIO
"""
Explanation: Import + Test your Environment
If you have installed all required libraries, the follwing imports should run without errors.
End of explanation
"""
# provide/adjust the path to your wav or mp3 file
audiofile = "music/test.wav"
# decode the file: returns the sampling rate (Hz), the sample width in
# bytes, and the waveform as an array normalized to (-1, 1) (see the
# normalization note in the explanation below)
samplerate, samplewidth, wavedata = audiofile_read(audiofile)
"""
Explanation: <a name="processing"><font color="#0404B4">2. Audio Processing</font></a>
Feature Extraction is the core of content-based description of audio files. With feature extraction from audio, a computer is able to recognize the content of a piece of music without the need of annotated labels such as artist, song title or genre. This is the essential basis for information retrieval tasks, such as similarity based searches (query-by-example, query-by-humming, etc.), automatic classification into categories, or automatic organization and clustering of music archives.
Content-based description requires the development of feature extraction techniques that analyze the acoustic characteristics of the signal. Features extracted from the audio signal are intended to describe the stylistic content of the music, e.g. beat, presence of voice, timbre, etc.
We use methods from digital signal processing and consider psycho-acoustic models in order to extract suitable semantic information from music. We developed various feature sets, which are appropriate for different tasks.
Load Audio Files
Load audio data from wav or mp3 file
We provide a library (audiofile_read.py) that is capable of reading WAV and MP3 files (MP3 through an external decoder, see Installation Requirements above).
Take any MP3 or WAV file on your disk - or download one from e.g. <a href="http://freemusicarchive.org">freemusicarchive.org</a>.
End of explanation
"""
# basic properties of the decoded wave data
nsamples = wavedata.shape[0]    # total number of samples
nchannels = wavedata.shape[1]   # number of audio channels

# parenthesized single-string prints behave identically under Python 2
print("Successfully read audio file: " + audiofile)
print("%s Hz, %s bit, %s channel(s), %s samples"
      % (samplerate, samplewidth * 8, nchannels, nsamples))
"""
Explanation: <b>Note about Normalization:</b> Normalization is automatically done by audiofile_read() above.
Usually, an audio file stores integer values for the samples. However, for audio processing we need float values that's why the audiofile_read library already converts the input data to float values in the range of (-1,1).
This is taken care of by audiofile_read. In the rare case you don't want to normalize, use this line instead of the one above:
samplerate, samplewidth, wavedata = audiofile_read(audiofile, normalize=False)
In case you use another library to read in WAV files (such as scipy.io.wavfile.read) please have a look into audiofile_read code to do the normalization in the same way. Note that scipy.io.wavfile.read does not correctly read 24bit WAV files.
Audio Information
Let's print some information about the audio file just read:
End of explanation
"""
# limit the number of samples to plot (to 4 seconds) to avoid graphical
# overflow; min() clamps to the file length for shorter recordings,
# replacing the original explicit compare-and-assign
max_samples_plot = min(4 * samplerate, nsamples)

# plot the (possibly truncated) waveform of all channels
plot_waveform(wavedata[0:max_samples_plot], 16, 5);
"""
Explanation: Plot Wave form
we use this to check if the WAV or MP3 file has been correctly loaded
End of explanation
"""
# combine the channels by calculating their arithmetic mean
# (np.mean over axis 1 averages across channels; note it is the
#  arithmetic -- not geometric -- mean, as the text below also states)
wavedata_mono = np.mean(wavedata, axis=1)
"""
Explanation: Audio Pre-processing
For audio processing and feature extraction, we use a single channel only.
Therefore in case we have a stereo signal, we combine the separate channels:
End of explanation
"""
# waveform of the mono mix (first max_samples_plot samples only)
plot_waveform(wavedata_mono[0:max_samples_plot], 16, 3)
# spectrogram via short-time Fourier transform with 512-sample windows
plotstft(wavedata_mono, samplerate, binsize=512, ignore=True);
"""
Explanation: Below an example waveform of a mono channel after combining the stereo channels by arithmetic mean:
End of explanation
"""
# extract the Rhythm Patterns feature with the full psycho-acoustic
# processing chain enabled (dB, Phon and Sone transforms plus
# fluctuation strength weighting)
features = rp_extract(wavedata,              # two-channel wave data of the audio file
                      samplerate,            # sampling rate of the audio file
                      extract_rp=True,       # <== extract this feature!
                      transform_db=True,
                      transform_phon=True,
                      transform_sone=True,
                      fluctuation_strength_weighting=True,
                      skip_leadin_fadeout=1,  # number of segments skipped at lead-in/fade-out
                      step_width=1)

# visualize the extracted Rhythm Pattern matrix
plotrp(features['rp'])
"""
Explanation: <a name="extraction"><font color="#0404B4">3. Audio Feature Extraction</font></a>
Rhythm Patterns
<img width="350" src="http://www.ifs.tuwien.ac.at/mir/audiofeatureextraction/feature_extraction_RP_SSD_RH_web.png" style="float:right;margin-left:20px;margin-bottom:20px">
Rhythm Patterns (also called Fluctuation Patterns) describe modulation amplitudes for a range of modulation frequencies on "critical bands" of the human auditory range, i.e. fluctuations (or rhythm) on a number of frequency bands. The feature extraction process for the Rhythm Patterns is composed of two stages:
First, the specific loudness sensation in different frequency bands is computed, by using a Short Time FFT, grouping the resulting frequency bands to psycho-acoustically motivated critical-bands, applying spreading functions to account for masking effects and successive transformation into the decibel, Phon and Sone scales. This results in a power spectrum that reflects human loudness sensation (Sonogram).
In the second step, the spectrum is transformed into a time-invariant representation based on the modulation frequency, which is achieved by applying another discrete Fourier transform, resulting in amplitude modulations of the loudness in individual critical bands. These amplitude modulations have different effects on human hearing sensation depending on their frequency, the most significant of which, referred to as fluctuation strength, is most intense at 4 Hz and decreasing towards 15 Hz. From that data, reoccurring patterns in the individual critical bands, resembling rhythm, are extracted, which – after applying Gaussian smoothing to diminish small variations – result in a time-invariant, comparable representation of the rhythmic patterns in the individual critical bands.
End of explanation
"""
# extract the Statistical Spectrum Descriptor with the full
# psycho-acoustic processing chain enabled
features = rp_extract(wavedata,              # two-channel wave data of the audio file
                      samplerate,            # sampling rate of the audio file
                      extract_ssd=True,      # <== extract this feature!
                      transform_db=True,
                      transform_phon=True,
                      transform_sone=True,
                      fluctuation_strength_weighting=True,
                      skip_leadin_fadeout=1,  # number of segments skipped at lead-in/fade-out
                      step_width=1)

# visualize the extracted SSD feature
plotssd(features['ssd'])
"""
Explanation: Statistical Spectrum Descriptor
The Sonogram is calculated as in the first part of the Rhythm Patterns calculation. According to the occurrence of beats or other rhythmic variation of energy on a specific critical band, statistical measures are able to describe the audio content. Our goal is to describe the rhythmic content of a piece of audio by computing the following statistical moments on the Sonogram values of each of the critical bands:
mean, median, variance, skewness, kurtosis, min- and max-value
End of explanation
"""
# extract the Rhythm Histogram feature with the full psycho-acoustic
# processing chain enabled
features = rp_extract(wavedata,              # two-channel wave data of the audio file
                      samplerate,            # sampling rate of the audio file
                      extract_rh=True,       # <== extract this feature!
                      transform_db=True,
                      transform_phon=True,
                      transform_sone=True,
                      fluctuation_strength_weighting=True,
                      skip_leadin_fadeout=1,  # number of segments skipped at lead-in/fade-out
                      step_width=1)

# visualize the extracted Rhythm Histogram
plotrh(features['rh'])
"""
Explanation: Rhythm Histogram
The Rhythm Histogram features we use are a descriptor for general rhythmics in an audio document. Contrary to the Rhythm Patterns and the Statistical Spectrum Descriptor, information is not stored per critical band. Rather, the magnitudes of each modulation frequency bin of all critical bands are summed up, to form a histogram of "rhythmic energy" per modulation frequency. The histogram contains 60 bins which reflect modulation frequency between 0 and 10 Hz. For a given piece of audio, the Rhythm Histogram feature set is calculated by taking the median of the histograms of every 6 second segment processed.
End of explanation
"""
# histogram bin with maximum rhythmic energy (+1: bins count from 1)
peak_bin = features['rh'].argmax(axis=0) + 1

# resolution of the modulation frequency axis (0.168 Hz)
# (= 1 / (segment_size / samplerate); expression kept as-is for bit-exact value)
mod_freq_res = 1.0 / (2**18/44100.0)

# translate the peak's modulation frequency (Hz) into beats per minute
bpm = peak_bin * mod_freq_res * 60
print(bpm)
"""
Explanation: Get rough BPM from Rhythm Histogram
By looking at the maximum peak of a Rhythm Histogram, we can determine the beats per minute (BPM) very roughly by multiplying the Index of the Rhythm Histogram bin by the modulation frequency resolution (0.168 Hz) * 60. The resolution of this is however only at +/- 10 bpm.
End of explanation
"""
# choose which feature types to extract (adapt to your needs)
fext = ['rp','ssd','rh','mvd'] # further options: sh, tssd, trh

features = rp_extract(wavedata,
                      samplerate,
                      extract_rp=('rp' in fext),       # Rhythm Patterns
                      extract_ssd=('ssd' in fext),     # Statistical Spectrum Descriptor
                      extract_sh=('sh' in fext),       # Statistical Histograms
                      extract_tssd=('tssd' in fext),   # temporal Statistical Spectrum Descriptor
                      extract_rh=('rh' in fext),       # Rhythm Histogram
                      extract_trh=('trh' in fext),     # temporal Rhythm Histogram
                      extract_mvd=('mvd' in fext),     # Modulation Freq. Variance Descriptor
                      spectral_masking=True,
                      transform_db=True,
                      transform_phon=True,
                      transform_sone=True,
                      fluctuation_strength_weighting=True,
                      skip_leadin_fadeout=1,
                      step_width=1)

# show which feature types we got ...
print(features.keys())

# ... and their dimensionalities
for feat_name in features:
    print("%s %s" % (feat_name, features[feat_name].shape))
"""
Explanation: Modulation Frequency Variance Descriptor
This descriptor measures variations over the critical frequency bands for a specific modulation frequency (derived from a rhythm pattern).
Considering a rhythm pattern, i.e. a matrix representing the amplitudes of 60 modulation frequencies on 24 critical bands, an MVD vector is derived by computing statistical measures (mean, median, variance, skewness, kurtosis, min and max) for each modulation frequency over the 24 bands. A vector is computed for each of the 60 modulation frequencies. Then, an MVD descriptor for an audio file is computed by the mean of multiple MVDs from the audio file's segments, leading to a 420-dimensional vector.
Temporal Statistical Spectrum Descriptor
Feature sets are frequently computed on a per segment basis and do not incorporate time series aspects. As a consequence, TSSD features describe variations over time by including a temporal dimension. Statistical measures (mean, median, variance, skewness, kurtosis, min and max) are computed over the individual statistical spec- trum descriptors extracted from segments at different time positions within a piece of audio. This captures timbral variations and changes over time in the audio spectrum, for all the critical Bark-bands. Thus, a change of rhythmic, instruments, voices, etc. over time is reflected by this feature set. The dimension is 7 times the dimension of an SSD (i.e. 1176).
Temporal Rhythm Histograms
Statistical measures (mean, median, variance, skewness, kurtosis, min and max) are computed over the individual Rhythm Histograms extracted from various segments in a piece of audio. Thus, change and variation of rhythmic aspects in time are captured by this descriptor.
Extract All Features
To extract ALL or selected ones of the before described features, you can use this command:
End of explanation
"""
# START SOUNDCLOUD API
import soundcloud
import urllib # for mp3 download

# To use soundcloud-python, you must first create a Client instance, passing
# at a minimum the client id you obtained when you registered your app.
# If you only need read-only access to public resources, simply provide a
# client id when creating a Client instance:
my_client_id = 'insert your soundcloud client id here'  # PEP 8: space around '='

client = soundcloud.Client(client_id=my_client_id)

# if there is no error after this, it should have worked
"""
Explanation: <a name="application"><font color="#0404B4">4. Application Scenarios</font></a>
Analyze Songs from Soundcloud
<a name="getsoundcloud"><font color="#0404B4">4.1. Getting Songs from Soundcloud</font></a>
In this step we are going to analyze songs from Soundcloud, using the Soundcloud API.
Please get your own API key first by clicking "Register New App" on <a href="https://developers.soundcloud.com">https://developers.soundcloud.com</a>.
Then we can start using the Soundcloud API:
End of explanation
"""
# GET TRACK INFO: resolve a Soundcloud page URL to its track object
#soundcloud_url = 'http://soundcloud.com/forss/flickermood'
soundcloud_url = 'https://soundcloud.com/majorlazer/be-together-feat-wild-belle'

track = client.get('/resolve', url=soundcloud_url)

# print the most interesting track metadata
print("TRACK ID: %s" % track.id)
print("Title: %s" % track.title)
print("Artist:  %s" % track.user['username'])
print("Genre:  %s" % track.genre)
print("%s bpm" % track.bpm)
print("%s times played" % track.playback_count)
print("%s times downloaded" % track.download_count)
print("Downloadable? %s" % track.downloadable)

# if you want to see all information contained in 'track':
print(vars(track))
"""
Explanation: Get Track Info
End of explanation
"""
# show the track's URLs: direct download (only present for downloadable
# tracks), the generic stream URL, and the playable 128 kbit MP3 stream
if hasattr(track, 'download_url'):
    print(track.download_url)

print(track.stream_url)

# query the /streams endpoint for directly playable stream URLs
stream = client.get('/tracks/%d/streams' % track.id)
print(stream.http_mp3_128_url)
"""
Explanation: Get Track URLs
End of explanation
"""
# set the MP3 download directory
mp3_dir = './music'

# build the local file path from the track title
# (os.path.join is the idiomatic, portable form of the os.sep concatenation)
# NOTE(review): titles containing path separators or characters illegal in
# filenames would still break this -- consider sanitizing the title.
mp3_file = os.path.join(mp3_dir, "%s.mp3" % track.title)

# Download the 128 kbit stream MP3
urllib.urlretrieve(stream.http_mp3_128_url, mp3_file)
print("Downloaded " + mp3_file)
"""
Explanation: Download Preview MP3
End of explanation
"""
# use your own soundcloud urls here
soundcloud_urls = [
    'https://soundcloud.com/absencemusik/lana-del-rey-born-to-die-absence-remix',
    'https://soundcloud.com/princefoxmusic/raindrops-feat-kerli-prince-fox-remix',
    'https://soundcloud.com/octobersveryown/remyboyz-my-way-rmx-ft-drake'
]

mp3_dir = './music'

mp3_files = []      # local file paths (one per URL)
own_track_ids = []  # Soundcloud track ids (parallel to mp3_files)

for url in soundcloud_urls:
    print(url)

    # resolve the page URL to a track object
    track = client.get('/resolve', url=url)

    mp3_file = os.path.join(mp3_dir, "%s.mp3" % track.title)

    # NOTE(review): path and id are recorded even if the download below
    # fails or is skipped, so mp3_files may list files that do not exist
    # locally -- kept as-is for compatibility with the later cells.
    mp3_files.append(mp3_file)
    own_track_ids.append(track.id)

    stream = client.get('/tracks/%d/streams' % track.id)

    if hasattr(stream, 'http_mp3_128_url'):
        mp3_url = stream.http_mp3_128_url
    elif hasattr(stream, 'preview_mp3_128_url'):
        # if we can't get the full mp3 we take the 1:30 preview
        mp3_url = stream.preview_mp3_128_url
    else:
        print("No MP3 can be downloaded for this song.")
        mp3_url = None  # in this case we can't get an mp3

    if mp3_url is not None:  # PEP 8: identity check instead of '== None'
        # Download the 128 kbit stream MP3
        urllib.urlretrieve(mp3_url, mp3_file)
        print("Downloaded " + mp3_file)

# show list of mp3 files we got:
# print mp3_files
"""
Explanation: Iterate over a List of Soundcloud Tracks
This will take a number of Souncloud URLs and get the track info for them and download the mp3 stream if available.
End of explanation
"""
# mp3_files is the list of downloaded Soundcloud files collected above;
# all_features will be a list of feature dicts, one per analyzed file
all_features = []

# which feature types to extract -- loop-invariant, so defined once here
# (adapt to your needs; further options: sh, tssd, trh)
fext = ['rp','ssd','rh','mvd'] # sh, tssd, trh

for mp3 in mp3_files:

    # Read the Audio file
    samplerate, samplewidth, wavedata = audiofile_read(mp3)
    print("Successfully read audio file: " + mp3)

    nsamples = wavedata.shape[0]
    nchannels = wavedata.shape[1]
    print("%s Hz, %s bit, %s channel(s), %s samples"
          % (samplerate, samplewidth * 8, nchannels, nsamples))

    # Extract the Audio Features selected in fext
    features = rp_extract(wavedata,
                          samplerate,
                          extract_rp=('rp' in fext),       # Rhythm Patterns
                          extract_ssd=('ssd' in fext),     # Statistical Spectrum Descriptor
                          extract_sh=('sh' in fext),       # Statistical Histograms
                          extract_tssd=('tssd' in fext),   # temporal Statistical Spectrum Descriptor
                          extract_rh=('rh' in fext),       # Rhythm Histogram
                          extract_trh=('trh' in fext),     # temporal Rhythm Histogram
                          extract_mvd=('mvd' in fext),     # Modulation Freq. Variance Descriptor
                          )

    all_features.append(features)

print("Finished analyzing %s files." % len(mp3_files))
"""
Explanation: <a name="soundcloudanalysis"><font color="#0404B4">4.2. Analyzing Songs from Soundcloud</font></a>
Analyze the previously loaded Songs
Now this combines reading all the MP3s we've got and analyzing the features
End of explanation
"""
# iterate over the feature dicts of all analyzed files: show the Rhythm
# Pattern and Rhythm Histogram, and derive a rough BPM estimate from the
# histogram's peak bin (mod_freq_res was defined in an earlier cell)
for song_features in all_features:
    plotrp(song_features['rp'])
    plotrh(song_features['rh'])

    peak_bin = song_features['rh'].argmax(axis=0) + 1  # +1: bins count from 1
    bpm = peak_bin * mod_freq_res * 60
    print("roughly %s bpm" % round(bpm))
"""
Explanation: <b>Note:</b> also see source file <b>rp_extract_files.py</b> on how to iterate over ALL mp3 or wav files in a directory.
Look at the results
End of explanation
"""
# currently this does not work
# (possibly an issue on the Soundcloud side -- see the note in the
#  explanation text; kept for reference)
genre = 'Dancehall'
curr_offset = 0 # Note: the API has a limit of 50 items per response, so to get more you have to query multiple times with an offset.
tracks = client.get('/tracks', genres=genre, offset=curr_offset)
print "Retrieved", len(tracks), "track objects data"
# original Soundcloud example, searching for genre and bpm
# currently this does not work
tracks = client.get('/tracks', genres='punk', bpm={'from': 120})
"""
Explanation: Further Example: Get a list of tracks by Genre
This is an example on how to retrieve Songs from Soundcloud by genre and/or bpm.
currently this does not work ... (issue on Soundcloud side?)
End of explanation
"""
# IMPORTING mir_utils (installed from git above in parallel to rp_extract (otherwise ajust path))
import sys
sys.path.append("../mir_utils")
from demo.NotebookUtils import *
from demo.PlottingUtils import *
from demo.Soundcloud_Demo_Dataset import SoundcloudDemodatasetHandler
# IMPORTS for NearestNeighbor Search
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
"""
Explanation: <a name="similar"><font color="#0404B4">4.3. Finding Similar Sounding Songs</font></a>
In these application scenarios we try to find similar songs or classify music into different categories.
For these Use Cases we need to import a few additional functions from the sklearn package and from mir_utils (installed from git above in parallel to rp_extract):
End of explanation
"""
# show the data set as Souncloud playlist
# (renders the Soundcloud embedded-player widget for playlist 106852365)
iframe = '<iframe width="100%" height="450" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/playlists/106852365&auto_play=false&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=false"></iframe>'
HTML(iframe)
"""
Explanation: The Soundcloud Demo Dataset
The Soundcloud Demo Dataset is a collection of commonly known mainstream radio songs hosted on the online streaming platform Soundcloud. The Dataset is available as playlist and is intended to be used to demonstrate the performance of MIR algorithms with the help of well known songs.
<!-- not working on Mac
<iframe width="100%" height="450" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/playlists/106852365&auto_play=false&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=true"></iframe>
-->
End of explanation
"""
# first argument is local file path for downloaded MP3s and local metadata
# (if present, otherwise None)
# NOTE(review): lazy=False presumably loads the remote feature CSVs
# eagerly -- confirm against mir_utils' SoundcloudDemodatasetHandler
scds = SoundcloudDemodatasetHandler(None, lazy=False)
"""
Explanation: The SoundcloudDemodatasetHandler abstracts the access to the TU-Wien server. On this server the extracted features are stored as csv-files. The SoundcloudDemodatasetHandler remotely loads the features and returns them by request. The features have been extracted using the method explained in the previous sections.
End of explanation
"""
# Initialize the similarity search object.
# n_neighbors=6: when querying the whole collection the top result is the
# query track itself (skipped later), leaving 5 actual recommendations.
sim_song_search = NearestNeighbors(n_neighbors = 6, metric='euclidean')
"""
Explanation: Finding rhythmically similar songs
End of explanation
"""
# set the feature type to search with
feature_set = 'rh'

# fetch the pre-extracted features of the Soundcloud demo data set
demoset_features = scds.features[feature_set]["data"]

# z-score normalize the feature space before the distance-based search
scaler = StandardScaler()
scaled_feature_space = scaler.fit_transform(demoset_features)

# fit the nearest-neighbor index on the normalized features
sim_song_search.fit(scaled_feature_space)
"""
Explanation: Finding rhythmically similar songs using Rhythm Histograms
End of explanation
"""
# Soundcloud id of the query track
query_track_soundcloud_id = 68687842 # Mr. Saxobeat
# embed a player widget for the query track
HTML(scds.getPlayerHTMLForID(query_track_soundcloud_id))
"""
Explanation: Our query-song:
This is a query song from the pre-analyzed data set:
End of explanation
"""
# boolean-mask lookup: the row of the scaled feature space whose id matches the query track
query_track_feature_vector = scaled_feature_space[scds.features[feature_set]["ids"] == query_track_soundcloud_id]
"""
Explanation: Retrieve the feature vector for the query song
End of explanation
"""
# query the fitted index: neighbor indices plus their distances
distances, similar_songs = sim_song_search.kneighbors(
    query_track_feature_vector, return_distance=True)
print(distances)
print(similar_songs)

# For now we use only the song indices without distances
similar_songs = sim_song_search.kneighbors(
    query_track_feature_vector, return_distance=False)[0]

# because we are searching in the entire collection, the top-most result
# is the query song itself -- skip it
similar_songs = similar_songs[1:]
"""
Explanation: Search the nearest neighbors of the query-feature-vector
This retrieves the most similar song indices and their distance:
End of explanation
"""
# map the neighbor row indices back to Soundcloud track ids
similar_soundcloud_ids = scds.features[feature_set]["ids"][similar_songs]
print similar_soundcloud_ids
"""
Explanation: Lookup the corresponding Soundcloud-IDs
End of explanation
"""
# render an embedded player for each similar track
SoundcloudTracklist(similar_soundcloud_ids, width=90, height=120, visual=False)
"""
Explanation: Listen to the results
End of explanation
"""
def search_similar_songs_by_id(query_song_id, feature_set, skip_query=True):
    """Find Soundcloud IDs of the songs most similar to a query song.

    Relies on the module-level ``scds`` demo data set and the
    pre-configured ``sim_song_search`` NearestNeighbors object.

    Parameters:
        query_song_id: Soundcloud track ID of the query song.
        feature_set:   feature type key, e.g. 'rh', 'rp' or 'ssd'.
        skip_query:    if True, drop the first hit (the query song itself).

    Returns:
        Array of Soundcloud track IDs of the nearest neighbours.
    """
    # Standardize features so no single dimension dominates the distance.
    scaled_feature_space = StandardScaler().fit_transform(
        scds.features[feature_set]["data"])
    sim_song_search.fit(scaled_feature_space)
    # Look up the query track's (scaled) feature vector by its Soundcloud ID.
    query_track_feature_vector = scaled_feature_space[
        scds.features[feature_set]["ids"] == query_song_id]
    similar_songs = sim_song_search.kneighbors(
        query_track_feature_vector, return_distance=False)[0]
    if skip_query:
        # Searching the full collection always returns the query song first.
        similar_songs = similar_songs[1:]
    # Map neighbour row indices back to Soundcloud track IDs.
    return scds.features[feature_set]["ids"][similar_songs]
similar_soundcloud_ids = search_similar_songs_by_id(query_track_soundcloud_id,
feature_set='rp')
SoundcloudTracklist(similar_soundcloud_ids, width=90, height=120, visual=False)
"""
Explanation: Finding rhythmically similar songs using Rhythm Patterns
This time we define a function that performs steps analogously to the RH retrieval above:
End of explanation
"""
similar_soundcloud_ids = search_similar_songs_by_id(query_track_soundcloud_id,
feature_set='ssd')
SoundcloudTracklist(similar_soundcloud_ids, width=90, height=120, visual=False)
"""
Explanation: Finding songs based on Timbral Similarity
Finding songs based on timbral similarity using Statistical Spectral Descriptors
End of explanation
"""
track_id = 68687842 # 40439758
results_track_1 = search_similar_songs_by_id(track_id, feature_set='ssd', skip_query=False)
results_track_2 = search_similar_songs_by_id(track_id, feature_set='rh', skip_query=False)
compareSimilarityResults([results_track_1, results_track_2],
width=100, height=120, visual=False,
columns=['Statistical Spectrum Descriptors', 'Rhythm Histograms'])
"""
Explanation: Compare the Results of Timbral and Rhythmic Similarity
First entry is query-track
End of explanation
"""
# check which files we got
mp3_files
# select from the list above the number of the song you want to use as a query (counting from 1)
song_id = 3 # count from 1
# select the feature vector type
feat_type = 'rp' # 'rh' or 'ssd' or 'rp'
# from the all_features data structure, we get the desired feature vector belonging to that song
query_feature_vector = all_features[song_id - 1][feat_type]
# get all the feature vectors of desired feature type from the Soundcloud demo set
demo_features = scds.features[feat_type]["data"]
# Initialize Neighbour Search space with demo set features
sim_song_search.fit(demo_features)
# use our own query_feature_vector for search in the demo set
(distances, similar_songs) = sim_song_search.kneighbors(query_feature_vector, return_distance=True)
print distances
print similar_songs
# now we got the song indices for similar songs in the demo set
similar_songs = similar_songs[0]
similar_songs
# and we get the according Soundcloud Track IDs
similar_soundcloud_ids = scds.features[feat_type]["ids"][similar_songs]
similar_soundcloud_ids
# we add our own Track ID at the beginning to show the seed song below:
my_track_id = own_track_ids[song_id - 1]
print my_track_id
result = np.insert(similar_soundcloud_ids,0,my_track_id)
"""
Explanation: Using your Own Query Song from the self-extracted Soundcloud tracks above
End of explanation
"""
print "Feature Type:", feat_type
SoundcloudTracklist(result, width=90, height=120, visual=False)
"""
Explanation: Visual Player with the Songs most similar to our Own Song
first song is the query song
End of explanation
"""
def search_similar_songs_with_combined_sets(scds, query_song_id, feature_sets,
                                            skip_query=True, n_neighbors=6):
    """Find similar songs using several feature sets merged into one space.

    Parameters:
        scds:          pre-analyzed Soundcloud demo data set object.
        query_song_id: Soundcloud track ID of the query song.
        feature_sets:  list of feature type keys to combine, e.g. ['ssd', 'rh'].
        skip_query:    if True, drop the first hit (the query song itself).
        n_neighbors:   number of neighbours to retrieve.

    Returns:
        Array of Soundcloud track IDs of the nearest neighbours.
    """
    # Concatenate the requested feature sets into a single feature matrix.
    features = scds.getCombinedFeaturesets(feature_sets)
    sim_song_search = NearestNeighbors(n_neighbors=n_neighbors, metric='l2')
    # Standardize so each (combined) dimension contributes comparably.
    scaled_feature_space = StandardScaler().fit_transform(features)
    sim_song_search.fit(scaled_feature_space)
    # All feature sets share the same track ordering, so the first set's
    # index lookup is valid for the combined matrix.
    query_track_feature_vector = scaled_feature_space[
        scds.getFeatureIndexByID(query_song_id, feature_sets[0])]
    similar_songs = sim_song_search.kneighbors(
        query_track_feature_vector, return_distance=False)[0]
    if skip_query:
        # The top-most result is the query song itself; skip it.
        similar_songs = similar_songs[1:]
    # Translate neighbour row indices back into Soundcloud track IDs.
    return scds.getIdsByIndex(similar_songs, feature_sets[0])
feature_sets = ['ssd','rh']
compareSimilarityResults([search_similar_songs_with_combined_sets(scds, 68687842, feature_sets=feature_sets, n_neighbors=5),
search_similar_songs_with_combined_sets(scds, 40439758, feature_sets=feature_sets, n_neighbors=5)],
width=100, height=120, visual=False,
columns=[scds.getNameByID(68687842),
scds.getNameByID(40439758)])
"""
Explanation: Add On: Combining different Music Descriptors
Here we merge SSD and RH features together to account for <b>both</b> timbral and rhythmic similarity:
End of explanation
"""
|
AllenDowney/ProbablyOverthinkingIt | generations.ipynb | mit | from __future__ import print_function, division
from thinkstats2 import Pmf, Cdf
import thinkstats2
import thinkplot
import pandas as pd
import numpy as np
from scipy.stats import entropy
%matplotlib inline
"""
Explanation: Do generations exist?
This notebook contains a "one-day paper", my attempt to pose a research question, answer it, and publish the results in one work day (May 13, 2016).
Copyright 2016 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
End of explanation
"""
dct = thinkstats2.ReadStataDct('GSS.dct')
"""
Explanation: What's a generation supposed to be, anyway?
If generation names like "Baby Boomers" and "Generation X" are just a short way of referring to people born during certain intervals, you can use them without implying that these categories have any meaningful properties.
But if these names are supposed to refer to generations with identifiable characteristics, we can test whether these generations exist. In this notebook, I suggest one way to formulate generations as a claim about the world, and test it.
Suppose we take a representative sample of people in the U.S., divide them into cohorts by year of birth, and measure the magnitude of the differences between consecutive cohorts. Of course, there are many ways we could define and measure these differences; I'll suggest one in a minute.
But ignoring the details for now, what would those difference look like if generations exist? Presumably, the differences between successive cohorts would be relatively small within each generation, and bigger between generations.
If we plot the cumulative total of these differences, we expect to see something like the figure below (left), with relatively fast transitions (big differences) between generations, and periods of slow change (small differences) within generations.
On the other hand, if there are no generations, we expect the differences between successive cohorts to be about the same. In that case the cumulative differences should look like a straight line, as in the figure below (right):
So, how should we quantify the differences between successive cohorts. When people talk about generational differences, they are often talking about differences in attitudes about political, social issues, and other cultural questions. Fortunately, these are exactly the sorts of things surveyed by the General Social Survey (GSS).
To gather data, I selected question from the GSS that were asked during the last three cycles (2010, 2012, 2014) and that were coded on a 5-point Likert scale.
You can see the variables that met these criteria, and download the data I used, here:
https://gssdataexplorer.norc.org/projects/13170/variables/data_cart
Now let's see what we got.
First I load the data dictionary, which contains the metadata:
End of explanation
"""
df = dct.ReadFixedWidth('GSS.dat')
"""
Explanation: Then I load the data itself:
End of explanation
"""
df.drop(['immcrime', 'pilloky'], axis=1, inplace=True)
"""
Explanation: I'm going to drop two variables that turned out to be mostly N/A
End of explanation
"""
df.ix[:, 3:] = df.ix[:, 3:].replace([8, 9, 0], np.nan)
df.head()
"""
Explanation: And then replace the special codes 8, 9, and 0 with N/A
End of explanation
"""
df.age.replace([99], np.nan, inplace=True)
"""
Explanation: For the age variable, I also have to replace 99 with N/A
End of explanation
"""
thinkplot.Hist(Pmf(df.choices))
"""
Explanation: Here's an example of a typical variable on a 5-point Likert scale.
End of explanation
"""
df['yrborn'] = df.year - df.age
"""
Explanation: I have to compute year born
End of explanation
"""
pmf_yrborn = Pmf(df.yrborn)
thinkplot.Cdf(pmf_yrborn.MakeCdf())
"""
Explanation: Here's what the distribution looks like. The survey includes roughly equal numbers of people born each year from 1922 to 1996.
End of explanation
"""
# Drop respondents with unknown age, order by year of birth, and assign
# consecutive blocks of 200 respondents to the same cohort number.
df_sorted = df[~df.age.isnull()].sort_values(by='yrborn')
df_sorted['counter'] = np.arange(len(df_sorted), dtype=int) // 200
df_sorted[['year', 'age', 'yrborn', 'counter']].head()
df_sorted[['year', 'age', 'yrborn', 'counter']].tail()
"""
Explanation: Next I sort the respondents by year born and then assign them to cohorts so there are 200 people in each cohort.
End of explanation
"""
thinkplot.Cdf(Cdf(df_sorted.counter))
None
"""
Explanation: I end up with the same number of people in each cohort (except the last).
End of explanation
"""
groups = df_sorted.groupby('counter')
"""
Explanation: Then I can group by cohort.
End of explanation
"""
class Cohort:
    """A birth-year cohort of survey respondents.

    Stores the respondents' rows in a DataFrame and, once make_pmfs()
    has run, one Pmf of responses per survey question.
    """

    # Bookkeeping columns that are not survey questions.
    skip = ['year', 'id_', 'age', 'yrborn', 'cohort', 'counter']

    def __init__(self, name, df):
        """name: cohort identifier; df: DataFrame with this cohort's rows."""
        self.name = name
        self.df = df
        self.pmf_map = {}

    def make_pmfs(self):
        """Build a normalized Pmf of responses for every question column."""
        for column in self.df.columns:
            if column in self.skip:
                continue
            pmf = Pmf(self.df[column].dropna())
            self.pmf_map[column] = pmf
            try:
                pmf.Normalize()
            except ValueError:
                # A Pmf with zero total mass cannot be normalized;
                # report the offending cohort/column and keep going.
                print(self.name, column)

    def total_divergence(self, other, divergence_func):
        """Sum the per-question divergences between this cohort and other.

        divergence_func: callable taking two Pmfs and returning a float.
        """
        total = 0
        for column, own_pmf in self.pmf_map.items():
            total += divergence_func(own_pmf, other.pmf_map[column])
        return total
"""
Explanation: I'll instantiate an object for each cohort.
End of explanation
"""
def MeanDivergence(pmf1, pmf2):
    """Return the absolute difference between the means of two Pmfs."""
    mean1 = pmf1.Mean()
    mean2 = pmf2.Mean()
    return abs(mean1 - mean2)
"""
Explanation: To compute the difference between successive cohorts, I'll loop through the questions, compute Pmfs to represent the responses, and then compute the difference between Pmfs.
I'll use two functions to compute these differences. One computes the difference in means:
End of explanation
"""
def JSDivergence(pmf1, pmf2):
    """Compute the Jensen-Shannon divergence between two Pmfs.

    Evaluates both Pmfs over the union of their supports; scipy's
    entropy() normalizes its arguments, so the unscaled mixture p + q
    works as the midpoint distribution.
    """
    support = set(pmf1.Values()) | set(pmf2.Values())
    p = np.asarray(pmf1.Probs(support))
    q = np.asarray(pmf2.Probs(support))
    mixture = p + q
    return (entropy(p, mixture) + entropy(q, mixture)) / 2
"""
Explanation: The other computes the Jensen-Shannon divergence
End of explanation
"""
# Instantiate a Cohort per group and precompute its response Pmfs.
cohorts = []
for name, group in groups:
    cohort = Cohort(name, group)
    cohort.make_pmfs()
    cohorts.append(cohort)
len(cohorts)
"""
Explanation: First I'll loop through the groups and make Cohort objects
End of explanation
"""
cohorts[11].df.yrborn.describe()
"""
Explanation: Each cohort spans a range of about 3 birth years. For example, the cohort at index 11 spans 1965 to 1967.
End of explanation
"""
cohorts[0].total_divergence(cohorts[1], MeanDivergence)
"""
Explanation: Here's the total divergence between the first two cohorts, using the mean difference between Pmfs.
End of explanation
"""
cohorts[0].total_divergence(cohorts[1], JSDivergence)
"""
Explanation: And here's the total J-S divergence:
End of explanation
"""
res = []
cumulative = 0
# Walk through consecutive cohort pairs, accumulating the total
# absolute difference in means across all survey questions.
for i in range(len(cohorts)-1):
    td = cohorts[i].total_divergence(cohorts[i+1], MeanDivergence)
    cumulative += td
    print(i, td, cumulative)
    res.append((i, cumulative))
"""
Explanation: This loop computes the (absolute value) difference between successive cohorts and the cumulative sum of the differences.
End of explanation
"""
xs, ys = zip(*res)
thinkplot.Plot(xs, ys)
thinkplot.Config(xlabel='Cohort #',
ylabel='Cumulative difference in means',
legend=False)
"""
Explanation: The results are a nearly straight line, suggesting that there are no meaningful generations, at least as I've formulated the question.
End of explanation
"""
res = []
cumulative = 0
# Repeat the cumulative-difference computation, this time using the
# Jensen-Shannon divergence between the response distributions.
for i in range(len(cohorts)-1):
    td = cohorts[i].total_divergence(cohorts[i+1], JSDivergence)
    cumulative += td
    print(i, td, cumulative)
    res.append((i, cumulative))
# Plot the cumulative divergence; a near-straight line suggests no
# generation-sized jumps between consecutive cohorts.
xs, ys = zip(*res)
thinkplot.Plot(xs, ys)
thinkplot.Config(xlabel='Cohort #',
                 ylabel='Cumulative JS divergence',
                 legend=False)
"""
Explanation: The results looks pretty much the same using J-S divergence.
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.13.0/examples/notebooks/generated/wls.ipynb | bsd-3-clause | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from scipy import stats
from statsmodels.iolib.table import SimpleTable, default_txt_fmt
np.random.seed(1024)
"""
Explanation: Weighted Least Squares
End of explanation
"""
nsample = 50
x = np.linspace(0, 20, nsample)
# True design matrix: intercept, linear term, and quadratic term.
X = np.column_stack((x, (x - 5) ** 2))
X = sm.add_constant(X)
beta = [5.0, 0.5, -0.01]
sig = 0.5  # baseline noise standard deviation
# Error scale per observation: the last 40% get three times the noise.
w = np.ones(nsample)
w[nsample * 6 // 10 :] = 3
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + sig * w * e
# Keep only intercept and linear columns: the estimated model is
# deliberately misspecified (true model is quadratic).
X = X[:, [0, 1]]
"""
Explanation: WLS Estimation
Artificial data: Heteroscedasticity 2 groups
Model assumptions:
Misspecification: true model is quadratic, estimate only linear
Independent noise/error term
Two groups for error variance, low and high variance groups
End of explanation
"""
mod_wls = sm.WLS(y, X, weights=1.0 / (w ** 2))
res_wls = mod_wls.fit()
print(res_wls.summary())
"""
Explanation: WLS knowing the true variance ratio of heteroscedasticity
In this example, w is the standard deviation of the error. WLS requires that the weights are proportional to the inverse of the error variance.
End of explanation
"""
res_ols = sm.OLS(y, X).fit()
print(res_ols.params)
print(res_wls.params)
"""
Explanation: OLS vs. WLS
Estimate an OLS model for comparison:
End of explanation
"""
# Stack the standard errors of each estimator into one table:
# WLS with the true weights vs. plain OLS and its
# heteroscedasticity-robust (HC0-HC3) variants.
se = np.vstack(
    [
        [res_wls.bse],
        [res_ols.bse],
        [res_ols.HC0_se],
        [res_ols.HC1_se],
        [res_ols.HC2_se],
        [res_ols.HC3_se],
    ]
)
se = np.round(se, 4)
# sm.add_constant prepends the intercept, so the first entry of bse
# belongs to the constant and the second to the slope coefficient.
colnames = ["const", "x1"]
# One label per estimator, in the order stacked above
# (fixed: the original listed OLS_HC3 twice and omitted OLS_HC2).
rownames = ["WLS", "OLS", "OLS_HC0", "OLS_HC1", "OLS_HC2", "OLS_HC3"]
tabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt)
print(tabl)
"""
Explanation: Compare the WLS standard errors to heteroscedasticity corrected OLS standard errors:
End of explanation
"""
covb = res_ols.cov_params()
prediction_var = res_ols.mse_resid + (X * np.dot(covb, X.T).T).sum(1)
prediction_std = np.sqrt(prediction_var)
tppf = stats.t.ppf(0.975, res_ols.df_resid)
pred_ols = res_ols.get_prediction()
iv_l_ols = pred_ols.summary_frame()["obs_ci_lower"]
iv_u_ols = pred_ols.summary_frame()["obs_ci_upper"]
"""
Explanation: Calculate OLS prediction interval:
End of explanation
"""
pred_wls = res_wls.get_prediction()
iv_l = pred_wls.summary_frame()["obs_ci_lower"]
iv_u = pred_wls.summary_frame()["obs_ci_upper"]
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(x, y, "o", label="Data")
ax.plot(x, y_true, "b-", label="True")
# OLS
ax.plot(x, res_ols.fittedvalues, "r--")
ax.plot(x, iv_u_ols, "r--", label="OLS")
ax.plot(x, iv_l_ols, "r--")
# WLS
ax.plot(x, res_wls.fittedvalues, "g--.")
ax.plot(x, iv_u, "g--", label="WLS")
ax.plot(x, iv_l, "g--")
ax.legend(loc="best")
"""
Explanation: Draw a plot to compare predicted values in WLS and OLS:
End of explanation
"""
# First stage: estimate the group error variances from the OLS residuals,
# one for the low-noise group (w == 1) ...
resid1 = res_ols.resid[w == 1.0]
var1 = resid1.var(ddof=int(res_ols.df_model) + 1)
# ... and one for the high-noise group.
resid2 = res_ols.resid[w != 1.0]
var2 = resid2.var(ddof=int(res_ols.df_model) + 1)
# Estimated relative standard deviation of the noisy group vs. the quiet one.
w_est = w.copy()
w_est[w != 1.0] = np.sqrt(var2) / np.sqrt(var1)
# Second stage: WLS with weights proportional to the inverse of the
# estimated error variance (hence the square).
res_fwls = sm.WLS(y, X, 1.0 / ((w_est ** 2))).fit()
print(res_fwls.summary())
"""
Explanation: Feasible Weighted Least Squares (2-stage FWLS)
Like w, w_est is proportional to the standard deviation, and so must be squared.
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion | notebooks/tfx_pipelines/walkthrough/solutions/tfx_walkthrough.ipynb | apache-2.0 | import os
import tempfile
import time
from pprint import pprint
import absl
import tensorflow as tf
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
import tfx
from tensorflow_metadata.proto.v0 import (
anomalies_pb2,
schema_pb2,
statistics_pb2,
)
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components import (
CsvExampleGen,
Evaluator,
ExampleValidator,
InfraValidator,
Pusher,
ResolverNode,
SchemaGen,
StatisticsGen,
Trainer,
Transform,
Tuner,
)
from tfx.components.common_nodes.importer_node import ImporterNode
from tfx.components.trainer import executor as trainer_executor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata, pipeline
from tfx.orchestration.experimental.interactive.interactive_context import (
InteractiveContext,
)
from tfx.proto import (
evaluator_pb2,
example_gen_pb2,
infra_validator_pb2,
pusher_pb2,
trainer_pb2,
)
from tfx.proto.evaluator_pb2 import SingleSlicingSpec
from tfx.types import Channel
from tfx.types.standard_artifacts import (
HyperParameters,
InfraBlessing,
Model,
ModelBlessing,
)
"""
Explanation: TFX Components Walk-through
Learning Objectives
Develop a high level understanding of TFX pipeline components.
Learn how to use a TFX Interactive Context for prototype development of TFX pipelines.
Work with the Tensorflow Data Validation (TFDV) library to check and analyze input data.
Utilize the Tensorflow Transform (TFT) library for scalable data preprocessing and feature transformations.
Employ the Tensorflow Model Analysis (TFMA) library for model evaluation.
In this lab, you will work with the Covertype Data Set and use TFX to analyze, understand, and pre-process the dataset and train, analyze, validate, and deploy a multi-class classification model to predict the type of forest cover from cartographic features.
You will utilize TFX Interactive Context to work with the TFX components interactively in a Jupyter notebook environment. Working in an interactive notebook is useful when doing initial data exploration, experimenting with models, and designing ML pipelines. You should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts. In a production deployment of TFX on GCP, you will use an orchestrator such as Kubeflow Pipelines, or Cloud Composer. In an interactive mode, the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells. In a production deployment, ML Metadata will be managed in a scalable database like MySQL, and artifacts in a persistent store such as Google Cloud Storage. In an interactive mode, both properties and payloads are stored in a local file system of the Jupyter host.
Setup Note:
Currently, TFMA visualizations do not render properly in JupyterLab. It is recommended to run this notebook in Jupyter Classic Notebook. To switch to Classic Notebook select Launch Classic Notebook from the Help menu.
End of explanation
"""
print("Tensorflow Version:", tf.__version__)
print("TFX Version:", tfx.__version__)
print("TFDV Version:", tfdv.__version__)
print("TFMA Version:", tfma.VERSION_STRING)
absl.logging.set_verbosity(absl.logging.INFO)
"""
Explanation: Note: this lab was developed and tested with the following TF ecosystem package versions:
Tensorflow Version: 2.3.1
TFX Version: 0.25.0
TFDV Version: 0.25.0
TFMA Version: 0.25.0
If you encounter errors with the above imports (e.g. TFX component not found), check your package versions in the cell below.
End of explanation
"""
os.environ["PATH"] += os.pathsep + "/home/jupyter/.local/bin"
"""
Explanation: If the versions above do not match, update your packages in the current Jupyter kernel below. The default %pip package installation location is not on your system installation PATH; use the command below to append the local installation path to pick up the latest package versions. Note that you may also need to restart your notebook kernel to pick up the specified package versions and re-run the imports cell above before proceeding with the lab.
End of explanation
"""
ARTIFACT_STORE = os.path.join(os.sep, "home", "jupyter", "artifact-store")
SERVING_MODEL_DIR = os.path.join(os.sep, "home", "jupyter", "serving_model")
DATA_ROOT = "gs://workshop-datasets/covertype/small"
"""
Explanation: Configure lab settings
Set constants, location paths and other environment settings.
End of explanation
"""
PIPELINE_NAME = "tfx-covertype-classifier"
PIPELINE_ROOT = os.path.join(
ARTIFACT_STORE, PIPELINE_NAME, time.strftime("%Y%m%d_%H%M%S")
)
os.makedirs(PIPELINE_ROOT, exist_ok=True)
context = InteractiveContext(
pipeline_name=PIPELINE_NAME,
pipeline_root=PIPELINE_ROOT,
metadata_connection_config=None,
)
"""
Explanation: Creating Interactive Context
TFX Interactive Context allows you to create and run TFX Components in an interactive mode. It is designed to support experimentation and development in a Jupyter Notebook environment. It is an experimental feature and major changes to interface and functionality are expected. When creating the interactive context you can specify the following parameters:
- pipeline_name - Optional name of the pipeline for ML Metadata tracking purposes. If not specified, a name will be generated for you.
- pipeline_root - Optional path to the root of the pipeline's outputs. If not specified, an ephemeral temporary directory will be created and used.
- metadata_connection_config - Optional metadata_store_pb2.ConnectionConfig instance used to configure connection to a ML Metadata connection. If not specified, an ephemeral SQLite MLMD connection contained in the pipeline_root directory with file name "metadata.sqlite" will be used.
End of explanation
"""
output_config = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(
splits=[
# TODO: Your code to configure train data split
# TODO: Your code to configure eval data split
example_gen_pb2.SplitConfig.Split(name="train", hash_buckets=4),
example_gen_pb2.SplitConfig.Split(name="eval", hash_buckets=1),
]
)
)
example_gen = tfx.components.CsvExampleGen(
input_base=DATA_ROOT, output_config=output_config
)
context.run(example_gen)
"""
Explanation: Ingesting data using ExampleGen
In any ML development process the first step is to ingest the training and test datasets. The ExampleGen component ingests data into a TFX pipeline. It consumes external files/services to generate a set of files in the TFRecord format, which will be used by other TFX components. It can also shuffle the data and split it into an arbitrary number of partitions.
<img src=../../images/ExampleGen.png width="300">
Configure and run CsvExampleGen
In this exercise, you use the CsvExampleGen specialization of ExampleGen to ingest CSV files from a GCS location and emit them as tf.Example records for consumption by downstream TFX pipeline components. Your task is to configure the component to create 80-20 train and eval splits. Hint: review the ExampleGen proto definition to split your data with hash buckets.
End of explanation
"""
examples_uri = example_gen.outputs["examples"].get()[0].uri
tfrecord_filenames = [
os.path.join(examples_uri, "train", name)
for name in os.listdir(os.path.join(examples_uri, "train"))
]
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
for tfrecord in dataset.take(2):
example = tf.train.Example()
example.ParseFromString(tfrecord.numpy())
for name, feature in example.features.feature.items():
if feature.HasField("bytes_list"):
value = feature.bytes_list.value
if feature.HasField("float_list"):
value = feature.float_list.value
if feature.HasField("int64_list"):
value = feature.int64_list.value
print(f"{name}: {value}")
print("******")
"""
Explanation: Examine the ingested data
End of explanation
"""
statistics_gen = tfx.components.StatisticsGen(
examples=example_gen.outputs["examples"]
)
context.run(statistics_gen)
"""
Explanation: Generating statistics using StatisticsGen
The StatisticsGen component generates data statistics that can be used by other TFX components. StatisticsGen uses TensorFlow Data Validation. StatisticsGen generate statistics for each split in the ExampleGen component's output. In our case there two splits: train and eval.
<img src=../../images/StatisticsGen.png width="200">
Configure and run the StatisticsGen component
End of explanation
"""
context.show(statistics_gen.outputs["statistics"])
"""
Explanation: Visualize statistics
The generated statistics can be visualized using the tfdv.visualize_statistics() function from the TensorFlow Data Validation library or using a utility method of the InteractiveContext object. In fact, most of the artifacts generated by the TFX components can be visualized using InteractiveContext.
End of explanation
"""
schema_gen = SchemaGen(
statistics=statistics_gen.outputs["statistics"], infer_feature_shape=False
)
context.run(schema_gen)
"""
Explanation: Infering data schema using SchemaGen
Some TFX components use a description of the input data called a schema. The schema is an instance of schema.proto. It can specify data types for feature values, whether a feature has to be present in all examples, allowed value ranges, and other properties. SchemaGen automatically generates the schema by inferring types, categories, and ranges from data statistics. The auto-generated schema is best-effort and only tries to infer basic properties of the data. It is expected that developers review and modify it as needed. SchemaGen uses TensorFlow Data Validation.
The SchemaGen component generates the schema using the statistics for the train split. The statistics for other splits are ignored.
<img src=../../images/SchemaGen.png width="200">
Configure and run the SchemaGen components
End of explanation
"""
context.show(schema_gen.outputs["schema"])
"""
Explanation: Visualize the inferred schema
End of explanation
"""
schema_proto_path = "{}/{}".format(
schema_gen.outputs["schema"].get()[0].uri, "schema.pbtxt"
)
schema = tfdv.load_schema_text(schema_proto_path)
"""
Explanation: Updating the auto-generated schema
In most cases the auto-generated schemas must be fine-tuned manually using insights from data exploration and/or domain knowledge about the data. For example, you know that in the covertype dataset there are seven types of forest cover (coded using 1-7 range) and that the value of the Slope feature should be in the 0-90 range. You can manually add these constraints to the auto-generated schema by setting the feature domain.
Load the auto-generated schema proto file
End of explanation
"""
# TODO: Your code to restrict the categorical feature Cover_Type between the values of 0 and 6.
# TODO: Your code to restrict the numeric feature Slope between 0 and 90.
tfdv.set_domain(
schema,
"Cover_Type",
schema_pb2.IntDomain(name="Cover_Type", min=0, max=6, is_categorical=True),
)
tfdv.set_domain(
schema, "Slope", schema_pb2.IntDomain(name="Slope", min=0, max=90)
)
tfdv.display_schema(schema=schema)
"""
Explanation: Modify the schema
You can use the protocol buffer APIs to modify the schema.
Hint: Review the TFDV library API documentation on setting a feature's domain. You can use the protocol buffer APIs to modify the schema. Review the Tensorflow Metadata proto definition for configuration options.
End of explanation
"""
schema_dir = os.path.join(ARTIFACT_STORE, "schema")
tf.io.gfile.makedirs(schema_dir)
schema_file = os.path.join(schema_dir, "schema.pbtxt")
tfdv.write_schema_text(schema, schema_file)
!cat {schema_file}
"""
Explanation: Save the updated schema
End of explanation
"""
schema_importer = ImporterNode(
instance_name="Schema_Importer",
source_uri=schema_dir,
artifact_type=tfx.types.standard_artifacts.Schema,
reimport=False,
)
context.run(schema_importer)
"""
Explanation: Importing the updated schema using ImporterNode
The ImporterNode component allows you to import an external artifact, including the schema file, so it can be used by other TFX components in your workflow.
Configure and run the ImporterNode component
End of explanation
"""
context.show(schema_importer.outputs["result"])
"""
Explanation: Visualize the imported schema
End of explanation
"""
# TODO: Complete ExampleValidator
# Hint: review the visual above and review the documentation on ExampleValidator's inputs and outputs:
# https://www.tensorflow.org/tfx/guide/exampleval
# Make sure you use the output of the schema_importer component created above.
example_validator = ExampleValidator(
instance_name="Data_Validation",
statistics=statistics_gen.outputs["statistics"],
schema=schema_importer.outputs["result"],
)
context.run(example_validator)
"""
Explanation: Validating data with ExampleValidator
The ExampleValidator component identifies anomalies in data. It identifies anomalies by comparing data statistics computed by the StatisticsGen component against a schema generated by SchemaGen or imported by ImporterNode.
ExampleValidator can detect different classes of anomalies. For example it can:
perform validity checks by comparing data statistics against a schema
detect training-serving skew by comparing training and serving data.
detect data drift by looking at a series of data.
The ExampleValidator component validates the data in the eval split only. Other splits are ignored.
<img src=../../images/ExampleValidator.png width="350">
Configure and run the ExampleValidator component
End of explanation
"""
train_uri = example_validator.outputs["anomalies"].get()[0].uri
train_anomalies_filename = os.path.join(train_uri, "train/anomalies.pbtxt")
!cat $train_anomalies_filename
"""
Explanation: Examine the output of ExampleValidator
The output artifact of the ExampleValidator is the anomalies.pbtxt file describing an anomalies_pb2.Anomalies protobuf.
End of explanation
"""
context.show(example_validator.outputs["output"])
"""
Explanation: Visualize validation results
The file anomalies.pbtxt can be visualized using context.show.
End of explanation
"""
TRANSFORM_MODULE = "preprocessing.py"
!cat {TRANSFORM_MODULE}
"""
Explanation: In our case no anomalies were detected in the eval split.
For a detailed deep dive into data validation and schema generation refer to the lab-31-tfdv-structured-data lab.
Preprocessing data with Transform
The Transform component performs data transformation and feature engineering. The Transform component consumes tf.Examples emitted from the ExampleGen component and emits the transformed feature data and the SavedModel graph that was used to process the data. The emitted SavedModel can then be used by serving components to make sure that the same data pre-processing logic is applied at training and serving.
The Transform component requires more code than many other components because of the arbitrary complexity of the feature engineering that you may need for the data and/or model that you're working with. It requires code files to be available which define the processing needed.
<img src=../../images/Transform.png width="400">
Define the pre-processing module
To configure Transform, you need to encapsulate your pre-processing code in the Python preprocessing_fn function and save it to a python module that is then provided to the Transform component as an input. This module will be loaded by Transform and the preprocessing_fn function will be called when the Transform component runs.
In most cases, your implementation of the preprocessing_fn makes extensive use of TensorFlow Transform for performing feature engineering on your dataset.
End of explanation
"""
transform = Transform(
examples=example_gen.outputs["examples"],
schema=schema_importer.outputs["result"],
module_file=TRANSFORM_MODULE,
)
context.run(transform)
"""
Explanation: Configure and run the Transform component.
End of explanation
"""
os.listdir(transform.outputs["transform_graph"].get()[0].uri)
"""
Explanation: Examine the Transform component's outputs
The Transform component has 2 outputs:
transform_graph - contains the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models).
transformed_examples - contains the preprocessed training and evaluation data.
Take a peek at the transform_graph artifact: it points to a directory containing 3 subdirectories:
End of explanation
"""
# List the transformed-examples artifact directory.
os.listdir(transform.outputs["transformed_examples"].get()[0].uri)

# Decode a couple of transformed train-split records to sanity-check the
# preprocessing output.
transform_uri = transform.outputs["transformed_examples"].get()[0].uri
tfrecord_filenames = [
    os.path.join(transform_uri, "train", name)
    for name in os.listdir(os.path.join(transform_uri, "train"))
]
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
for tfrecord in dataset.take(2):
    example = tf.train.Example()
    example.ParseFromString(tfrecord.numpy())
    for name, feature in example.features.feature.items():
        # A tf.train.Feature stores exactly one of the three list kinds
        # (proto oneof). Reset `value` each iteration and use elif so a
        # feature with no kind set can no longer print a stale `value`
        # left over from the previous feature (or raise NameError on the
        # first one).
        value = None
        if feature.HasField("bytes_list"):
            value = feature.bytes_list.value
        elif feature.HasField("float_list"):
            value = feature.float_list.value
        elif feature.HasField("int64_list"):
            value = feature.int64_list.value
        print(f"{name}: {value}")
        print("******")
"""
Explanation: And the transform.examples artifact
End of explanation
"""
# Path to the user-provided module that defines the training logic consumed
# by the Trainer component below.
TRAINER_MODULE_FILE = "model.py"
# Show the trainer module source (IPython shell escape).
!cat {TRAINER_MODULE_FILE}
"""
Explanation: Train your TensorFlow model with the Trainer component
The Trainer component trains a model using TensorFlow.
Trainer takes:
tf.Examples used for training and eval.
A user provided module file that defines the trainer logic.
A data schema created by SchemaGen or imported by ImporterNode.
A proto definition of train args and eval args.
An optional transform graph produced by upstream Transform component.
An optional base models used for scenarios such as warmstarting training.
<img src=../../images/Trainer.png width="400">
Define the trainer module
To configure Trainer, you need to encapsulate your training code in a Python module that is then provided to the Trainer as an input.
End of explanation
"""
# Configure the Trainer: the GenericExecutor runs the module's run_fn (rather
# than the legacy trainer_fn). It trains on the Transform component's output
# using the imported schema and transform graph. num_steps is deliberately
# tiny (2 train / 1 eval) — this is a smoke-test run, not a real training.
trainer = Trainer(
    custom_executor_spec=executor_spec.ExecutorClassSpec(
        trainer_executor.GenericExecutor
    ),
    module_file=TRAINER_MODULE_FILE,
    transformed_examples=transform.outputs.transformed_examples,
    schema=schema_importer.outputs.result,
    transform_graph=transform.outputs.transform_graph,
    train_args=trainer_pb2.TrainArgs(splits=["train"], num_steps=2),
    eval_args=trainer_pb2.EvalArgs(splits=["eval"], num_steps=1),
)
context.run(trainer)
"""
Explanation: Create and run the Trainer component
As of the 0.25.0 release of TFX, the Trainer component only supports passing a single field - num_steps - through the train_args and eval_args arguments.
End of explanation
"""
# URI of the model_run artifact, where the TensorBoard callback defined in
# model.py writes the train/eval metric logs (used for tensorboard dev upload).
logs_path = trainer.outputs["model_run"].get()[0].uri
print(logs_path)
"""
Explanation: Analyzing training runs with TensorBoard
In this step you will analyze the training run with TensorBoard.dev. TensorBoard.dev is a managed service that enables you to easily host, track and share your ML experiments.
Retrieve the location of TensorBoard logs
Each model run's train and eval metric logs are written to the model_run directory by the Tensorboard callback defined in model.py.
End of explanation
"""
# Configure the Tuner: it reuses the trainer module (which defines the
# hyperparameter search space) and the Transform outputs. Again only a few
# steps per trial — a smoke test, not a real search.
tuner = Tuner(
    module_file=TRAINER_MODULE_FILE,
    examples=transform.outputs["transformed_examples"],
    transform_graph=transform.outputs["transform_graph"],
    train_args=trainer_pb2.TrainArgs(num_steps=2),
    eval_args=trainer_pb2.EvalArgs(num_steps=1),
)
context.run(tuner)
"""
Explanation: Upload the logs and start TensorBoard.dev
Open a new JupyterLab terminal window
From the terminal window, execute the following command
tensorboard dev upload --logdir [YOUR_LOGDIR]
Where [YOUR_LOGDIR] is an URI retrieved by the previous cell.
You will be asked to authorize TensorBoard.dev using your Google account. If you don't have a Google account or you don't want to authorize TensorBoard.dev you can skip this exercise.
After the authorization process completes, follow the link provided to view your experiment.
Tune your model's hyperparameters with the Tuner component
The Tuner component makes use of the Python KerasTuner API to tune your model's hyperparameters. It tighty integrates with the Transform and Trainer components for model hyperparameter tuning in continuous training pipelines as well as advanced use cases such as feature selection, feature engineering, and model architecture search.
<img src=../../images/Tuner_Overview.png width="400">
Tuner takes:
A user provided module file (or module fn) that defines the tuning logic, including model definition, hyperparameter search space, objective etc.
tf.Examples used for training and eval.
Protobuf definition of train args and eval args.
(Optional) Protobuf definition of tuning args.
(Optional) transform graph produced by an upstream Transform component.
(Optional) A data schema created by a SchemaGen pipeline component and optionally altered by the developer.
<img src=../../images/Tuner.png width="400">
With the given data, model, and objective, Tuner tunes the hyperparameters and emits the best results that can be directly fed into the Trainer component during model re-training.
End of explanation
"""
# Register the Tuner's best_hyperparameters artifact so it can be fed to a
# new Trainer run.
hparams_importer = ImporterNode(
    instance_name="import_hparams",
    # This can be Tuner's output file or manually edited file. The file contains
    # text format of hyperparameters (kerastuner.HyperParameters.get_config())
    source_uri=tuner.outputs.best_hyperparameters.get()[0].uri,
    artifact_type=HyperParameters,
)
context.run(hparams_importer)

# TODO: your code to retrain your model with the best hyperparameters found by the Tuner component above.
# Hint: review the Trainer code above in this notebook and the documentation for how to configure the trainer to use the output artifact from the hparams_importer.
# Solution: same Trainer configuration as before, plus the imported
# hyperparameters artifact wired in via the `hyperparameters` argument.
trainer = Trainer(
    custom_executor_spec=executor_spec.ExecutorClassSpec(
        trainer_executor.GenericExecutor
    ),
    module_file=TRAINER_MODULE_FILE,
    transformed_examples=transform.outputs.transformed_examples,
    schema=schema_importer.outputs.result,
    transform_graph=transform.outputs.transform_graph,
    hyperparameters=hparams_importer.outputs.result,
    train_args=trainer_pb2.TrainArgs(splits=["train"], num_steps=2),
    eval_args=trainer_pb2.EvalArgs(splits=["eval"], num_steps=1),
)
context.run(trainer)
"""
Explanation: Retrain your model by running Tuner with the best hyperparameters
End of explanation
"""
# Resolve the latest "blessed" model (if any) to use as the Evaluator's
# baseline. On the first run nothing is blessed yet, so the candidate model
# will become the first blessed model.
model_resolver = ResolverNode(
    instance_name="latest_blessed_model_resolver",
    resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
    model=Channel(type=Model),
    model_blessing=Channel(type=ModelBlessing),
)
context.run(model_resolver)
"""
Explanation: Evaluating trained models with Evaluator
The Evaluator component analyzes model performance using the TensorFlow Model Analysis library. It runs inference requests on particular subsets of the test dataset, based on which slices are defined by the developer. Knowing which slices should be analyzed requires domain knowledge of what is important in this particular use case or domain.
The Evaluator can also optionally validate a newly trained model against a previous model. In this lab, you only train one model, so the Evaluator automatically will label the model as "blessed".
<img src=../../images/Evaluator.png width="400">
Configure and run the Evaluator component
Use the ResolverNode to pick the previous model to compare against. The model resolver is only required if performing model validation in addition to evaluation. In this case we validate against the latest blessed model. If no model has been blessed before (as in this case) the evaluator will make our candidate the first blessed model.
End of explanation
"""
# TODO: Your code here to create a tfma.MetricThreshold.
# Review the API documentation here: https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/MetricThreshold
# Hint: Review the API documentation for tfma.GenericValueThreshold to constrain accuracy between 50% and 99%.
# NOTE(review): lower_bound below is 0.55 (55%), slightly stricter than the
# 50% mentioned in the hint — confirm which bound is intended.
accuracy_threshold = tfma.MetricThreshold(
    value_threshold=tfma.GenericValueThreshold(
        lower_bound={"value": 0.55}, upper_bound={"value": 0.99}
    )
)

# Metrics to compute: accuracy (with the validation threshold above) and the
# number of examples evaluated.
metrics_specs = tfma.MetricsSpec(
    metrics=[
        tfma.MetricConfig(
            class_name="SparseCategoricalAccuracy", threshold=accuracy_threshold
        ),
        tfma.MetricConfig(class_name="ExampleCount"),
    ]
)

# Evaluate on the whole dataset and additionally sliced by Wilderness_Area.
eval_config = tfma.EvalConfig(
    model_specs=[tfma.ModelSpec(label_key="Cover_Type")],
    metrics_specs=[metrics_specs],
    slicing_specs=[
        tfma.SlicingSpec(),
        tfma.SlicingSpec(feature_keys=["Wilderness_Area"]),
    ],
)
eval_config

# Run the Evaluator against the raw examples, comparing the candidate model
# to the resolved baseline (cache disabled so it always re-runs).
model_analyzer = Evaluator(
    examples=example_gen.outputs.examples,
    model=trainer.outputs.model,
    baseline_model=model_resolver.outputs.model,
    eval_config=eval_config,
)
context.run(model_analyzer, enable_cache=False)
"""
Explanation: Configure evaluation metrics and slices.
End of explanation
"""
# Inspect the blessing artifact directory — a BLESSED/NOT_BLESSED marker file
# records the validation outcome.
model_blessing_uri = model_analyzer.outputs.blessing.get()[0].uri
!ls -l {model_blessing_uri}
"""
Explanation: Check the model performance validation status
End of explanation
"""
# Load the Evaluator's evaluation artifact and render the sliced metrics
# (overall, then sliced by Wilderness_Area).
evaluation_uri = model_analyzer.outputs["evaluation"].get()[0].uri
evaluation_uri
!ls {evaluation_uri}
eval_result = tfma.load_eval_result(evaluation_uri)
eval_result
tfma.view.render_slicing_metrics(eval_result)
tfma.view.render_slicing_metrics(eval_result, slicing_column="Wilderness_Area")
"""
Explanation: Visualize evaluation results
You can visualize the evaluation results using the tfma.view.render_slicing_metrics() function from TensorFlow Model Analysis library.
Setup Note: Currently, TFMA visualizations don't render in JupyterLab. Make sure that you run this notebook in Classic Notebook.
End of explanation
"""
# Infra-validate the candidate model: launch it in a local-Docker TensorFlow
# Serving instance ("latest" image tag), give it up to 60 s x 5 attempts to
# load, and send 5 examples through the serving API before blessing it.
infra_validator = InfraValidator(
    model=trainer.outputs["model"],
    examples=example_gen.outputs["examples"],
    serving_spec=infra_validator_pb2.ServingSpec(
        tensorflow_serving=infra_validator_pb2.TensorFlowServing(
            tags=["latest"]
        ),
        local_docker=infra_validator_pb2.LocalDockerConfig(),
    ),
    validation_spec=infra_validator_pb2.ValidationSpec(
        max_loading_time_seconds=60,
        num_tries=5,
    ),
    request_spec=infra_validator_pb2.RequestSpec(
        tensorflow_serving=infra_validator_pb2.TensorFlowServingRequestSpec(),
        num_examples=5,
    ),
)
context.run(infra_validator, enable_cache=False)
"""
Explanation: InfraValidator
The InfraValidator component acts as an additional early warning layer by validating a candidate model in a sandbox version of its serving infrastructure to prevent an unservable model from being pushed to production. Compared to the Evaluator component above which validates a model's performance, the InfraValidator component is validating that a model is able to generate predictions from served examples in an environment configured to match production. The config below takes a model and examples, launches the model in a sand-boxed TensorflowServing model server from the latest image in a local docker engine, and optionally checks that the model binary can be loaded and queried before "blessing" it for production.
<img src=../../images/InfraValidator.png width="400">
End of explanation
"""
# Inspect the infra-validation blessing artifact (marker file as above).
infra_blessing_uri = infra_validator.outputs.blessing.get()[0].uri
!ls -l {infra_blessing_uri}
"""
Explanation: Check the model infrastructure validation status
End of explanation
"""
# Show the model artifact that will be pushed.
trainer.outputs["model"]

# Push the model to SERVING_MODEL_DIR, gated on BOTH the Evaluator's and the
# InfraValidator's blessings.
pusher = Pusher(
    model=trainer.outputs["model"],
    model_blessing=model_analyzer.outputs["blessing"],
    infra_blessing=infra_validator.outputs["blessing"],
    push_destination=pusher_pb2.PushDestination(
        filesystem=pusher_pb2.PushDestination.Filesystem(
            base_directory=SERVING_MODEL_DIR
        )
    ),
)
context.run(pusher)
"""
Explanation: Deploying models with Pusher
The Pusher component checks whether a model has been "blessed", and if so, deploys it by pushing the model to a well known file destination.
<img src=../../images/Pusher.png width="400">
Configure and run the Pusher component
End of explanation
"""
pusher.outputs
# Set `PATH` to include a directory containing `saved_model_cli`.
PATH = %env PATH
%env PATH=/opt/conda/envs/tfx/bin:{PATH}
# Pushed model versions are timestamped directory names, so max() picks the
# most recent one.
latest_pushed_model = os.path.join(
    SERVING_MODEL_DIR, max(os.listdir(SERVING_MODEL_DIR))
)
# Dump the pushed SavedModel's signatures via the CLI.
!saved_model_cli show --dir {latest_pushed_model} --all
"""
Explanation: Examine the output of Pusher
End of explanation
"""
|
Neuroglycerin/neukrill-net-work | notebooks/troubleshooting_and_sysadmin/Brute force venv comparison.ipynb | mit | cd ../..
"""
Explanation: In the last notebook compared pip freeze output and installed packages so that it matched. Did not find error. Must be some other difference in the virtualenv. So, going to use rsync to compare everything in both venvs.
End of explanation
"""
!rsync -nrvl --ignore-times --size-only --exclude={*.pyc,*.so} neukrill_venv2/ neukrillvenvfresh/
!rsync -nrvl neukrillvenvfresh/ neukrill_venv2/
"""
Explanation: Using rsync with n for dry run, r for recursive, v for verbose and l to handle symlinks as symlinks; --ignore-times and --size-only make the first comparison ignore timestamps and compare file sizes only.
End of explanation
"""
|
JamesSample/enviro_mod_notes | notebooks/odes.ipynb | mit | alpha = 0.75
# Download Tarland data
data_url = r'https://drive.google.com/uc?export=&id=0BximeC_RweaecHNIZF9GMHkwaWc'
met_df = pd.read_csv(data_url, parse_dates=True, dayfirst=True, index_col=0)
del met_df['Q_Cumecs']
# Linear interpolation of any missing values
met_df.interpolate(method='linear', inplace=True)
# Calculate HER
met_df['HER_mm'] = met_df['Rainfall_mm'] - alpha*met_df['PET_mm']
# Check there are no missing values
assert met_df.isnull().sum().all() == 0
print met_df.head()
"""
Explanation: Solving Ordinary Differential Equations
1. Background
The basic water balance models I've coded previously involve ODEs with simple analytical solutions. In future I'd like to work with more complex systems of equations where exact solutions are either impossible or beyond my ability to integrate. These notes document my experiments so far with various numerical solvers.
2. The model
As a test problem, I'm going to try to build a simple hydrological model consisting of just two "linear reservoirs" (i.e. buckets). This system has a straightforward analytical solution, so I should be able to test whether I'm using the numerical solvers correctly.
<img src="https://drive.google.com/uc?export=&id=0BximeC_RweaeaHNhNUYwZjFWaXc" alt="Two bucket model" height="400" width=400>
The Hydrologically Effective Rainfall, $H$, is estimated as $(P - \alpha E)$, where $P$ is precipitation, $E$ is potential evapotranspiration and $\alpha$ is a "correction factor" to convert from potential to actual evapotranspiration. For the toy example here I'm going to assume $\alpha = 0.75$, although in reality it might be estimated from e.g. land cover information.
The soil bucket receives an input flow of $H$, and the outflow from the bucket, $S$, is assumed to be directly proportional to the volume of water in the reservoir, $V_s$. The constant of proportionality is conventionally expressed as $\frac{1}{\tau_s}$, where $\tau_s$ is the “residence time” of the bucket. For the first bucket:
$$\frac{dV_s}{dt} = (H – S) \qquad and \qquad S = \frac{V_s}{\tau_s}$$
These expressions can be combined to give
$$\frac{dS}{dt} = \frac{(H – S)}{\tau_s}$$
A fraction of the water, $\beta$, leaving the first bucket enters the second bucket. The flow rate from the second bucket is denoted $G$ and, in the same way, the rate of change of $G$ is given by:
$$\frac{dG}{dt} = \frac{(\beta S - G)}{\tau_g}$$
3. Aims
I want to use numerical methods to:
Calculate how $S$ and $G$ vary with time. <br><br>
Calculate the total volume of water draining via the $S$ pathway (denoted $D_s$) and via the $G$ pathway (denoted $D_g$) over the course of the simulation. $D_s$ and $D_g$ are simply the integrals of $S$ and $G$ respectively:
$$\frac{dD_s}{dt} = (1 - \beta) S \qquad and \qquad \frac{dD_g}{dt} = G$$
4. Input data
To begin with, I'm just interested in solving the equations for a single time step i.e. given some initial conditions and a constant value for $H$, how does the system evolve? Eventually, I want to extend this to consider what options are available when $H$ is a discrete time series with daily resolution. For this, I'll use some real data from a small study catchment in NE Scotland.
End of explanation
"""
# Model parameters. NOTE: these (and H below) are read as module-level
# globals by the ODE right-hand-side function f() defined next.
beta = 0.6 # BFI (dimensionless)
T_s = 1. # Soil residence time (days)
T_g = 2. # Groundwater residence time (days)
step_len = 5 # Time step length (days)

# Initial conditions
H = 5. # Input (mm/day)
S0 = 0. # Soil outflow (mm/day)
G0 = 0. # Groundwater outflow (mm/day)
def f(y, t):
    """ Right-hand side of the two-bucket ODE system.

        y holds [S, G, Ds, Dg]; only S and G feed back into the
        derivatives. H, T_s, T_g and beta are read from module scope.
    """
    soil_out, gw_out = y[0], y[1]

    # Derivatives in the order [dS/dt, dG/dt, dDs/dt, dDg/dt]
    # (see section 2 of the notebook text for the equations)
    return [(H - soil_out)/T_s,
            (beta*soil_out - gw_out)/T_g,
            (1 - beta)*soil_out,
            gw_out]
# Build integrator
solver = odespy.Vode(f) # Vode seems like a good general purpose solver
solver.set_initial_condition([S0, G0, 0, 0]) # [S, G, Ds, Dg]

# Divide step into 100 equal parts for plotting
t_i = np.linspace(0, step_len, 100)

# Solve (y has one row per requested time point, one column per variable)
y, t = solver.solve(t_i)

# Plot
df = pd.DataFrame(y, columns=['S', 'G', 'Ds', 'Dg'],
                  index=t)
df.plot()
"""
Explanation: 5. Basic approach with odespy
Before getting involved with complicated dynamical systems modelling packages (like PyDSTool; see section 6), I want to explore some simpler options. scipy.integrate.ode provides some basic solvers, but the options available are limited. An alternative is the odespy package, which has similar syntax to scipy.integrate.ode but with a much broader range of integrators. The plan is to use this to get started and then see if I can do better with PyDSTool.
5.1. Installing odespy
This is a bit of a faff. First, you need to have the ming32 compiler installed and on your path. Then download the .zip archive from the odespy repository and unzip it. Open the WinPython command prompt and cd to the odespy directory. Try running:
python setup.py install
You'll probably get an error message saying something like:
File "C:\Anaconda\lib\site-packages\numpy\distutils\fcompiler\gnu.py",
line 333, in get_libraries
raise NotImplementedError("Only MS compiler supported with gfortran on win64")
NotImplementedError: Only MS compiler supported with gfortran on win64
If this happens, open the file gnu.py at the path given by the error message and comment out the specified line, adding a pass statement:
else:
#raise NotImplementedError("Only MS compiler supported with gfortran on win64")
pass
Now try:
python setup.py install
again and with a bit of luck the package will install successfully.
5.2. Single time step example
The basic approach to using odespy looks something like this:
End of explanation
"""
# Reset the solver
solver = odespy.Vode(f)
solver.set_initial_condition([S0, G0, 0, 0]) # [S, G, Ds, Dg]

# Just get results at the end of the step (faster than evaluating 100
# intermediate points when only the final state is needed)
t_i = [0, float(step_len)]

# Solve
y, t = solver.solve(t_i)

# Results: row 1 of y is the state at t = step_len
print 'At end of step:'
print '    S = %.2f' % y[1, 0]
print '    G = %.2f' % y[1, 1]
print '    Ds = %.2f' % y[1, 2]
print '    Dg = %.2f' % y[1, 3]
"""
Explanation: This seems to be working OK: the soil outflow, $S$, is tending asymptotically towards $H$, and the groundwater outflow, $G$, is tending towards $\beta H$, which is as expected.
For plotting and visualisation, it's useful to evaluate many time points within the time step, as shown above. However, from the point of view of the model I'm really only interested in the values of $S$, $G$, $D_s$ and $D_g$ at the end of the step. I can therefore improve performance by just asking for the results that I'm interested in:
End of explanation
"""
# New initial conditions, where soil store is part full, but H < 0
# (evapotranspiration exceeds rainfall — demonstrates the non-physical
# negative values discussed in the text below)
H = -5.
S0 = 5.

# Reset integrator
solver = odespy.Vode(f)
solver.set_initial_condition([S0, G0, 0, 0]) # [S, G, Ds, Dg]

# Divide step into 100 equal parts for plotting
t_i = np.linspace(0, step_len, 100)

# Solve
y, t = solver.solve(t_i)

# Plot
df = pd.DataFrame(y, columns=['S', 'G', 'Ds', 'Dg'],
                  index=t)
df.plot()
"""
Explanation: These results are consistent with the plot above.
What if H is negative?
On some days, evapotranspiration exceeds precipitation, so $H < 0$. This can lead to results that are not physically meaningful:
End of explanation
"""
def f2(y, t):
    """ Test ODE dy/dt = -y (exact solution y = e^-t).

        t is unused but required by the solver's call signature.
    """
    dy_dt = -y
    return dy_dt
def terminate(y, t, step_no):
    """ Stop the integration once the solution drops below the 0.5 threshold.

        odespy calls this after each step; returning True terminates.
    """
    below_threshold = y[step_no] < 0.5
    return below_threshold
# Time points for solution (start and end of the unit interval only)
t_i = [0, 1.]

# Build integrator
solver = odespy.Vode(f2)
solver.set_initial_condition(1) # y=1 at t=0

# Solve; the terminate callback stops the run when y < 0.5
y, t = solver.solve(t_i, terminate)
"""
Explanation: Mathematically, this all works OK, but the negative values for $S$, $G$, $D_s$ and $D_g$ at the end of the step have no physical meaning in the model. This is where things start to get tricky. I can think of two ways to deal with this:
Fudge it. If the values for any of $S$, $G$, $D_s$ and $D_g$ go negative within a particular time step, I could simply reset those values back to zero ready for the next time step. This seems like a bad idea for a couple of reasons. Firstly, if $S$ goes negative part way through a step then $D_s$ will also start decreasing. By the end of the step, $D_s$ may still be positive, but it will nevertheless be wrong. Secondly, and perhaps more importantly, if $S$ goes negative then the computed trajectory for the water level in the groundwater store will be incorrect, even if the final value at the end of the step is still positive. This error will then be propagated to the next time step, creating the possibility that the groundwater store could evolve in completely the wrong way. Dimitri has a paper which discusses some of these issues in more detail, it's a bit beyond me at present. Come back and read this paper more carefully. <br><br>
Consider the time step in two parts. A better - but more complicated - alternative is to work out exactly when the soil dries out and then force a change in the behaviour of the ODE system after this point. Unfortunately, this involves introduing a discontinuity which (I think) makes the equations non-integrable. To get around this, we can run the solver twice: the first time normally, up to the point where the soil dries out; we then start the solver again from this point, but with $S = H = 0$, and run until the end of the time step.
Option 2 is clearly better, and it's not too difficult to code. The hard part is working out when the soil dries out. One way of doing this is to use the terminate option in odespy. For the simplest possible example of how to use this, consider the ODE
$$\frac{dy}{dt} = -y \qquad with \qquad y = 1 \qquad at \qquad t = 0$$
This problem has a particular solution of $y = e^{-t}$. We will use a terminate function to try to stop the solver when the threshold $y = 0.5$ is crossed. Based on the analytical solution, this should occur at $t = ln(2)$.
End of explanation
"""
# Divide step into 100 parts so the termination "event" is located to
# within 1/100 of the step
t_i = np.linspace(0, 1, 100)

# Build integrator
solver = odespy.Vode(f2)
solver.set_initial_condition(1) # y=1 at t=0

# Solve
y, t = solver.solve(t_i, terminate)

# Print the true value from the analytical solution (y = e^-t crosses 0.5
# at t = ln 2)
print 'Exact threshold at t=%.5F' % np.log(2)
"""
Explanation: In the above code, the solver prints that integration has been terminated. However, in terms of identifying the time of termination, it only outputs the next time in the list of time points requested i.e. the next time point after termination in the vector $t$, which in this case is 1.
To identify the threshold crossing more accurately, we need to divide the time step into a number of smaller segments. The more accurately we need to find the event, the more segments are required, and this obviously has a computational cost. For example, if we are happy to identify the threshold "event" to within $\frac{1}{100}$ of a time step, we could try:
End of explanation
"""
def f(y, t, H=H):
    """ Right-hand side of the two-bucket ODE system.

        H is supplied as a keyword (via odespy's f_kwargs) so it can be
        switched to 0 after the soil dries out; T_s, T_g and beta are
        read from module scope. y holds [S, G, Ds, Dg].
    """
    soil_out, gw_out = y[0], y[1]

    # Derivatives in the order [dS/dt, dG/dt, dDs/dt, dDg/dt]
    return [(H - soil_out)/T_s,
            (beta*soil_out - gw_out)/T_g,
            (1 - beta)*soil_out,
            gw_out]
def terminate(y, t, step_no):
    """ Stop the integration as soon as the soil outflow S goes negative
        (i.e. the soil store has dried out).
    """
    soil_flow = y[step_no][0]
    return soil_flow < 0
def integrate_step(H, S, G, t_i):
    """ Use odespy to integrate the ODE system over the time step.
        If the soil dries out, the step is automatically split and
        the integration performed in two parts (first with the supplied
        H, then restarted with H = S = 0 for the remainder of the step).

        Args:
            H:   Hydrologically effective rainfall for the step (mm/day).
            S:   Soil outflow rate at the start of the step (mm/day).
            G:   Groundwater outflow rate at the start of the step (mm/day).
            t_i: Time points within the step; their spacing sets how
                 precisely the drying-out "event" is located.

        Returns:
            Array of [S, G, Ds, Dg] values at the end of the step.

        Note: reads step_len, f and terminate from module scope.
    """
    # Build integrator
    solver = odespy.Vode(f, f_kwargs={'H':H})
    solver.set_initial_condition([S, G, 0, 0])

    # Solve
    y, t = solver.solve(t_i, terminate)

    # Array of results so far
    res = y[-1]

    # Did soil dry out? (terminate stops the solver early, so the last
    # returned time is before step_len)
    if t[-1] != step_len:
        # Soil has dried out
        # Values can be very slightly negative due to errors in locating
        # "event". Set values < 0 to 0
        res[res < 0] = 0

        # Calculate time remaining this step
        t_rem = step_len - t[-1]

        # Restart integrator with H=S=0
        solver = odespy.Vode(f, f_kwargs={'H':0})
        solver.set_initial_condition([0, res[1], 0, 0])

        # Solve
        y, t = solver.solve([0, t_rem])

        # Array of results for second part of step
        res2 = y[-1]

        # Update results from first part with those from second part
        # (S and Ds stay at their first-part values: the soil is dry)
        res[1] = res2[1] # Update G
        res[3] = res[3] + res2[3] # Dg

    return res
# Set new initial conditions for H and S0 (drying-out scenario again)
H = -5
S0 = 5

# Identify "events" to within 1/100 of time step
t_i = np.linspace(0, step_len, 100)

# Run solver
res = integrate_step(H, S0, G0, t_i)

# Print results
print 'At end of step:'
print '    S = %.3f' % res[0]
print '    G = %.3f' % res[1]
print '    Ds = %.3f' % res[2]
print '    Dg = %.3f' % res[3]
"""
Explanation: This approach allows us to detect the threshold crossing event to within arbitrary precision. We can use this to create a function for performing accurate integration across a time step, splitting the step at drying out events as necessary.
End of explanation
"""
def soil_outflow_equn(t, R, S_0, k_s):
    """ Analytical soil outflow rate S(t) for constant input R,
        starting from S_0, with rate constant k_s (= 1/T_s).
    """
    decay = np.exp(-1.*k_s*t)
    return R - (R - S_0)*decay
def gw_outflow_equn(t, R, S_0, D_0, k_s, k_g, b):
    """ Analytical groundwater outflow rate G(t): steady-state response to R,
        transient response to the soil store, and decay of the initial D_0.
    """
    e_s = np.exp(-1.*k_s*t)
    e_g = np.exp(-1.*k_g*t)
    steady = R*b*(1 - e_g)
    transient = k_g*b*(R - S_0)*(e_s - e_g)/(k_s - k_g)
    initial = D_0*e_g
    return steady + transient + initial
def unsaturated_vol(R, S_0, t_1, t_c, k_s):
    """ Drainage volume from the soil store between t_1 and t_c while the
        water level is below saturation capacity.
    """
    e_c = np.exp(-1.*k_s*t_c)
    e_1 = np.exp(-1.*k_s*t_1)
    return R*(t_c - t_1) + (R - S_0)*(e_c - e_1)/k_s
def gw_vol(R, S_0, D_0, t_1, t_2, k_s, k_g, b):
    """ Groundwater drainage volume between t_1 and t_2.

        R, S_0 and D_0 are explicit parameters (rather than module globals)
        because the caller needs to switch them to zero after the soil
        dries out.
    """
    e_s1 = np.exp(-1.*k_s*t_1)
    e_s2 = np.exp(-1.*k_s*t_2)
    e_g1 = np.exp(-1.*k_g*t_1)
    e_g2 = np.exp(-1.*k_g*t_2)

    term1 = R*b*(t_2 - t_1)                                   # steady input
    term2 = R*b*(e_g2 - e_g1)/k_g                             # gw spin-up
    term3 = b*(R - S_0)*(e_g2 - e_g1)/(k_s - k_g)             # soil transient
    term4 = k_g*b*(R - S_0)*(e_s1 - e_s2)/(k_s*(k_s - k_g))   # soil transient
    term5 = D_0*(e_g1 - e_g2)/k_g                             # initial store

    return term1 + term2 + term3 + term4 + term5
def t_zero(R, S_0, k_s):
    """ Time at which the soil outflow reaches zero, i.e. when the soil
        store dries out. Only relevant if R < 0.
    """
    return np.log(1 - (S_0/R))/k_s
# Evaluate the analytical solution for one step and compare with the
# numerical results printed above. The step is split at t_0, the time the
# soil dries out (if it does within the step).

# Does the soil dry out within this step?
t_0 = t_zero(H, S0, 1/T_s)

# Only need to consider times within the current step
if (t_0 > step_len) or (t_0 == 0):
    t_0 = step_len

# Dict to store analytical results
ana_res = {}

# For S
if t_0 < step_len:
    # The soil dries out in this step, so at end S = 0
    ana_res['S'] = 0
else:
    ana_res['S'] = soil_outflow_equn(step_len, H, S0, 1/T_s)

# The total soil drainage is the amount up until the soil dries out
ana_res['Ds'] = (1 - beta)*unsaturated_vol(H, S0, 0, t_0, 1/T_s)

# For gw variables, need to consider the step in 2 parts
# For G:
G1 = gw_outflow_equn(t_0, H, S0, G0, 1/T_s, 1/T_g, beta) # 1. When soil dries
G2 = gw_outflow_equn(step_len - t_0, 0, 0, G1, 1/T_s, 1/T_g, beta) # 2. At end of step
ana_res['G'] = G2

# For Dg
Dg1 = gw_vol(H, S0, G0, 0, t_0, 1/T_s, 1/T_g, beta) # 1. When soil dries
Dg2 = gw_vol(0, 0, G1, 0, step_len - t_0, 1/T_s, 1/T_g, beta) # 2. At end of step
ana_res['Dg'] = Dg1 + Dg2

# Check for conservation of volume (store volume = residence time x outflow)
w_st = T_s*S0 + T_g*G0 # Water at start
w_in = t_0*H # Water added or removed
w_out = ana_res['Ds'] + ana_res['Dg'] # Water draining
w_end = T_s*ana_res['S'] + T_g*ana_res['G'] # Water in stores at end

# Check volumes balance to within 4 d.p.
assert round(w_st + w_in, 4) == round(w_end + w_out, 4), 'Volume not conserved.'

# Print results
print 'At end of step:'
print '    S = %.3f' % ana_res['S']
print '    G = %.3f' % ana_res['G']
print '    Ds = %.3f' % ana_res['Ds']
print '    Dg = %.3f' % ana_res['Dg']
"""
Explanation: 5.3. Checking against the analytical solution
This seems to be working, but it's worth checking against the analytical solution. The functions below are taken from my (analytical) water balance model. Unfortunately they use different notation to what I've used so far in this notebook. For clarity:
$R$ is the equivalent of $H$
$D_0$ is the equivalent of $G_0$
$k_s = \frac{1}{T_s}$
$k_g = \frac{1}{T_g}$
$b$ is the equivalent of $\beta$
End of explanation
"""
%%capture
# The line above stops odespy from printing lots of "Vode
# terminated at t=" messages to the output

# Let's change some of the model parameters to be more compatible
# with our real data
T_s = 10. # Days
T_g = 100. # Days
step_len = 1 # Day
n_steps = 300 # Consider just the first n_steps of the met data

# Reset the initial conditions
S0 = 0
G0 = 0

# Identify "events" to within 1/100 of time step
t_i = np.linspace(0, step_len, 100)

# Empty list to store output
data = []

# Variables to hold values of S and G (carried over between steps)
S = S0
G = G0

t1 = time.time()

# Loop over met data, one daily step at a time
for step in range(n_steps):
    # Get H for this step
    H = float(met_df['HER_mm'].ix[step])

    # Run solver
    res = integrate_step(H, S, G, t_i);

    # Append to results dataset
    data.append(res)

    # Update S and G for next step
    S = res[0]
    G = res[1]

t2 = time.time()
def plot_results(data):
    """Plot the simulated store levels and daily drainage volumes.

    data is a sequence of per-step result arrays [S, G, Ds, Dg]; each
    series is drawn against the step index on two stacked axes.
    """
    res_df = pd.DataFrame(data=np.vstack(data), columns=['S', 'G', 'Ds', 'Dg'])

    fig, (ax_rates, ax_drain) = plt.subplots(nrows=2, ncols=1)

    # Upper panel: outflow rates from the soil and groundwater stores
    for col, fmt in (('S', 'r-'), ('G', 'b-')):
        ax_rates.plot(res_df.index, res_df[col], fmt, label=col)
    ax_rates.set_ylabel('Rate (mm/day)')
    ax_rates.legend(loc='best')

    # Lower panel: accumulated drainage to the stream per daily step
    for col, fmt in (('Ds', 'r-'), ('Dg', 'b-')):
        ax_drain.plot(res_df.index, res_df[col], fmt, label=col)
    ax_drain.set_ylabel('Daily drainage (mm)')
    ax_drain.set_xlabel('Time (days)')
    ax_drain.legend(loc='best')

    plt.show()
# Report solver wall-clock time and plot the simulated series
print 'Runtime = %.2f s.' % (t2 - t1)
plot_results(data)
"""
Explanation: The analytical results are the same as those obtained above using the numerical algorithm, so this approach seems to be working.
5.4. Looping over a discrete time series
We can now run the solver in a loop where the value of $H$ for each time step is taken from a real dataset.
End of explanation
"""
# List every ODE solver bundled with odespy, one name per line
methods = odespy.list_available_solvers()
for method in methods:
    print method
"""
Explanation: This seems to work pretty well, but it's quite cumbersome and could become difficult to work with for more complex systems. It's also worth noting that the performance is strongly influenced by the choice of solver. odespy has lots of different solver methods available:
End of explanation
"""
# Define ODEs as strings (PyDSTool parses these expressions symbolically)
dS_dt = '(H - S)/T_s' # Soil outflow rate (mm/day)
dG_dt = '(beta*S - G)/T_g' # Groundwater outflow rate (mm/day)
dDs_dt = '(1 - beta)*S' # Accumulated soil drainage to stream (mm)
dDg_dt = 'G' # Accumulated groundwater drainage to stream (mm)

# Define an "event" for when the soil store dries out: a zero-crossing
# of S, located to within 'eventtol'; 'term':True stops the integration
# when the event fires (the loop below then restarts with H=0)
event_args = {'name':'soil_dry',
              'eventtol':1e-6,
              'term':True,
              'active':True}
soil_dry_ev = dst.makeZeroCrossEvent('S',
                                     -1, # Only trigger in decreasing S direction
                                     event_args,
                                     varnames=['S'])

# Build model
mod_args = {'pars':{'H':0, 'beta':beta, 'T_s':T_s, 'T_g':T_g},
            'varspecs':{'S':dS_dt, 'G':dG_dt, 'Ds':dDs_dt, 'Dg':dDg_dt},
            'events':soil_dry_ev, # Associate event with this model
            'name':'model'}
model = dst.Generator.Vode_ODEsystem(mod_args) # Stick with Vode integrator for this example
"""
Explanation: Of these, Vode seems popular and is reasonably fast. Dopri5 and Dop853 are also widely used (and I think very stable?), but they are significantly slower. Lsoda is marginally faster than Vode and the very basic Euler scheme is faster still, although I suspect its performance would degrade rapidly for more complex ODE systems.
Some obvious options to affect the performance of the above code include:
Changing the integration time step (i.e. the separation of the $t_i$). Using fewer segments within each time step should improve performance, but at the expense of larger errors in locating the time of threshold crossing "events". <br><br>
Change the solver used. More sophisticated solvers are presumably more robust, but also slower. I think some solvers are better suited to dealing with e.g. "stiff" or strongly non-linear systems than others. The test system here is so simple that I suspect almost any solver will work OK, but this will not necessarily be true for more realistic examples. <br><br>
Switch the solver code to FORTRAN or C. In some cases, odespy is already calling underlying FORTRAN and C libraries (e.g. ODEPACK). In other cases the implementations are written in pure Python. The FORTRAN and C solvers should be faster.
6. Dynamical systems packages
PyDSTool is a sophisticated-looking Python package for modelling complex dynamic systems. Its capabilities are well beyond anything I need at present, but it looks interesting. In particular, PyDSTool includes "event detection" to arbitrary precision, as well as the ability to develop hybrid models, which automatically switch from one ODE system to another when a specified event occurs.
6.1. Specify the PyDSTool model
End of explanation
"""
data = []

# Variables to hold values of S and G, carried between steps
S = S0
G = G0

t1 = time.time()

# Loop over met data
for step in range(n_steps):
    # Get H for this step
    # NOTE(review): .ix is a long-deprecated pandas indexer (.loc/.iloc today)
    H = float(met_df['HER_mm'].ix[step])
    # Set H, S and G in model; Ds/Dg accumulators restart at zero each step
    model.set(pars={'H':H},
              ics={'S':S, 'G':G, 'Ds':0, 'Dg':0},
              tdata=[0, step_len])
    # Solve
    traj = model.compute('traj')
    res = traj.sample()[-1] # Get only the last values
    res = np.array([res['S'], res['G'], res['Ds'], res['Dg']])
    # Results can be negative to within small tolerance.
    # Set back to zero
    res[res < 0] = 0
    # Check for events: if the terminal 'soil_dry' event fired, the
    # integration stopped early and must be restarted with H=0 for the
    # remainder of the step (soil store held empty; groundwater drains on)
    if model.getEvents()['soil_dry']:
        # Get remaining time in this step
        t_rem = step_len - float(model.getEvents()['soil_dry']['t'])
        # Restart the solver
        model.set(pars={'H':0},
                  ics={'S':0, 'G':res[1]},
                  tdata=[0, t_rem])
        traj = model.compute('traj')
        res2 = traj.sample()[-1]
        res2 = np.array([res2['S'], res2['G'], res2['Ds'], res2['Dg']])
        # Update results (S and Ds stay as at the event; no soil drainage
        # occurs while the store is dry)
        res[1] = res2[1] # Update G
        res[3] = res[3] + res2[3] # Dg
    # Append results
    data.append(res)
    # Update initial conditions for next step
    S = res[0]
    G = res[1]

t2 = time.time()
"""
Explanation: 6.2. Loop over time series data
End of explanation
"""
# Report wall-clock time for the PyDSTool run and plot the series
print 'Runtime = %.2f s.' % (t2 - t1)
plot_results(data)
"""
Explanation: 6.3. Plot results
End of explanation
"""
# Hybrid model example

# Define an "event" for switching models if soil store dries out
event_args = {'name':'soil_dry',
              'eventtol':1e-6,
              'term':True,
              'active':True}
soil_dry_ev = dst.makeZeroCrossEvent('S',
                                     -1, # Only trigger in decreasing S direction
                                     event_args,
                                     varnames=['S'])

# Build "wet" model: the full two-store dynamics. The auxiliary variable
# 'is_dry' (0 here, 1 in the dry model) distinguishes the two regimes.
wet_args = {'pars':{'H':0, 'beta':beta, 'T_s':T_s, 'T_g':T_g},
            'varspecs':{'S':dS_dt, 'G':dG_dt, 'Ds':dDs_dt, 'Dg':dDg_dt, 'is_dry':'0'},
            'xdomain':{'is_dry':0},
            'events':soil_dry_ev,
            'name':'wet'}
wet_model = dst.embed(dst.Generator.Vode_ODEsystem(wet_args), name='wet')

# Build "dry" model: soil store pinned at zero (dS/dt = 0, dDs/dt = 0)
# while the groundwater store keeps draining
dry_args = {'pars':{'H':0, 'beta':beta, 'T_s':T_s, 'T_g':T_g},
            'varspecs':{'S':'0', 'G':dG_dt, 'Ds':'0', 'Dg':dDg_dt, 'is_dry':'1'},
            'xdomain':{'is_dry':1},
            'ics':{'is_dry':1},
            'name':'dry'}
dry_model = dst.embed(dst.Generator.Vode_ODEsystem(dry_args), name='dry')

# Build hybrid model
mod_names = ['wet', 'dry']

# 'soil_dry' event triggers transition to 'dry' model; the 'time' entry
# presumably returns control to 'wet' at the end of integration -- confirm
# against the PyDSTool docs
wet_mi = dst.intModelInterface(wet_model)
wet_info = dst.makeModelInfoEntry(wet_mi, mod_names, [('soil_dry', 'dry')])
dry_mi = dst.intModelInterface(dry_model)
dry_info = dst.makeModelInfoEntry(dry_mi, mod_names, [('time', 'wet')])

# Combine separate models into hybrid model
mod_info_dict = dst.makeModelInfo([wet_info, dry_info])
mod_args = {'name':'two_bucket_model', 'modelInfo':mod_info_dict}
model = dst.Model.HybridModel(mod_args)

# List to store output
data = []

# Set variables equal to initial conditions
S = S0
G = G0

# Loop over discrete H time series
t1 = time.time()
for step in range(n_steps):
    # Get H for this step
    # NOTE(review): .ix is a long-deprecated pandas indexer (.loc/.iloc today)
    H = float(met_df['HER_mm'].ix[step])
    # Set H in model
    wet_model.set(pars={'H':H})
    # Compute trajectory; the hybrid model switches regime automatically
    model.compute('traj',
                  ics={'S':S, 'G':G, 'Ds':0, 'Dg':0, 'is_dry':0},
                  tdata=[0, step_len],
                  force=True) # Allow over-writing of 'traj' on each loop
    # Get values at end of step
    res = model.getEndPoint('traj')
    res = np.array([res['S'], res['G'], res['Ds'], res['Dg']])
    # Results can be negative to within small tolerance.
    # Set back to zero
    res[res < 0] = 0
    # Append results
    data.append(res)
    # Update initial condition for next step
    S = res[0]
    G = res[1]
t2 = time.time()

print 'Runtime = %.2f s.' % (t2 - t1)
plot_results(data)
"""
Explanation: This approach has the advantage of identifying the soil drying "event" to much greater precision, and it's also (arguably) neater to code. However, it's also three times slower than the odespy implementation. As with odespy, PyDSTool has lots of options for abstracting things into e.g. C to make it faster, but I'm not going to delve into optimisation at this stage.
6.4. Hybrid Dynamical Systems
One final option to investigate is to use PyDSTool to build a Hybrid Dynamical System, which is capable of switching from one ODE model to another when an event occurs. This eliminates the need for the inner loop in the above code, which makes things a bit neater (although the model setup is more complicated).
End of explanation
"""
|
quoniammm/mine-tensorflow-examples | dpAI/Logistic+Regression+with+a+Neural+Network+mindset+v3.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
"""
Explanation: Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
Instructions:
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
You will learn to:
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.
1 - Packages
First, let's run the cell below to import all the packages that you will need during this assignment.
- numpy is the fundamental package for scientific computing with Python.
- h5py is a common package to interact with a dataset that is stored on an H5 file.
- matplotlib is a famous library to plot graphs in Python.
- PIL and scipy are used here to test your model with your own picture at the end.
End of explanation
"""
# Loading the data (cat/non-cat)
# "_orig" suffix: raw pixel arrays, reshaped and normalised in later cells
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
"""
Explanation: 2 - Overview of the Problem set
Problem Statement: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.
End of explanation
"""
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
# np.squeeze turns the length-1 label slice into a scalar usable as an index
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
"""
Explanation: We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the index value and re-run to see other images.
End of explanation
"""
### START CODE HERE ### (≈ 3 lines of code)
# Dataset layout is (m, num_px, num_px, 3); images are square so one
# spatial dimension suffices for num_px.
m_train = train_set_x_orig.shape[0]   # number of training examples
m_test = test_set_x_orig.shape[0]     # number of test examples
num_px = train_set_x_orig.shape[1]    # image height == width in pixels
### END CODE HERE ###

print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
"""
Explanation: Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
Exercise: Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that train_set_x_orig is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access m_train by writing train_set_x_orig.shape[0].
End of explanation
"""
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
# Flatten each (num_px, num_px, 3) image into one column:
# (m, num_px, num_px, 3) -> (m, num_px*num_px*3) -> transpose -> (num_px*num_px*3, m)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###

print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
"""
Explanation: Expected Output for m_train, m_test and num_px:
<table style="width:15%">
<tr>
<td>**m_train**</td>
<td> 209 </td>
</tr>
<tr>
<td>**m_test**</td>
<td> 50 </td>
</tr>
<tr>
<td>**num_px**</td>
<td> 64 </td>
</tr>
</table>
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $\times$ num_px $\times$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
Exercise: Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num_px $\times$ num_px $\times$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$\times$c$\times$d, a) is to use:
python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
End of explanation
"""
# Scale pixel intensities from [0, 255] down to [0, 1]
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
"""
Explanation: Expected Output:
<table style="width:35%">
<tr>
<td>**train_set_x_flatten shape**</td>
<td> (12288, 209)</td>
</tr>
<tr>
<td>**train_set_y shape**</td>
<td>(1, 209)</td>
</tr>
<tr>
<td>**test_set_x_flatten shape**</td>
<td>(12288, 50)</td>
</tr>
<tr>
<td>**test_set_y shape**</td>
<td>(1, 50)</td>
</tr>
<tr>
<td>**sanity check after reshaping**</td>
<td>[17 31 56 22 33]</td>
</tr>
</table>
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
Let's standardize our dataset.
End of explanation
"""
# GRADED FUNCTION: sigmoid

def sigmoid(z):
    """
    Compute the sigmoid of z

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z)
    """
    ### START CODE HERE ### (≈ 1 line of code)
    # np.exp broadcasts, so z may be a scalar or an array of any shape
    s = 1. / (1. + np.exp(-z))
    ### END CODE HERE ###

    return s

print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
"""
Explanation: <font color='blue'>
What you need to remember:
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px * num_px * 3, 1)
- "Standardize" the data
3 - General Architecture of the learning algorithm
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why Logistic Regression is actually a very simple Neural Network!
<img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
Mathematical expression of the algorithm:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
Key steps:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude
4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call model().
4.1 - Helper functions
Exercise: Using your code from "Python Basics", implement sigmoid(). As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
End of explanation
"""
# GRADED FUNCTION: initialize_with_zeros

def initialize_with_zeros(dim):
    """
    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.

    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    ### START CODE HERE ### (≈ 1 line of code)
    w = np.zeros((dim, 1))   # one weight per input feature, as a column vector
    b = 0                    # plain int so the isinstance assert below holds
    ### END CODE HERE ###

    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))

    return w, b

dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
"""
Explanation: Expected Output:
<table>
<tr>
<td>**sigmoid([0, 2])**</td>
<td> [ 0.5 0.88079708]</td>
</tr>
</table>
4.2 - Initializing parameters
Exercise: Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
End of explanation
"""
# GRADED FUNCTION: propagate

def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b

    Tips:
    - Write your code step by step for the propagation. np.log(), np.dot()
    """
    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    ### START CODE HERE ### (≈ 2 lines of code)
    # sigmoid written inline so this cell does not depend on another cell
    A = 1. / (1. + np.exp(-(np.dot(w.T, X) + b)))                  # compute activation
    cost = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m    # compute cost
    ### END CODE HERE ###

    # BACKWARD PROPAGATION (TO FIND GRAD)
    ### START CODE HERE ### (≈ 2 lines of code)
    dw = np.dot(X, (A - Y).T) / m   # dJ/dw = X(A-Y)^T / m  (formula 7)
    db = np.sum(A - Y) / m          # dJ/db = mean residual  (formula 8)
    ### END CODE HERE ###

    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())

    grads = {"dw": dw,
             "db": db}

    return grads, cost

w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
"""
Explanation: Expected Output:
<table style="width:15%">
<tr>
<td> ** w ** </td>
<td> [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td> ** b ** </td>
<td> 0 </td>
</tr>
</table>
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
Exercise: Implement a function propagate() that computes the cost function and its gradient.
Hints:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
End of explanation
"""
# GRADED FUNCTION: optimize

def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """
    This function optimizes w and b by running a gradient descent algorithm

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.

    Tips:
    You basically need to write down two steps and iterate through them:
        1) Calculate the cost and the gradient for the current parameters. Use propagate().
        2) Update the parameters using gradient descent rule for w and b.
    """
    costs = []

    for i in range(num_iterations):

        # Cost and gradient calculation (≈ 1-4 lines of code)
        ### START CODE HERE ###
        grads, cost = propagate(w, b, X, Y)
        ### END CODE HERE ###

        # Retrieve derivatives from grads
        dw = grads["dw"]
        db = grads["db"]

        # update rule (≈ 2 lines of code)
        ### START CODE HERE ###
        # Step against the gradient, scaled by the learning rate
        w = w - learning_rate * dw
        b = b - learning_rate * db
        ### END CODE HERE ###

        # Record the costs
        if i % 100 == 0:
            costs.append(cost)

        # Print the cost every 100 training examples
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))

    params = {"w": w,
              "b": b}

    grads = {"dw": dw,
             "db": db}

    return params, grads, costs
# Demo: w, b, X, Y here come from the propagate() cell above
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
"""
Explanation: Expected Output:
<table style="width:50%">
<tr>
<td> ** dw ** </td>
<td> [[ 0.99993216]
[ 1.99980262]]</td>
</tr>
<tr>
<td> ** db ** </td>
<td> 0.499935230625 </td>
</tr>
<tr>
<td> ** cost ** </td>
<td> 6.000064773192205</td>
</tr>
</table>
d) Optimization
You have initialized your parameters.
You are also able to compute a cost function and its gradient.
Now, you want to update the parameters using gradient descent.
Exercise: Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
End of explanation
"""
# GRADED FUNCTION: predict

def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros((1,m))
    w = w.reshape(X.shape[0], 1)

    # Compute vector "A" predicting the probabilities of a cat being present in the picture
    ### START CODE HERE ### (≈ 1 line of code)
    A = 1. / (1. + np.exp(-(np.dot(w.T, X) + b)))   # sigmoid, written inline
    ### END CODE HERE ###

    for i in range(A.shape[1]):
        # Convert probabilities A[0,i] to actual predictions p[0,i]
        ### START CODE HERE ### (≈ 4 lines of code)
        # Threshold at 0.5: strictly greater maps to class 1
        if A[0, i] > 0.5:
            Y_prediction[0, i] = 1
        else:
            Y_prediction[0, i] = 0
        ### END CODE HERE ###

    assert(Y_prediction.shape == (1, m))

    return Y_prediction
# Demo: relies on w, b, X defined in the earlier propagate/optimize cells
print ("predictions = " + str(predict(w, b, X)))
"""
Explanation: Expected Output:
<table style="width:40%">
<tr>
<td> **w** </td>
<td>[[ 0.1124579 ]
[ 0.23106775]] </td>
</tr>
<tr>
<td> **b** </td>
<td> 1.55930492484 </td>
</tr>
<tr>
<td> **dw** </td>
<td> [[ 0.90158428]
[ 1.76250842]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 0.430462071679 </td>
</tr>
</table>
Exercise: The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the predict() function. There are two steps to computing predictions:
Calculate $\hat{Y} = A = \sigma(w^T X + b)$
Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector Y_prediction. If you wish, you can use an if/else statement in a for loop (though there is also a way to vectorize this).
End of explanation
"""
# GRADED FUNCTION: model

def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    ### START CODE HERE ###
    # initialize parameters with zeros (≈ 1 line of code)
    # X_train.shape[0] is the number of features (num_px * num_px * 3)
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    ### END CODE HERE ###

    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train" : Y_prediction_train,
         "w" : w,
         "b" : b,
         "learning_rate" : learning_rate,
         "num_iterations": num_iterations}

    return d
"""
Explanation: Expected Output:
<table style="width:30%">
<tr>
<td>
**predictions**
</td>
<td>
[[ 1. 1.]]
</td>
</tr>
</table>
<font color='blue'>
What to remember:
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples
5 - Merge all functions into a model
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
Exercise: Implement the model function. Use the following notation:
- Y_prediction for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()
End of explanation
"""
# Train the full model with the course's default hyperparameters
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
"""
Explanation: Run the following cell to train your model.
End of explanation
"""
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
# Cast the prediction to int before indexing: predictions are stored as
# floats (0.0/1.0) and indexing `classes` with a float raises an error
# in modern NumPy.
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[int(d["Y_prediction_test"][0,index])].decode("utf-8") + "\" picture.")
"""
Explanation: Expected Output:
<table style="width:40%">
<tr>
<td> **Train Accuracy** </td>
<td> 99.04306220095694 % </td>
</tr>
<tr>
<td>**Test Accuracy** </td>
<td> 70.0 % </td>
</tr>
</table>
Comment: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the index variable) you can look at predictions on pictures of the test set.
End of explanation
"""
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])   # one cost was recorded every 100 iterations
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
"""
Explanation: Let's also plot the cost function and the gradients.
End of explanation
"""
learning_rates = [0.01, 0.001, 0.0001]
models = {}
# Train one model per learning rate, keyed by its string form
for i in learning_rates:
    print ("learning rate is: " + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
    print ('\n' + "-------------------------------------------------------" + '\n')

# Overlay the learning curves to compare convergence behaviour
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))

plt.ylabel('cost')
plt.xlabel('iterations')

legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
"""
Explanation: Interpretation:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
6 - Further analysis (optional/ungraded exercise)
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
Choice of learning rate
Reminder:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the learning_rates variable to contain, and see what happens.
End of explanation
"""
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##

# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
# scipy.ndimage.imread and scipy.misc.imresize were removed from SciPy
# (1.2 / 1.3); Pillow (Image, imported at the top) covers both steps.
image = np.array(Image.open(fname))
my_image = np.array(Image.open(fname).resize((num_px, num_px))).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)

plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
"""
Explanation: Interpretation:
- Different learning rates give different costs and thus different predictions results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
7 - Test with your own image (optional/ungraded exercise)
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
End of explanation
"""
|
liyigerry/msm_test | mdtraj_clustering.ipynb | apache-2.0 | from __future__ import print_function
%matplotlib inline
import mdtraj as md
import numpy as np
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy
"""
Explanation: In this example, we cluster our alanine dipeptide trajectory using the RMSD distance metric and Ward's method.
End of explanation
"""
traj = md.load('material/md.pdb')
"""
Explanation: Let's load up our trajectory. This is the trajectory that we generated in the "Running a simulation in OpenMM and analyzing the results with mdtraj" example. The first step is to build the rmsd cache, which precalculates some values for the RMSD computation.
End of explanation
"""
# Build the full pairwise RMSD matrix: row i holds the RMSD of every frame
# in the trajectory relative to frame i.
n_frames = traj.n_frames
distances = np.empty((n_frames, n_frames))
for ref_frame in range(n_frames):
    distances[ref_frame] = md.rmsd(traj, traj, ref_frame)

print('Max pairwise rmsd: %f nm' % distances.max())
"""
Explanation: Let's compute all pairwise RMSDs between conformations.
End of explanation
"""
linkage = scipy.cluster.hierarchy.ward(distances)
"""
Explanation: scipy.cluster implements the ward linkage algorithm (among others)
End of explanation
"""
plt.title('RMSD Ward hierarchical clustering')
# Suppress per-frame labels (too many to read) and sort sibling branches by size.
scipy.cluster.hierarchy.dendrogram(linkage, no_labels=True, count_sort='descendent')
# Bare None suppresses the dendrogram's large dict return value in the notebook output.
None
"""
Explanation: Let's plot the resulting dendrogram.
End of explanation
"""
|
AndreySheka/dl_ekb | hw10/Bonus-handcrafted-rnn.ipynb | mit | start_token = " "
with open("names") as f:
names = f.read()[:-1].split('\n')
names = [start_token+name for name in names]
print 'n samples = ',len(names)
for x in names[::1000]:
print x
"""
Explanation: Generate names
Struggle to find a name for the variable? Let's see how you'll come up with a name for your son/daughter. Surely no human has expertize over what is a good child name, so let us train NN instead.
Dataset contains ~8k human names from different cultures[in latin transcript]
Objective (toy problem): learn a generative model over names.
End of explanation
"""
#all unique characters go here
# Build the character vocabulary from every letter that appears in any name.
token_set = set()
for name in names:
    for letter in name:
        token_set.add(letter)

tokens = list(token_set)
# NOTE: set iteration order is arbitrary, so token ids differ between runs.
print 'n_tokens = ',len(tokens)

#!token_to_id = <dictionary of symbol -> its identifier (index in tokens list)>
token_to_id = {t:i for i,t in enumerate(tokens) }

#!id_to_token = < dictionary of symbol identifier -> symbol itself>
id_to_token = {i:t for i,t in enumerate(tokens)}

import matplotlib.pyplot as plt
%matplotlib inline
# Histogram of name lengths, used to pick a sensible MAX_LEN below.
plt.hist(map(len,names),bins=25);

# truncate names longer than MAX_LEN characters.
MAX_LEN = min([60,max(list(map(len,names)))])
#ADJUST IF YOU ARE UP TO SOMETHING SERIOUS
"""
Explanation: Text processing
End of explanation
"""
# Encode every name as a list of token ids.
names_ix = [[token_to_id.get(letter) for letter in name] for name in names]

# Crop sequences longer than MAX_LEN and right-pad shorter ones with the id of
# the start/padding token (" ") so every row ends up exactly MAX_LEN long.
pad_id = token_to_id[" "]
names_ix = [seq[:MAX_LEN] + [pad_id] * (MAX_LEN - len(seq[:MAX_LEN]))
            for seq in names_ix]

# All rows must now share a single length before stacking into a matrix.
assert len({len(seq) for seq in names_ix}) == 1

names_ix = np.array(names_ix)
"""
Explanation: Cast everything from symbols into identifiers
End of explanation
"""
from agentnet import Recurrence
from lasagne.layers import *
from agentnet.memory import *
from agentnet.resolver import ProbabilisticResolver
# Integer matrix of token ids, one row per name: (batch, time).
sequence = T.matrix('token sequence','int64')

# Teacher forcing: the network sees tokens [0..T-2] and must predict [1..T-1].
inputs = sequence[:,:-1]
targets = sequence[:,1:]

l_input_sequence = InputLayer(shape=(None, None),input_var=inputs)
"""
Explanation: Input variables
End of explanation
"""
###One step of rnn
class step:
    """One tick of the recurrent network: (current char, previous hidden state)
    -> (new hidden state, next-token probabilities, sampled next token)."""
    #inputs
    inp = InputLayer((None,),name='current character')
    h_prev = InputLayer((None,10),name='previous rnn state')  # hidden size 10

    #recurrent part
    emb = EmbeddingLayer(inp, len(tokens), 30,name='emb')  # 30-dim char embedding
    h_new = RNNCell(h_prev,emb,name="rnn") #just concat -> denselayer

    # Softmax over the vocabulary for the next character.
    next_token_probas = DenseLayer(h_new,len(tokens),nonlinearity=T.nnet.softmax)

    #pick next token from predicted probas
    next_token = ProbabilisticResolver(next_token_probas)
# Unroll `step` over the input sequence; h_new feeds back into h_prev each tick.
training_loop = Recurrence(
    state_variables={step.h_new:step.h_prev},
    input_sequences={step.inp:l_input_sequence},
    tracked_outputs=[step.next_token_probas,],
    unroll_scan=False,
)

# Model weights
weights = lasagne.layers.get_all_params(training_loop,trainable=True)
print weights

predicted_probabilities = lasagne.layers.get_output(training_loop[step.next_token_probas])
#If you use dropout do not forget to create deterministic version for evaluation

# Flatten (batch, time, vocab) -> (batch*time, vocab) to match the flat targets.
loss = lasagne.objectives.categorical_crossentropy(predicted_probabilities.reshape((-1,len(tokens))),
                                                   targets.reshape((-1,))).mean()

#<Loss function - a simple categorical crossentropy will do, maybe add some regularizer>
updates = lasagne.updates.adam(loss,weights)
"""
Explanation: Build NN
You'll be building a model that takes token sequence and predicts next tokens at each tick
This is basically equivalent to how rnn step was described in the lecture
End of explanation
"""
#training
# Compile one SGD step: sequence batch in, mean crossentropy out, Adam updates
# applied (plus the Recurrence's own automatic updates, e.g. RNG state).
train_step = theano.function([sequence], loss,
                             updates=training_loop.get_automatic_updates()+updates)
"""
Explanation: Compiling it
End of explanation
"""
# Number of characters to sample at generation time.
n_steps = T.scalar(dtype='int32')

# Generation graph: unlike training, the sampled next_token is fed back
# in as the next input character (no ground-truth sequence).
feedback_loop = Recurrence(
    state_variables={step.h_new:step.h_prev,
                     step.next_token:step.inp},
    tracked_outputs=[step.next_token_probas,],
    batch_size=theano.shared(1),  # generate one sequence at a time
    n_steps=n_steps,
    unroll_scan=False,
)

generated_tokens = get_output(feedback_loop[step.next_token])

# Automatic updates carry the sampler's RNG state between calls.
generate_sample = theano.function([n_steps],generated_tokens,updates=feedback_loop.get_automatic_updates())
def generate_string(length=MAX_LEN):
    """Sample `length` characters from the trained network and decode them
    back into a string via the `tokens` vocabulary."""
    sampled_ids = generate_sample(length)[0]  # batch size is 1; take row 0
    return ''.join(map(tokens.__getitem__, sampled_ids))

generate_string()
"""
Explanation: generation
here we re-wire the recurrent network so that it's output is fed back to it's input
End of explanation
"""
def sample_batch(data, batch_size):
    """Return `batch_size` rows drawn uniformly at random, with replacement,
    from `data` (numpy fancy indexing on random row indices)."""
    picks = np.random.randint(0, len(data), size=batch_size)
    return data[picks]
print("Training ...")

#total N iterations
n_epochs=100

# how many minibatches are there in the epoch
batches_per_epoch = 500

#how many training sequences are processed in a single function call
batch_size=10

# NOTE: xrange and statement-form print -- this loop targets Python 2.
for epoch in xrange(n_epochs):
    avg_cost = 0;
    for _ in range(batches_per_epoch):
        avg_cost += train_step(sample_batch(names_ix,batch_size))

    print("\n\nEpoch {} average loss = {}".format(epoch, avg_cost / batches_per_epoch))
    # Sample a few names after each epoch to eyeball training progress
    # (trailing comma keeps them on one line in Python 2).
    print "Generated names"
    for i in range(10):
        print generate_string(),
"""
Explanation: Model training
Here you can tweak parameters or insert your generation function
Once something word-like starts generating, try increasing seq_length
End of explanation
"""
|
samuelshaner/openmc | docs/source/pythonapi/examples/mdgxs-part-ii.ipynb | mit | import math
import pickle
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
import openmc
import openmc.mgxs
import openmoc
import openmoc.process
from openmoc.opencg_compatible import get_openmoc_geometry
from openmoc.materialize import load_openmc_mgxs_lib
%matplotlib inline
"""
Explanation: This IPython Notebook illustrates the use of the openmc.mgxs.Library class. The Library class is designed to automate the calculation of multi-group cross sections for use cases with one or more domains, cross section types, and/or nuclides. In particular, this Notebook illustrates the following features:
Calculation of multi-energy-group and multi-delayed-group cross sections for a fuel assembly
Automated creation, manipulation and storage of MGXS with openmc.mgxs.Library
Steady-state pin-by-pin delayed neutron fractions (beta) for each delayed group.
Generation of surface currents on the interfaces and surfaces of a Mesh.
Generate Input Files
End of explanation
"""
# Instantiate some Nuclides
# These are reused below to build the fuel, moderator, and cladding materials.
h1 = openmc.Nuclide('H1')
b10 = openmc.Nuclide('B10')
o16 = openmc.Nuclide('O16')
u235 = openmc.Nuclide('U235')
u238 = openmc.Nuclide('U238')
zr90 = openmc.Nuclide('Zr90')
"""
Explanation: First we need to define materials that will be used in the problem. Before defining a material, we must create nuclides that are used in the material.
End of explanation
"""
# 1.6 enriched fuel
# Nuclide fractions below are atom densities (atom/b-cm) for UO2 at 1.6 w/o U-235.
fuel = openmc.Material(name='1.6% Fuel')
fuel.set_density('g/cm3', 10.31341)
fuel.add_nuclide(u235, 3.7503e-4)
fuel.add_nuclide(u238, 2.2625e-2)
fuel.add_nuclide(o16, 4.6007e-2)

# borated water
# Light-water moderator/coolant with dissolved boron (B-10 absorber).
water = openmc.Material(name='Borated Water')
water.set_density('g/cm3', 0.740582)
water.add_nuclide(h1, 4.9457e-2)
water.add_nuclide(o16, 2.4732e-2)
water.add_nuclide(b10, 8.0042e-6)

# zircaloy
# Simplified single-nuclide (Zr-90) cladding material.
zircaloy = openmc.Material(name='Zircaloy')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_nuclide(zr90, 7.2758e-3)
"""
Explanation: With the nuclides we defined, we will now create three materials for the fuel, water, and cladding of the fuel pins.
End of explanation
"""
# Instantiate a Materials object
materials_file = openmc.Materials((fuel, water, zircaloy))
# Use the 71c (ENDF/B-VII @ 600K) cross-section suffix for all materials.
materials_file.default_xs = '71c'

# Export to "materials.xml"
materials_file.export_to_xml()
"""
Explanation: With our three materials, we can now create a Materials object that can be exported to an actual XML file.
End of explanation
"""
# Create cylinders for the fuel and clad
# Radii in cm; the fuel/clad gap is neglected in this simplified model.
fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.39218)
clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.45720)

# Create boundary planes to surround the geometry
# Reflective boundaries make the 17x17 assembly an infinite lattice.
min_x = openmc.XPlane(x0=-10.71, boundary_type='reflective')
max_x = openmc.XPlane(x0=+10.71, boundary_type='reflective')
min_y = openmc.YPlane(y0=-10.71, boundary_type='reflective')
max_y = openmc.YPlane(y0=+10.71, boundary_type='reflective')
min_z = openmc.ZPlane(z0=-10., boundary_type='reflective')
max_z = openmc.ZPlane(z0=+10., boundary_type='reflective')
"""
Explanation: Now let's move on to the geometry. This problem will be a square array of fuel pins and control rod guide tubes for which we can use OpenMC's lattice/universe feature. The basic universe will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces for fuel and clad, as well as the outer bounding surfaces of the problem.
End of explanation
"""
# Create a Universe to encapsulate a fuel pin
# Three concentric regions: fuel pellet, clad annulus, surrounding moderator.
fuel_pin_universe = openmc.Universe(name='1.6% Fuel Pin')

# Create fuel Cell: everything inside the fuel outer radius.
fuel_cell = openmc.Cell(name='1.6% Fuel')
fuel_cell.fill = fuel
fuel_cell.region = -fuel_outer_radius
fuel_pin_universe.add_cell(fuel_cell)

# Create a clad Cell: annulus between fuel and clad outer radii.
clad_cell = openmc.Cell(name='1.6% Clad')
clad_cell.fill = zircaloy
clad_cell.region = +fuel_outer_radius & -clad_outer_radius
fuel_pin_universe.add_cell(clad_cell)

# Create a moderator Cell: water outside the clad, unbounded within the universe.
moderator_cell = openmc.Cell(name='1.6% Moderator')
moderator_cell.fill = water
moderator_cell.region = +clad_outer_radius
fuel_pin_universe.add_cell(moderator_cell)
"""
Explanation: With the surfaces defined, we can now construct a fuel pin cell from cells that are defined by intersections of half-spaces created by the surfaces.
End of explanation
"""
# Create a Universe to encapsulate a control rod guide tube
# Same radii as the fuel pin, but the central region is water instead of fuel.
guide_tube_universe = openmc.Universe(name='Guide Tube')

# Create guide tube Cell: water-filled interior.
guide_tube_cell = openmc.Cell(name='Guide Tube Water')
guide_tube_cell.fill = water
guide_tube_cell.region = -fuel_outer_radius
guide_tube_universe.add_cell(guide_tube_cell)

# Create a clad Cell
clad_cell = openmc.Cell(name='Guide Clad')
clad_cell.fill = zircaloy
clad_cell.region = +fuel_outer_radius & -clad_outer_radius
guide_tube_universe.add_cell(clad_cell)

# Create a moderator Cell
moderator_cell = openmc.Cell(name='Guide Tube Moderator')
moderator_cell.fill = water
moderator_cell.region = +clad_outer_radius
guide_tube_universe.add_cell(moderator_cell)
"""
Explanation: Likewise, we can construct a control rod guide tube with the same surfaces.
End of explanation
"""
# Create fuel assembly Lattice
assembly = openmc.RectLattice(name='1.6% Fuel Assembly')
assembly.pitch = (1.26, 1.26)
# Center the 17x17 lattice on the origin (half-width = 17 pitches / 2).
assembly.lower_left = [-1.26 * 17. / 2.0] * 2
"""
Explanation: Using the pin cell universe, we can construct a 17x17 rectangular lattice with a 1.26 cm pitch.
End of explanation
"""
# Create array indices for guide tube locations in lattice
# (template_x[i], template_y[i]) pairs give the 25 standard PWR guide-tube
# positions within the 17x17 lattice.
template_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8,
                       11, 14, 2, 5, 8, 11, 14, 3, 13, 5, 8, 11])
template_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8,
                       8, 11, 11, 11, 11, 11, 13, 13, 14, 14, 14])

# Initialize an empty 17x17 array of the lattice universes
universes = np.empty((17, 17), dtype=openmc.Universe)

# Fill the array with the fuel pin and guide tube universes
universes[:,:] = fuel_pin_universe
universes[template_x, template_y] = guide_tube_universe

# Store the array of universes in the lattice
assembly.universes = universes
"""
Explanation: Next, we create a NumPy array of fuel pin and guide tube universes for the lattice.
End of explanation
"""
# Create root Cell: fills the whole bounded problem with the assembly lattice.
root_cell = openmc.Cell(name='root cell')
root_cell.fill = assembly

# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z

# Create root Universe (id 0 is OpenMC's required root universe id here).
root_universe = openmc.Universe(universe_id=0, name='root universe')
root_universe.add_cell(root_cell)
"""
Explanation: OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the pin cell universe and then assign it to the root universe.
End of explanation
"""
# Create Geometry and set root Universe
geometry = openmc.Geometry()
geometry.root_universe = root_universe

# Export to "geometry.xml"
geometry.export_to_xml()
"""
Explanation: We now must create a geometry that is assigned a root universe and export it to XML.
End of explanation
"""
# OpenMC simulation parameters
# 50 total batches with 10 inactive => 40 active batches of 2500 particles.
batches = 50
inactive = 10
particles = 2500

# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
settings_file.output = {'tallies': False}  # skip the (large) tallies.out file

# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-10.71, -10.71, -10, 10.71, 10.71, 10.]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)

# Export to "settings.xml"
settings_file.export_to_xml()
"""
Explanation: With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles.
End of explanation
"""
# Instantiate a Plot: an x-y material map of the full assembly at z=0.
plot = openmc.Plot(plot_id=1)
plot.filename = 'materials-xy'
plot.origin = [0, 0, 0]
plot.pixels = [250, 250]
plot.width = [-10.71*2, -10.71*2]
plot.color = 'mat'  # color by material rather than by cell

# Instantiate a Plots object, add Plot, and export to "plots.xml"
plot_file = openmc.Plots([plot])
plot_file.export_to_xml()
"""
Explanation: Let us also create a Plots file that we can use to verify that our fuel assembly geometry was created successfully.
End of explanation
"""
# Run openmc in plotting mode
openmc.plot_geometry(output=False)

# Convert OpenMC's funky ppm to png
# (IPython shell magic -- requires ImageMagick's `convert` on the PATH.)
!convert materials-xy.ppm materials-xy.png

# Display the materials plot inline
Image(filename='materials-xy.png')
"""
Explanation: With the plots.xml file, we can now generate and view the plot. OpenMC outputs plots in .ppm format, which can be converted into a compressed format like .png with the convert utility.
End of explanation
"""
# Instantiate a 20-group EnergyGroups object
# 21 logarithmically spaced edges from 1e-3 eV to ~2e7 eV give 20 groups.
energy_groups = openmc.mgxs.EnergyGroups()
energy_groups.group_edges = np.logspace(-3, 7.3, 21)

# Instantiate a 1-group EnergyGroups object spanning the same total range,
# used later to energy-condense the delayed neutron fraction.
one_group = openmc.mgxs.EnergyGroups()
one_group.group_edges = np.array([energy_groups.group_edges[0], energy_groups.group_edges[-1]])

# Instantiate a 6-delayed-group list (groups are 1-indexed: 1..6)
delayed_groups = list(range(1,7))
"""
Explanation: As we can see from the plot, we have a nice array of fuel and guide tube pin cells with fuel, cladding, and water!
Create an MGXS Library
Now we are ready to generate multi-group cross sections! First, let's define 20-energy-group, 1-energy-group, and 6-delayed-group structures.
End of explanation
"""
# Instantiate a tally mesh
# One mesh cell per pin (17x17); z extent is effectively infinite.
mesh = openmc.Mesh(mesh_id=1)
mesh.type = 'regular'
mesh.dimension = [17, 17, 1]
mesh.lower_left = [-10.71, -10.71, -10000.]
mesh.width = [1.26, 1.26, 20000.]

# Initialize an 20-energy-group and 6-delayed-group MGXS Library
mgxs_lib = openmc.mgxs.Library(geometry)
mgxs_lib.energy_groups = energy_groups
mgxs_lib.delayed_groups = delayed_groups

# Specify multi-group cross section types to compute
mgxs_lib.mgxs_types = ['total', 'transport', 'nu-scatter matrix', 'kappa-fission', 'inverse-velocity', 'chi-prompt',
                       'prompt-nu-fission', 'chi-delayed', 'delayed-nu-fission', 'beta']

# Specify a "mesh" domain type for the cross section tally filters
mgxs_lib.domain_type = 'mesh'

# Specify the mesh domain over which to compute multi-group cross sections
mgxs_lib.domains = [mesh]

# Construct all tallies needed for the multi-group cross section library
mgxs_lib.build_library()

# Create a "tallies.xml" file for the MGXS Library
tallies_file = openmc.Tallies()
# merge=True combines compatible tallies to reduce tally overhead.
mgxs_lib.add_to_tallies_file(tallies_file, merge=True)

# Instantiate a current tally to score surface currents on the same mesh.
mesh_filter = openmc.MeshFilter(mesh)
current_tally = openmc.Tally(name='current tally')
current_tally.scores = ['current']
current_tally.filters = [mesh_filter]

# Add current tally to the tallies file
tallies_file.append(current_tally)

# Export to "tallies.xml"
tallies_file.export_to_xml()
"""
Explanation: Next, we will instantiate an openmc.mgxs.Library for the energy and delayed groups with our the fuel assembly geometry.
End of explanation
"""
# Run OpenMC (reads the materials/geometry/settings/tallies XML files
# exported above and writes statepoint/summary output files).
openmc.run()
"""
Explanation: Now, we can run OpenMC to generate the cross sections.
End of explanation
"""
# Load the last statepoint file (batch 50, matching `batches` above).
sp = openmc.StatePoint('statepoint.50.h5')
"""
Explanation: Tally Data Processing
Our simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a StatePoint object.
End of explanation
"""
# Initialize MGXS Library with OpenMC statepoint data
mgxs_lib.load_from_statepoint(sp)

# Extract the current tally separately (it is not part of the MGXS library)
current_tally = sp.get_tally(name='current tally')
"""
Explanation: The statepoint is now ready to be analyzed by the Library. We simply have to load the tallies from the statepoint into the Library and our MGXS objects will compute the cross sections for us under-the-hood.
End of explanation
"""
# Set the time constants for the delayed precursors (in seconds^-1)
# Half-lives (s) for the six standard delayed groups; decay constant
# lambda = ln(2) / half-life.
precursor_halflife = np.array([55.6, 24.5, 16.3, 2.37, 0.424, 0.195])
precursor_lambda = -np.log(0.5) / precursor_halflife

beta = mgxs_lib.get_mgxs(mesh, 'beta')

# Create a tally object with only the delayed group filter for the time constants
beta_filters = [f for f in beta.xs_tally.filters if type(f) is not openmc.DelayedGroupFilter]
lambda_tally = beta.xs_tally.summation(nuclides=beta.xs_tally.nuclides)
for f in beta_filters:
    # Summing out each non-delayed-group filter, then *0+1, yields a tally of
    # ones with only the delayed-group structure left -- a template whose mean
    # can be overwritten with the decay constants below.
    lambda_tally = lambda_tally.summation(filter_type=type(f), remove_filter=True) * 0. + 1.

# Set the mean of the lambda tally and reshape to account for nuclides and scores
lambda_tally._mean = precursor_lambda
lambda_tally._mean.shape = lambda_tally.std_dev.shape

# Set a total nuclide and lambda score
lambda_tally.nuclides = [openmc.Nuclide(name='total')]
lambda_tally.scores = ['lambda']

delayed_nu_fission = mgxs_lib.get_mgxs(mesh, 'delayed-nu-fission')

# Use tally arithmetic to compute the precursor concentrations:
# C_d = beta_d * (delayed nu-fission rate) / lambda_d, energy-integrated.
precursor_conc = beta.xs_tally.summation(filter_type=openmc.EnergyFilter, remove_filter=True) * \
    delayed_nu_fission.xs_tally.summation(filter_type=openmc.EnergyFilter, remove_filter=True) / lambda_tally

# The difference is a derived tally which can generate Pandas DataFrames for inspection
precursor_conc.get_pandas_dataframe().head(10)
"""
Explanation: Using Tally Arithmetic to Compute the Delayed Neutron Precursor Concentrations
Finally, we illustrate how one can leverage OpenMC's tally arithmetic data processing feature with MGXS objects. The openmc.mgxs module uses tally arithmetic to compute multi-group cross sections with automated uncertainty propagation. Each MGXS object includes an xs_tally attribute which is a "derived" Tally based on the tallies needed to compute the cross section type of interest. These derived tallies can be used in subsequent tally arithmetic operations. For example, we can use tally artithmetic to compute the delayed neutron precursor concentrations using the Beta and DelayedNuFissionXS objects. The delayed neutron precursor concentrations are modeled using the following equations:
$$\frac{\partial}{\partial t} C_{k,d} (t) = \int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r} \beta_{k,d} (t) \nu_d \sigma_{f,x}(\mathbf{r},E',t)\Phi(\mathbf{r},E',t) - \lambda_{d} C_{k,d} (t) $$
$$C_{k,d} (t=0) = \frac{1}{\lambda_{d}} \int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r} \beta_{k,d} (t=0) \nu_d \sigma_{f,x}(\mathbf{r},E',t=0)\Phi(\mathbf{r},E',t=0) $$
End of explanation
"""
current_tally.get_pandas_dataframe().head(10)
"""
Explanation: Another useful feature of the Python API is the ability to extract the surface currents for the interfaces and surfaces of a mesh. We can inspect the currents for the mesh by getting the pandas dataframe.
End of explanation
"""
# Extract the energy-condensed delayed neutron fraction tally
beta_by_group = beta.get_condensed_xs(one_group).xs_tally.summation(filter_type='energy', remove_filter=True)

# Reshape to (x, y, delayed group) and blank out zero-valued (non-fissile,
# e.g. guide tube) cells so they render as gaps rather than zeros.
beta_by_group.mean.shape = (17, 17, 6)
beta_by_group.mean[beta_by_group.mean == 0] = np.nan

# Plot the delayed neutron fraction of each of the six delayed groups on a
# 2x3 grid of subplots (one loop instead of six copy-pasted plotting stanzas).
plt.figure(figsize=(18,9))
for group in range(6):
    plt.subplot(2, 3, group + 1)
    plt.imshow(beta_by_group.mean[:,:,group], interpolation='none', cmap='jet')
    plt.colorbar()
    plt.title('Beta - delayed group {}'.format(group + 1))
"""
Explanation: Cross Section Visualizations
In addition to inspecting the data in the tallies by getting the pandas dataframe, we can also plot the tally data on the domain mesh. Below is the delayed neutron fraction tallied in each mesh cell for each delayed group.
End of explanation
"""
|
ronnydw/data-science-projects | class-central-survey-2016-17/Class Central Survey - Latin America vs Rest of the World.ipynb | mit | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
sns.set_context("talk")
"""
Explanation: Class Central Survey: compare target group 'Latin America' with the rest of the sample
End of explanation
"""
df = pd.read_csv('raw/2016-17-ClassCentral-Survey-data-noUserText.csv', decimal=',', encoding = "ISO-8859-1")
"""
Explanation: Read the survey data
End of explanation
"""
# Show how many respondents fall in each region before building the mask.
df['Which region of the world are you in?'].value_counts()

target_name = 'Latin America'
# Latin America = Central/South America plus Mexico (Mexico is a separate
# answer option in the survey).
mask_latin_america = (df['Which region of the world are you in?'] == 'Central or South America') | \
                     (df['Which region of the world are you in?'] =='Mexico')
"""
Explanation: Create target group 'Latin America'
End of explanation
"""
def binary_compare_categorical_barh(mask, feature, df=df,
                                   target_name='target', nontarget_name='Other',
                                   split_name='visitor', answer='answer'):
    """Compare answer distributions for one categorical survey question.

    Splits ``df`` into a target group (rows where ``mask`` is True) and the
    remaining rows, then draws the per-answer percentages of both groups
    side by side as horizontal bars.
    """
    in_group = df[mask]
    out_group = df[~mask]

    # Per-answer shares within each group, expressed in percent.
    pct_in = in_group[feature].value_counts() / len(in_group) * 100
    pct_out = out_group[feature].value_counts() / len(out_group) * 100

    result = pd.DataFrame({target_name: pct_in, nontarget_name: pct_out})
    result[answer] = result.index

    # Long format: one row per (answer, group) pair for seaborn.
    res_df = pd.melt(result, id_vars=answer, var_name=split_name, value_name='percentage')
    print(res_df)

    sns.factorplot(x='percentage', y=answer, hue=split_name, data=res_df,
                   kind='bar', orient='h', size=6, aspect=2)
    plt.title(feature)
    sns.despine(left=True, bottom=True)
    plt.show()
    return
"""
Explanation: Generic function to plot barchart for any categorical feature on any target/nontarget split
End of explanation
"""
def binary_compare_multi_select_categorical_barh(df, target, target_name, question, selectors, nontarget_name = 'Others'):
    """Draw a barchart for survey results on a question that allows selecting
    multiple categories.

    df: dataframe to use
    target: boolean mask selecting the target rows of df
    target_name: legend label for the target group
    question: the question you want to analyse (used as the plot title)
    selectors: list of df columns containing the selectors (values 0/1)
    nontarget_name: legend label for the remaining rows
    """
    size = {}
    target_df = df[target]
    nontarget_df = df[~target]
    size[target_name], size[nontarget_name] = len(target_df), len(nontarget_df)
    print(size)

    # Tag each group's selector columns with a 'target' label, then stack them.
    graph_targetdata = target_df.loc[:, selectors]
    graph_targetdata['target'] = target_name
    graph_nontargetdata = nontarget_df.loc[:, selectors]
    graph_nontargetdata['target'] = nontarget_name
    graph_data = pd.concat([graph_targetdata, graph_nontargetdata])

    melted = pd.melt(graph_data, id_vars='target', var_name='select', value_name='percentage')
    grouped = melted.groupby(['target', 'select'], as_index=False).sum()

    # Express the 0/1 selection counts as a percentage of each group's size.
    grouped.percentage = grouped.percentage/grouped['target'].map(size)*100
    grouped['select'] = grouped['select'].apply(lambda x: x.split(": ")[1]) # remove prefix from string
    print(grouped)

    sns.factorplot(x='percentage', y='select', hue='target', data=grouped, kind='bar', orient='h', size=6, aspect=2)
    # Bug fix: `sns.plt` was never a public seaborn API and was removed in
    # seaborn 0.8 -- use matplotlib's pyplot directly, consistent with
    # binary_compare_categorical_barh above.
    plt.title(question)
    sns.despine(left=True, bottom=True)
    plt.show()
"""
Explanation: Generic function to plot barchart for any multi-categorical feature on any target/nontarget split
End of explanation
"""
# Compare MOOC familiarity between Latin America and the rest of the sample.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America', feature='How familiar are you with MOOCs?')
"""
Explanation: Apply this plot on the target 'Latin America' for some categorical features
End of explanation
"""
# Importance of earning a certificate, split by region.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America', feature='How important is the ability to earn a certificate when you complete a MOOC?')
"""
Explanation: Latin American visitors are more familiar with MOOCs than other visitors of Class Central
End of explanation
"""
# Multi-select question: reasons for taking MOOCs (0/1 selector columns).
reasons = ['Reasons: Learning skills for current career', 'Reasons: Learning skills for new career',
           'Reasons: School credit', 'Reasons: Personal interest', 'Reasons: Access to reference materials']
binary_compare_multi_select_categorical_barh(df, target=mask_latin_america, target_name='Latin America',
                                             question='Which of the following are important reasons for you to take MOOCs?',
                                             selectors=reasons)
"""
Explanation: Latin American visitors find it more important than other visitors of Class Central
End of explanation
"""
# Multi-select question: factors in deciding which MOOC to take.
decisions = ['Decide: Topic/Subject', 'Decide: Instructor', 'Decide: Institution/university',
             'Decide: Platform', 'Decide: Ratings', 'Decide: Others recommendations']
binary_compare_multi_select_categorical_barh(df, target=mask_latin_america, target_name='Latin America',
                                             question='Which are the most important factors in deciding which MOOC to take?',
                                             selectors=decisions)
"""
Explanation: Latin Americans indicate more reasons than others to follow MOOCs, personal interest and learning skills for current career being the most important
End of explanation
"""
# Multi-select question: important social aspects of the MOOC experience.
aspects = ['Aspects: Browsing discussion forums',
           'Aspects: Actively contributing to discussion forums',
           'Aspects: Connecting with other learners in the course environment',
           'Aspects: Connecting with learners outside the course environment',
           'Aspects: Taking the course with other people you know (friends, colleagues, etc.)']
binary_compare_multi_select_categorical_barh(df, target=mask_latin_america, target_name='Latin America',
                                             question='Which of the following are important aspects of the MOOC experience to you?',
                                             selectors=aspects)
"""
Explanation: The topic/subject and the Institution are the most appealing reasons to follow a MOOC
End of explanation
"""
# Multi-select question: tangible benefits received from taking MOOCs.
benefits = ['Benefit: Have not taken MOOCs',
            'Benefit: Not Really',
            'Benefit: School credit towards a degree',
            'Benefit: Promotion at current organization',
            'Benefit: Higher performance evaluation at current job',
            'Benefit: Helped me get a new job in the same field',
            'Benefit: Helped me get a new job in a different field']
binary_compare_multi_select_categorical_barh(df, target=mask_latin_america, target_name='Latin America',
                                             question='Have you received any tangible benefits from taking MOOCs?',
                                             selectors=benefits)
"""
Explanation: Connecting with other students is more important for Latin American students than for others
End of explanation
"""
# Multi-select question: factors impacting willingness to pay for a certificate.
pays = ['Pay: The topic/subject',
        'Pay: The institution/university offering the MOOC',
        'Pay: The instructor/professor',
        'Pay: The MOOC platform being used',
        'Pay: A multi-course certification that the MOOC is a part of']
binary_compare_multi_select_categorical_barh(df, target=mask_latin_america, target_name='Latin America',
                                             question='Which of the following have a strong impact on your willingness to pay for a MOOC certificate?',
                                             selectors=pays)
"""
Explanation: Higher performance evaluation at current job is perceived as benefit by 17% of the Latin American visitors
End of explanation
"""
# Number of MOOCs started, split by region.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America', feature='# MOOCs Started')
"""
Explanation: The institution/university offering the MOOC is a very important argument for Latin Americans to take MOOCs
End of explanation
"""
# Number of MOOCs finished, split by region.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America', feature='# MOOCs Finished')
"""
Explanation: 38% of the Latin American visitors have started 4 or more MOOCs
End of explanation
"""
# When respondents first started taking MOOCs, split by region.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America', feature='When did you first start taking MOOCs?')
"""
Explanation: Latin American Class Central visitors are more experienced compared to the rest of the world, they have in general started and finished more MOOCs
End of explanation
"""
# Willingness to pay for a certificate, split by region.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America', feature='How willing are you to pay for a certificate for a MOOC?')
"""
Explanation: They also started earlier with MOOCs
End of explanation
"""
# Perceived employer valuation of certificates, then education level, by region.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America',
                                feature='How much do you think employers value MOOC certificates?')
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America',
                                feature='What is your level of formal education?')
"""
Explanation: Latin American Class Central visitor show a higher willingness to pay for a MOOC certificate
End of explanation
"""
# Age distribution of respondents, split by region.
binary_compare_categorical_barh(mask=mask_latin_america,
                                target_name='Latin America',
                                feature='What is your age range?')
"""
Explanation: More than half of Latin American visitors have a graduate degree
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mri/cmip6/models/mri-agcm3-2/aerosol.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'mri-agcm3-2', 'aerosol')
"""
Explanation: ES-DOC CMIP6 Model Properties - Aerosol
MIP Era: CMIP6
Institute: MRI
Source ID: MRI-AGCM3-2
Topic: Aerosol
Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model.
Properties: 69 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:18
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Meteorological Forcings
5. Key Properties --> Resolution
6. Key Properties --> Tuning Applied
7. Transport
8. Emissions
9. Concentrations
10. Optical Radiative Properties
11. Optical Radiative Properties --> Absorption
12. Optical Radiative Properties --> Mixtures
13. Optical Radiative Properties --> Impact Of H2o
14. Optical Radiative Properties --> Radiative Scheme
15. Optical Radiative Properties --> Cloud Interactions
16. Model
1. Key Properties
Key properties of the aerosol model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of aerosol model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Prognostic variables in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of tracers in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are aerosol calculations generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Physical properties of seawater in ocean
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the time evolution of the prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the aerosol model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.5. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Meteorological Forcings
**
4.1. Variables 3D
Is Required: FALSE Type: STRING Cardinality: 0.1
Three dimensional forcing variables, e.g. U, V, W, T, Q, P, convective mass flux
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Variables 2D
Is Required: FALSE Type: STRING Cardinality: 0.1
Two dimensional forcing variables, e.g. land-sea mask definition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Frequency
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Frequency with which meteorological forcings are applied (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Resolution
Resolution in the aersosol model grid
5.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for aerosol model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Transport
Aerosol transport
7.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of transport in atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
"""
Explanation: 7.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for aerosol transport modeling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.3. Mass Conservation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to ensure mass conservation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.4. Convention
Is Required: TRUE Type: ENUM Cardinality: 1.N
Transport by convention
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Emissions
Atmospheric aerosol emissions
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of emissions in atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to define aerosol species (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the aerosol species are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prescribed Climatology
Is Required: FALSE Type: ENUM Cardinality: 0.1
Specify the climatology type for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed via a climatology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Other Method Characteristics
Is Required: FALSE Type: STRING Cardinality: 0.1
Characteristics of the "other method" used for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Concentrations
Atmospheric aerosol concentrations
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of concentrations in atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Prescribed Fields Mmr
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as mass mixing ratios.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# Fix: the generated id duplicated 9.4's 'prescribed_fields_mmr'; property 9.5
# ("List of species prescribed as AOD plus CCNs") is 'prescribed_fields_aod'.
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')

# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Prescribed Fields Aod
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as AOD plus CCNs.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Optical Radiative Properties
Aerosol optical and radiative properties
10.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of optical and radiative properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11. Optical Radiative Properties --> Absorption
Absorption properties in aerosol scheme
11.1. Black Carbon
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Dust
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.3. Organics
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Optical Radiative Properties --> Mixtures
**
12.1. External
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there external mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12.2. Internal
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there internal mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Mixing Rule
Is Required: FALSE Type: STRING Cardinality: 0.1
If there is internal mixing with respect to chemical composition then indicate the mixing rule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13. Optical Radiative Properties --> Impact Of H2o
**
13.1. Size
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact size?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.2. Internal Mixture
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact internal mixture?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Optical Radiative Properties --> Radiative Scheme
Radiative scheme for aerosol
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Shortwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of shortwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Optical Radiative Properties --> Cloud Interactions
Aerosol-cloud interactions
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol-cloud interactions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.2. Twomey
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the Twomey effect included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Twomey Minimum Ccn
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If the Twomey effect is included, then what is the minimum CCN number?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.4. Drizzle
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect drizzle?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Cloud Lifetime
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect cloud lifetime?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.6. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Model
Aerosol model
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
"""
Explanation: 16.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the Aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other model components coupled to the Aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.4. Gas Phase Precursors
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of gas phase aerosol precursors.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.5. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.6. Bulk Scheme Species
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of species covered by the bulk scheme.
End of explanation
"""
|
NlGG/Projects | 不動産/research02.ipynb | mit | print(data['CITY_NAME'].value_counts())
"""
Explanation: 変数名とデータの内容メモ
CENSUS: 市区町村コード(9桁)
P: 成約価格
S: 専有面積
L: 土地面積
R: 部屋数
RW: 前面道路幅員
CY: 建築年
A: 建築後年数(成約時)
TS: 最寄駅までの距離
TT: 東京駅までの時間
ACC: ターミナル駅までの時間
WOOD: 木造ダミー
SOUTH: 南向きダミー
RSD: 住居系地域ダミー
CMD: 商業系地域ダミー
IDD: 工業系地域ダミー
FAR: 建ぺい率
FLR: 容積率
TDQ: 成約時点(四半期)
X: 緯度
Y: 経度
CITY_CODE: 市区町村コード(5桁)
CITY_NAME: 市区町村名
BLOCK: 地域ブロック名
市区町村別の件数を集計
End of explanation
"""
# Cross-tabulate: rows = transaction quarter (TDQ), columns = municipality.
print(data.pivot_table(index=['TDQ'], columns=['CITY_NAME']))
"""
Explanation: 成約時点別×市区町村別の件数を集計
End of explanation
"""
# Cross-tabulate: rows = transaction quarter (TDQ), columns = regional block.
print(data.pivot_table(index=['TDQ'], columns=['BLOCK']))
"""
Explanation: 成約時点別×地域ブロック別の件数を集計
End of explanation
"""
# Histogram of the contract price P (raw scale).
data['P'].hist()
"""
Explanation: Histogram
価格(真数)
End of explanation
"""
# Histogram of log price — the usual target for hedonic regressions.
(np.log(data['P'])).hist()
"""
Explanation: 価格(自然対数)
End of explanation
"""
# Histogram of building age at contract time (A).
data['A'].hist()
# 4x2 grid of histograms for the main hedonic attributes.
# Titles are kept in Japanese (they label: contract price, floor area,
# land area, rooms, building age, road width, station distance, time to Tokyo).
plt.figure(figsize=(20,8))
plt.subplot(4, 2, 1)
data['P'].hist()
plt.title(u"成約価格")
plt.subplot(4, 2, 2)
data['S'].hist()
plt.title("専有面積")
plt.subplot(4, 2, 3)
data['L'].hist()
plt.title("土地面積")
plt.subplot(4, 2, 4)
data['R'].hist()
plt.title("部屋数")
plt.subplot(4, 2, 5)
data['A'].hist()
plt.title("建築後年数")
plt.subplot(4, 2, 6)
data['RW'].hist()
plt.title("前面道路幅員")
plt.subplot(4, 2, 7)
data['TS'].hist()
plt.title("最寄駅までの距離")
plt.subplot(4, 2, 8)
data['TT'].hist()
plt.title(u"東京駅までの時間")
"""
Explanation: 建築後年数
End of explanation
"""
# Bar chart of transaction counts per quarter (TDQ).
plt.figure(figsize=(20,8))
data['TDQ'].value_counts().plot(kind='bar')

# Bar chart of transaction counts per municipality.
plt.figure(figsize=(20,8))
data['CITY_NAME'].value_counts().plot(kind='bar') #市区町村別の件数
"""
Explanation: Plot
件数の推移
End of explanation
"""
# Build the regression design matrices: price P plus the hedonic attributes
# and coordinates, with one-hot dummies for municipality and quarter.
# NOTE: `vars` shadows the builtin of the same name (kept to avoid breaking
# later cells that reference it).
vars = ['P', 'S', 'L', 'R', 'RW', 'A', 'TS', 'TT', 'WOOD', 'SOUTH', 'CMD', 'IDD', 'FAR', 'X', 'Y']
# `fml_build` is a file-local helper (defined in an earlier cell) that turns
# the variable list into a patsy formula string — presumably "P ~ ...".
eq = fml_build(vars)
y, X = dmatrices(eq, data=data, return_type='dataframe')
CITY_NAME = pd.get_dummies(data['CITY_NAME'])
TDQ = pd.get_dummies(data['TDQ'])
X = pd.concat((X, CITY_NAME, TDQ), axis=1)
datas = pd.concat((y, X), axis=1)
# Restrict the sample to Setagaya ward (dummy column '12世田谷区') and cap at
# the first 5,000 rows.
datas = datas[datas['12世田谷区'] == 1][0:5000]
datas.head()
# Regressor list for the models below (the target price is handled inside
# fml_build; coordinates X/Y are added back separately by OLS_DLmodel).
vars = ['S', 'L', 'R', 'RW', 'A', 'TS', 'TT', 'WOOD', 'SOUTH', 'CMD', 'IDD', 'FAR']
#vars += vars + list(TDQ.columns)
class CAR(Chain):
    """Six-layer fully connected Chainer network with sigmoid activations
    and a single scalar output (used here on log prices / OLS residuals).

    Layer widths: col_num -> unit1 -> unit1 -> unit2 -> unit3 -> unit3 -> 1.
    """
    def __init__(self, unit1, unit2, unit3, col_num):
        # Hidden-layer widths; col_num is the number of input features.
        self.unit1 = unit1
        self.unit2 = unit2
        self.unit3 = unit3
        # Keyword arguments register the links as children of this Chain.
        super(CAR, self).__init__(
            l1 = L.Linear(col_num, unit1),
            l2 = L.Linear(self.unit1, self.unit1),
            l3 = L.Linear(self.unit1, self.unit2),
            l4 = L.Linear(self.unit2, self.unit3),
            l5 = L.Linear(self.unit3, self.unit3),
            l6 = L.Linear(self.unit3, 1),
        )
    def __call__(self, x, y):
        # Loss interface used by the optimizer: MSE between prediction and y.
        fv = self.fwd(x, y)
        loss = F.mean_squared_error(fv, y)
        return loss
    def fwd(self, x, y):
        # Forward pass.  NOTE(review): the `y` argument is never used —
        # callers pass a dummy (e.g. fwd(X, X)); presumably a leftover from
        # an earlier signature.  Confirm before cleaning up.
        h1 = F.sigmoid(self.l1(x))
        h2 = F.sigmoid(self.l2(h1))
        h3 = F.sigmoid(self.l3(h2))
        h4 = F.sigmoid(self.l4(h3))
        h5 = F.sigmoid(self.l5(h4))
        h6 = self.l6(h5)
        return h6
class OLS_DLmodel(object):
    """Two-stage hedonic price model.

    Stage 1 (``OLS``): regress log price on the covariates, excluding the
    coordinates X/Y.  Stage 2 (``DL``): fit a neural network to the OLS
    residuals; ``predict`` combines both stages on the hold-out sample and
    ``compare`` plots the two error distributions.  ``directDL`` instead
    fits the network straight to log price.

    Parameters
    ----------
    data : pandas DataFrame
        Estimation data containing every column named in ``vars``.
    vars : list of str
        Column names; the first entry is the dependent variable.
    bs : int
        Default minibatch size (stored; the train methods take their own).
    n : int
        Number of trailing rows held out for out-of-sample evaluation.
    """
    def __init__(self, data, vars, bs=200, n=1000):
        self.vars = vars
        eq = fml_build(vars)
        # Bug fix: the original passed the *global* `datas` here, silently
        # ignoring the `data` argument.
        y, X = dmatrices(eq, data=data, return_type='dataframe')
        # Train / hold-out split: the last `n` rows are kept for evaluation.
        self.y_in = y[:-n]
        self.X_in = X[:-n]
        self.y_ex = y[-n:]
        self.X_ex = X[-n:]
        self.logy_in = np.log(self.y_in)
        self.logy_ex = np.log(self.y_ex)
        self.bs = bs
    def OLS(self):
        """Fit OLS of log price on all regressors except the coordinates."""
        X_in = self.X_in
        # Longitude/latitude are left to the neural-net stage.
        X_in = X_in.drop(['X', 'Y'], axis=1)
        model = sm.OLS(self.logy_in, X_in, intercept=False)
        self.reg = model.fit()
        print(self.reg.summary())
        # Re-insert zero coefficients for X and Y so predictions can later
        # be computed against the *full* design matrix (which keeps them).
        df = (pd.DataFrame(self.reg.params)).T
        df['X'] = 0
        df['Y'] = 0
        self.reg.params = pd.Series((df.T)[0])
    def directDL(self, ite=100, bs=200, add=False):
        """Train the network directly on log price (no OLS stage).

        Parameters
        ----------
        ite : int
            Number of epochs.
        bs : int
            Minibatch size.
        add : bool
            If True, keep training the existing network instead of
            re-initialising it.
        """
        logy_in = np.array(self.logy_in, dtype='float32')
        X_in = np.array(self.X_in, dtype='float32')
        y = Variable(logy_in)
        x = Variable(X_in)
        num, col_num = X_in.shape
        if add is False:
            self.model1 = CAR(15, 15, 5, col_num)
        optimizer = optimizers.SGD()
        optimizer.setup(self.model1)
        for j in range(ite):
            # Shuffle once per epoch, then walk through it in minibatches.
            sffindx = np.random.permutation(num)
            for i in range(0, num, bs):
                x = Variable(X_in[sffindx[i:(i+bs) if (i+bs) < num else num]])
                y = Variable(logy_in[sffindx[i:(i+bs) if (i+bs) < num else num]])
                self.model1.zerograds()
                loss = self.model1(x, y)
                loss.backward()
                optimizer.update()
            if j % 1000 == 0:
                loss_val = loss.data
                print('epoch:', j)
                print('train mean loss={}'.format(loss_val))
                print(' - - - - - - - - - ')
        # Out-of-sample evaluation: histogram of level-price errors.
        y_ex = np.array(self.y_ex, dtype='float32').reshape(len(self.y_ex))
        X_ex = np.array(self.X_ex, dtype='float32')
        X_ex = Variable(X_ex)
        logy_pred = self.model1.fwd(X_ex, X_ex).data
        y_pred = np.exp(logy_pred)
        error = y_ex - y_pred.reshape(len(y_pred),)
        plt.hist(error[:])
    def DL(self, ite=100, bs=200, add=False):
        """Train the network on the OLS residuals (call ``OLS`` first)."""
        y_in = np.array(self.y_in, dtype='float32').reshape(len(self.y_in))
        # Residuals in price levels: observed price minus exp(OLS log fit).
        resid = y_in - np.exp(self.reg.predict())
        resid = np.array(resid, dtype='float32').reshape(len(resid), 1)
        X_in = np.array(self.X_in, dtype='float32')
        y = Variable(resid)
        x = Variable(X_in)
        num, col_num = X_in.shape
        if add is False:
            self.model1 = CAR(10, 10, 3, col_num)
        optimizer = optimizers.Adam()
        optimizer.setup(self.model1)
        for j in range(ite):
            sffindx = np.random.permutation(num)
            for i in range(0, num, bs):
                x = Variable(X_in[sffindx[i:(i+bs) if (i+bs) < num else num]])
                y = Variable(resid[sffindx[i:(i+bs) if (i+bs) < num else num]])
                self.model1.zerograds()
                loss = self.model1(x, y)
                loss.backward()
                optimizer.update()
            if j % 1000 == 0:
                loss_val = loss.data
                print('epoch:', j)
                print('train mean loss={}'.format(loss_val))
                print(' - - - - - - - - - ')
    def predict(self):
        """Compute hold-out errors for OLS alone (``error1``) and for
        OLS + residual network (``error2``)."""
        y_ex = np.array(self.y_ex, dtype='float32').reshape(len(self.y_ex))
        X_ex = np.array(self.X_ex, dtype='float32')
        X_ex = Variable(X_ex)
        resid_pred = self.model1.fwd(X_ex, X_ex).data
        print(resid_pred[:10])
        # OLS prediction on the log scale via the zero-padded coefficients.
        self.logy_pred = np.matrix(self.X_ex)*np.matrix(self.reg.params).T
        self.error1 = np.array(y_ex - np.exp(self.logy_pred.reshape(len(self.logy_pred),)))[0]
        self.pred = np.exp(self.logy_pred) + resid_pred
        self.error2 = np.array(y_ex - self.pred.reshape(len(self.pred),))[0]
    def compare(self):
        """Overlay histograms of the two hold-out error distributions."""
        plt.hist(self.error1)
        plt.hist(self.error2)
vars = ['P', 'S', 'L', 'R', 'RW', 'A', 'TS', 'TT', 'WOOD', 'SOUTH', 'CMD', 'IDD', 'FAR', 'X', 'Y']
#vars += vars + list(TDQ.columns)
model = OLS_DLmodel(datas, vars)
model.OLS()
model.DL(ite=10, bs=200)
model.predict()
model.DL(ite=20000, bs=200, add=True)
model.DL(ite=10000, bs=200, add=True)
model.predict()
"""
Explanation: Main Analysis
OLS part
End of explanation
"""
model.compare()
print(np.mean(model.error1))
print(np.mean(model.error2))
print(np.mean(np.abs(model.error1)))
print(np.mean(np.abs(model.error2)))
print(max(np.abs(model.error1)))
print(max(np.abs(model.error2)))
print(np.var(model.error1))
print(np.var(model.error2))
fig = plt.figure()
ax = fig.add_subplot(111)
errors = [model.error1, model.error2]
bp = ax.boxplot(errors)
plt.grid()
plt.ylim([-5000,5000])
plt.title('分布の箱ひげ図')
plt.show()
X = model.X_ex['X'].values
Y = model.X_ex['Y'].values
e = model.error2
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig=plt.figure()
ax=Axes3D(fig)
ax.scatter3D(X, Y, e)
plt.show()
t
plt.hist(Xs)
import numpy as np
from scipy.stats import gaussian_kde
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
Xs = np.linspace(min(X),max(X),10)
Ys = np.linspace(min(Y),max(Y),10)
error = model.error1
Xgrid, Ygrid = np.meshgrid(Xs, Ys)
Z = LL(X, Y, Xs, Ys, error)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(Xgrid,Ygrid,Z) #<---ここでplot
plt.show()
fig = plt.figure()
ax = Axes3D(fig)
ax.set_zlim(-100, 500)
ax.plot_surface(Xgrid,Ygrid,Z) #<---ここでplot
plt.show()
h = 10
(0.9375*(1-((X-1)/h)**2)**2)*(0.9375*(1-((Y-2)/h)**2)**2)
def LL(X, Y, Xs, Ys, error):
    """Local-linear, kernel-weighted smooth of `error` over a grid.

    For each grid point (Xs[i], Ys[j]) a biweight-kernel-weighted linear
    regression of `error` on (1, X - Xs[i], Y - Ys[j]) is fitted; the
    intercept estimates the local mean of the error surface.

    Parameters
    ----------
    X, Y : array-like
        Coordinates of the observations.
    Xs, Ys : array-like
        Grid coordinates at which to evaluate the smooth.
    error : array-like
        Prediction errors to be smoothed.

    Returns
    -------
    numpy array, shape (len(Xs), len(Ys))
        Local mean of `error` at each grid point.
    """
    n = len(X)
    h = 0.1  # kernel bandwidth
    # Bug fix: the original overwrote `error` with the global
    # `model.error2`, ignoring the argument; it also computed unused
    # intermediates u1/u2.
    mean_of_error = np.zeros((len(Xs), len(Ys)))
    for i in range(len(Xs)):
        for j in range(len(Ys)):
            # Product biweight kernel in both coordinate directions.
            k = (0.9375*(1-((X-Xs[i])/h)**2)**2)*(0.9375*(1-((Y-Ys[j])/h)**2)**2)
            K = np.diag(k)
            indep = np.matrix(np.array([np.ones(n), X - Xs[i], Y-Ys[j]]).T)
            dep = np.matrix(np.array([error]).T)
            # NOTE(review): sm.GLS treats `sigma` as the error covariance,
            # i.e. observations are weighted by K^{-1}.  If kernel *weights*
            # were intended, sm.WLS(dep, indep, weights=k) is the right
            # call — confirm against the reference implementation.
            gls_model = sm.GLS(dep, indep, sigma=K)
            gls_results = gls_model.fit()
            mean_of_error[i, j] = gls_results.params[0]
    return mean_of_error
h = 200
u1 = ((X-30)/h)**2
u1
u1[u1 < 0] = 0
for x in range(lXs[:2]):
print(x)
mean_of_error
plt.plot(gaussian_kde(Y, 0.1)(Ys))
N = 5
means = np.random.randn(N,2) * 10 + np.array([100, 200])
stdev = np.random.randn(N,2) * 10 + 30
count = np.int64(np.int64(np.random.randn(N,2) * 10000 + 50000))
a = [
np.hstack([
np.random.randn(count[i,j]) * stdev[i,j] + means[i,j]
for j in range(2)])
for i in range(N)]
for x in Xs:
for y in Ys:
# NOTE(review): non-executable scratch cell.  The header is a Python `def`
# but the body is R code (`matrix(nrow=...)`, `<-` assignment,
# `for (j in 1:n) { ... }`) — apparently a half-finished port of a
# local-linear (biweight-kernel) regression.  Kept verbatim; port it fully
# to Python or delete it before use.
def loclinearc(points,x,y,h):
    n = len(points[,1])
    const = matrix(1, nrow=length(x), ncol=1)
    bhat = matrix(0, nrow=3, ncol=n)
    b1 = matrix(0, n, n)
    predict = matrix(0, n, 1)
    for (j in 1:n) {
        for (i in 1:n) {
            a <- -.5*sign( abs( (points[i, 1]*const - x[,1])/h ) -1 ) + .5
            #get the right data points, (K(x) ~=0)
            b <- -.5*sign( abs( (points[j, 2]*const - x[,2])/h ) -1 ) + .5
            x1andy <- nonzmat(cbind((x[,1]*a*b), (y*a*b)))
            x2andy <- nonzmat(cbind((x[,2]*a*b), (y*a*b)))
            ztheta1 <- x1andy[,1]
            ztheta2 <- x2andy[,1]
            yuse <- x1andy[,2]
            q1 <- (ztheta1 - points[i,1]);
            q2 <- (ztheta2 - points[j,2]);
            nt1 <- ( (ztheta1- points[i,1])/h )
            nt2 <- ( (ztheta2- points[j,2])/h )
            #q2 = ((ztheta - points(i,1)).^2)/2;
            weights <- diag(c((15/16)%*%( 1-(nt1^2))^2*((15/16)%*%( 1-(nt2^2))^2)))
            #Biweight Kernel
            tempp3 <- cbind(matrix(1, nrow=length(ztheta1), ncol=1), q1, q2)
            bhat[,i] <- solve(t(tempp3)%*%weights%*%tempp3)%*%t(tempp3)%*%weights%*%yuse
        }
        b1[,j] <- t(bhat[1,])
    }
    return(b1)
}
# NOTE(review): non-executable scratch — R code with a missing `def` keyword
# on the header line (R syntax: `for (i in 1:m) {`, `<-`, `ifelse`).
# Intent (from the inline comments): delete all-zero ROWS of a matrix using
# a 99999 sentinel.  In Python this is a one-liner with numpy boolean
# masking, e.g. x[~(x == 0).all(axis=1)].  Port or delete before use.
nonzmat(x):
    #This function computes nonzeros of a MATRIX when certain ROWS of the
    #matrix are zero. This function returns a matrix with the
    #zero rows deleted
    m, k = x.shape
    xtemp = matrix(np.zeros(m, k))
    for (i in 1:m) {
        xtemp[i,] <- ifelse(x[i,] == matrix(0, nrow=1, ncol=k), 99999*matrix(1, nrow=1, ncol=k), x[i,])
    }
    xtemp <- xtemp - 99999
    if (length(which(xtemp !=0,arr.ind = T)) == 0) {
        a <- matrix(-99999, nrow=1, ncol=k)
    } else {
        a <- xtemp[which(xtemp !=0,arr.ind = T)]
    }
    a <- a + 99999
    n1 <- length(a)
    rowlen <- n1/k
    collen <- k
    out = matrix(a, nrow=rowlen, ncol=collen)
    return(out)
}
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.tri as mtri
#============
# First plot
#============
# Plot the surface. The triangles in parameter space determine which x, y, z
# points are connected by an edge.
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.plot_trisurf(X, Y, e)
ax.set_zlim(-1, 1)
plt.show()
"""
Explanation: 青がOLSの誤差、緑がOLSと深層学習を組み合わせた誤差。
End of explanation
"""
|
Almaz-KG/MachineLearning | ml-for-finance/python-for-financial-analysis-and-algorithmic-trading/02-NumPy/Numpy Exercise - Solutions.ipynb | apache-2.0 | import numpy as np
"""
Explanation: <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
<center>Copyright Pierian Data 2017</center>
<center>For more information, visit us at www.pieriandata.com</center>
NumPy Exercises - Solutions
Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks and then you'll be asked some more complicated questions.
IMPORTANT NOTE! Make sure you don't run the cells directly above the example output shown, otherwise you will end up writing over the example output!
Import NumPy as np
End of explanation
"""
# CODE HERE
np.zeros(10)
"""
Explanation: Create an array of 10 zeros
End of explanation
"""
# CODE HERE
np.ones(10)
"""
Explanation: Create an array of 10 ones
End of explanation
"""
# CODE HERE
np.ones(10) * 5
"""
Explanation: Create an array of 10 fives
End of explanation
"""
# CODE HERE
np.arange(10,51)
"""
Explanation: Create an array of the integers from 10 to 50
End of explanation
"""
# CODE HERE
np.arange(10,51,2)
"""
Explanation: Create an array of all the even integers from 10 to 50
End of explanation
"""
# CODE HERE
np.arange(9).reshape(3,3)
"""
Explanation: Create a 3x3 matrix with values ranging from 0 to 8
End of explanation
"""
# CODE HERE
np.eye(3)
"""
Explanation: Create a 3x3 identity matrix
End of explanation
"""
# CODE HERE
np.random.rand(1)
"""
Explanation: Use NumPy to generate a random number between 0 and 1
End of explanation
"""
# CODE HERE
np.random.randn(25)
"""
Explanation: Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
End of explanation
"""
np.arange(1,101).reshape(10,10) / 100
"""
Explanation: Create the following matrix:
End of explanation
"""
np.linspace(0,1,20)
"""
Explanation: Create an array of 20 linearly spaced points between 0 and 1:
End of explanation
"""
# CODE HERE
mat = np.arange(1,26).reshape(5,5)
mat
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[2:,1:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3,4]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[:3,1:2]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[4,:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3:5,:]
"""
Explanation: Numpy Indexing and Selection
Now you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
End of explanation
"""
# CODE HERE
mat.sum()
"""
Explanation: Now do the following
Get the sum of all the values in mat
End of explanation
"""
# CODE HERE
mat.std()
"""
Explanation: Get the standard deviation of the values in mat
End of explanation
"""
# CODE HERE
mat.sum(axis=0)
"""
Explanation: Get the sum of all the columns in mat
End of explanation
"""
np.random.seed(101)
"""
Explanation: Bonus Question
We worked a lot with random data with numpy, but is there a way we can insure that we always get the same random numbers? Click Here for a Hint
End of explanation
"""
|
YAtOff/python0-reloaded | week2/Expressions, variables and errors.ipynb | mit | 2 * 3 + 2
2 * (3 + 2)
"""
Explanation: Изрази
Изразите в Python са като изразите в математиката.
Всеки изразе е изграден от сотйности (като напр. числата 1, 2, 3, ...) и оператори (+, -, ...).
Типове
Всяка стойност се характеризира с определн тип.
А типът е:
- Множеството от стойности
- Множество от операции, които могат да се извършват с тези стойности
Целочислени числа (тип int)
сотйности | операции
--- | ---
..., -3, -2, -1, 0, 1, 2, 3, ...| +, -, , /, //, %, *
Реални числа (Числа с плаваща запетая, float)
сотйности | операции
--- | ---
-0.1, -0.11, ..., 0.0, ..., 0.1, ... | +, -, , /, //, %, *
### Символни низове (тип str)
сотйности | операции
--- | ---
"hello", "goodbye", ... | +
## Приоритет на операциите
1. *
2. -
3. , /, //, %
4. +, -
End of explanation
"""
c = 10 # number of coins - прекалено късо
number_of_coins = 10 # прекалено детайлно име
coinsCount = 10 # ОК, но за Java
coins_count = 10 # OK
# Задаването на стойност на променлива се нарича `присвояване`
count = 1
# Когато Python срещне променлива в израз, той я заменя със стойността и
print(count + 1)
# Променливите се наричат променливи, защото стойността им може да се променя
count = 2
print(count + 1)
"""
Explanation: Променливи
Променливата е име,с което се асоциира дадена стойност.
Валидни имена на променливи
Името на променлива може да съдържа главни и малки букви, цифри и символът _.
Името на променлива трябва да започва с буква или _.
За имена на променливи не може да се използват служебни думи от Python.
Препоръки за именуване на променливи
Имената трябва да са описателни и да обясняват за какво служи
дадената променлива. Например за име на човек подходящо име е
person_name, а неподходящо име е x.
Трябва да се използват само латински букви.
В Python e прието променливите да започват винаги с малка буква и да
съдържат само малки букви, като всяка следваща дума в тях е разделе от
предходната със символа _.
Името на променливите трябва да не е нито много дълго, нито много
късо – просто трябва да е ясно за какво служи променливата в
контекста, в който се използва.
Трябва да се внимава за главни и малки букви, тъй като Python прави
разлика между тях. Например age и Age са различни променливи.
Работа с променливи
End of explanation
"""
count = 1
count = count + 1
print(count)
"""
Explanation: Какво трябва да напишем, за да увеличим стойността на count с 1 (приемете, че не знаем каква е стойността на count)?
End of explanation
"""
my var = 1
price = 1
print(pirce)
"""
Explanation: Грешки
End of explanation
"""
|
qaisermazhar/qaisermazhar.github.io | markdown_generator/publications.ipynb | mit | !cat publications.tsv
"""
Explanation: Publications markdown generator for academicpages
Takes a TSV of publications with metadata and converts them for use with academicpages.github.io. This is an interactive Jupyter notebook (see more info here). The core python code is also in publications.py. Run either from the markdown_generator folder after replacing publications.tsv with one containing your data.
TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
Data format
The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
excerpt and paper_url can be blank, but the others must have values.
pub_date must be formatted as YYYY-MM-DD.
url_slug will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be YYYY-MM-DD-[url_slug].md and the permalink will be https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]
This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).
End of explanation
"""
import pandas as pd
"""
Explanation: Import pandas
We are using the very handy pandas library for dataframes.
End of explanation
"""
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
"""
Explanation: Import TSV
Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or \t.
I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
End of explanation
"""
# Map of characters that must be escaped inside YAML front-matter values.
# Bug fix: the HTML entities were garbled (rendered back to the raw
# characters, leaving a broken string literal); restore &amp;/&quot;/&apos;.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}

def html_escape(text):
    """Return `text` with &, double and single quotes replaced by their
    HTML entities, so the value is safe inside YAML/HTML."""
    return "".join(html_escape_table.get(c, c) for c in text)
"""
Explanation: Escape special characters
YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
End of explanation
"""
import os

# Generate one markdown file per publication row: a YAML front-matter
# header (title, permalink, date, venue, ...) followed by a short body with
# the excerpt, download link and recommended citation.
for row, item in publications.iterrows():
    # File name / permalink slug: YYYY-MM-DD-<url_slug>
    md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
    html_filename = str(item.pub_date) + "-" + item.url_slug
    year = item.pub_date[:4]  # NOTE(review): computed but never used below

    ## YAML variables
    md = "---\ntitle: \""   + item.title + '"\n'

    md += """collection: publications"""

    md += """\npermalink: /publication/""" + html_filename

    # `len(str(...)) > 5` is a crude "field present" test: it also filters
    # out NaN cells, since str(nan) == 'nan' has length 3.
    if len(str(item.excerpt)) > 5:
        md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"

    md += "\ndate: " + str(item.pub_date)

    md += "\nvenue: '" + html_escape(item.venue) + "'"

    if len(str(item.paper_url)) > 5:
        md += "\npaperurl: '" + item.paper_url + "'"

    md += "\ncitation: '" + html_escape(item.citation) + "'"

    md += "\n---"

    ## Markdown description for individual page
    if len(str(item.excerpt)) > 5:
        md += "\n" + html_escape(item.excerpt) + "\n"

    if len(str(item.paper_url)) > 5:
        md += "\n[Download paper here](" + item.paper_url + ")\n"

    md += "\nRecommended citation: " + item.citation

    md_filename = os.path.basename(md_filename)

    with open("../_publications/" + md_filename, 'w') as f:
        f.write(md)
"""
Explanation: Creating the markdown files
This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (md) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
End of explanation
"""
!ls ../_publications/
!cat ../_publications/2009-10-01-paper-title-number-1.md
"""
Explanation: These files are in the publications directory, one directory below where we're working from.
End of explanation
"""
|
NagyAttila/Udacity_DLND_Assigments | 3_tv-script-generation/dlnd_tv_script_generation.ipynb | gpl-3.0 | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """Create word <-> id lookup tables for a corpus.

    Parameters
    ----------
    text : list of str
        The text of tv scripts split into words.

    Returns
    -------
    (vocab_to_int, int_to_vocab) : tuple of dict
        Inverse mappings between words and integer ids.
    """
    # Sort the vocabulary so ids are deterministic across runs: iterating a
    # raw set gives an arbitrary, hash-dependent order, which made saved
    # checkpoints incompatible between sessions.
    vocab = sorted(set(text))
    vocab_to_int = {}
    int_to_vocab = {}
    for index, word in enumerate(vocab):
        vocab_to_int[word] = index
        int_to_vocab[index] = word
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
    """Map punctuation symbols to unambiguous replacement tokens.

    The tokens are wrapped in ``||...||`` so they cannot collide with real
    vocabulary words.  Bug fix: the previous bare tokens ("period",
    "comma", ...) are ordinary English words, so e.g. the word "period"
    occurring in dialogue would have been turned into "." when the tokens
    were reversed after generation.

    Returns
    -------
    dict
        Punctuation symbol -> token string (no spaces).
    """
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
    """Create the TF placeholders fed during training.

    Returns
    -------
    (input, targets, learning_rate) : tuple of tf.placeholder
        Word-id input batch (exposed under the graph name "input"),
        target word ids, and the scalar learning rate.
    """
    input_ph = tf.placeholder(tf.int32, (None, None), name='input')
    target_ph = tf.placeholder(tf.int32, (None, None))
    lr_ph = tf.placeholder(tf.float32)
    return input_ph, target_ph, lr_ph
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
    """Build a single-layer MultiRNNCell of LSTMs and its zero state.

    Parameters
    ----------
    batch_size : tensor or int
        Number of sequences per batch.
    rnn_size : int
        Number of units in the LSTM cell.

    Returns
    -------
    (cell, initial_state)
        The RNN cell and its zero state; the state is exposed under the
        graph name "initial_state" so it can be fetched from a saved graph.
    """
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(rnn_size)])
    zero_state = cell.zero_state(batch_size, tf.float32)
    return cell, tf.identity(zero_state, name='initial_state')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initalize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
    """Look up trainable embeddings for the word ids in `input_data`.

    Parameters
    ----------
    input_data : tensor of int word ids
    vocab_size : int
        Number of rows in the embedding matrix.
    embed_dim : int
        Embedding dimensionality.

    Returns
    -------
    Tensor of embedded input vectors.
    """
    # Embedding matrix initialised uniformly in [-1, 1).
    init = tf.random_uniform((vocab_size, embed_dim), -1, 1, dtype=tf.float32)
    embedding = tf.Variable(init)
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
    """Unroll `cell` over `inputs` with dynamic sequence lengths.

    Parameters
    ----------
    cell : RNN cell
    inputs : tensor of embedded input sequences

    Returns
    -------
    (outputs, final_state)
        Per-timestep outputs and the final state; the state is exposed
        under the graph name "final_state".
    """
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    return outputs, tf.identity(state, name='final_state')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim=5000):
    """Assemble the network: embedding -> RNN -> linear output layer.

    Parameters
    ----------
    cell : RNN cell
        Cell returned by ``get_init_cell``.
    rnn_size : int
        Size of the RNN.  NOTE(review): unused here — the cell is already
        sized; kept for interface compatibility with the project tests.
    input_data : tensor of word ids
    vocab_size : int
        Vocabulary size (number of output logits per timestep).
    embed_dim : int, optional
        Embedding dimensionality.  Generalized from the previous hard-coded
        constant; the default 5000 preserves the original behaviour, but
        values in the 200-300 range are more typical for this vocabulary.

    Returns
    -------
    (logits, final_state)
    """
    embedded_input = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embedded_input)
    # Linear projection (no activation) to per-word scores; softmax is
    # applied by the caller.
    logits = tf.contrib.layers.fully_connected(
        outputs, vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
    """Split `int_text` into batches of (input, target) sequence pairs.

    The result has shape (n_batches, 2, batch_size, seq_length); batch b,
    row r holds sequence number r * n_batches + b, and targets are the
    inputs shifted by one word.  Words that do not fill a whole batch are
    dropped; if the very last target needs a word past the end of the text
    it wraps around to the first word.

    Bug fixes over the original: ``np.empty`` returned a float array and,
    when ``len(int_text)`` was an exact multiple of
    ``batch_size * seq_length``, the slicing dropped the final sequence and
    left uninitialized garbage in the output.

    Parameters
    ----------
    int_text : list of int
        The text with words replaced by their ids.
    batch_size : int
        Number of sequences per batch.
    seq_length : int
        Length of each sequence.

    Returns
    -------
    numpy int array, shape (n_batches, 2, batch_size, seq_length)
    """
    n_batches = len(int_text) // (batch_size * seq_length)
    batches = np.zeros((n_batches, 2, batch_size, seq_length), dtype=int)
    for row in range(batch_size):
        for batch_i in range(n_batches):
            seq_i = row * n_batches + batch_i
            start = seq_i * seq_length
            batches[batch_i, 0, row] = int_text[start:start + seq_length]
            target = list(int_text[start + 1:start + seq_length + 1])
            if len(target) < seq_length:
                # Last target of the last sequence wraps to the first word.
                target.append(int_text[0])
            batches[batch_i, 1, row] = target
    return batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2 3], [ 7 8 9]],
# Batch of targets
[[ 2 3 4], [ 8 9 10]]
],
# Second Batch
[
# Batch of Input
[[ 4 5 6], [10 11 12]],
# Batch of targets
[[ 5 6 7], [11 12 13]]
]
]
```
End of explanation
"""
# For TensorBoard
from tensorflow.contrib import seq2seq
def train(num_epochs, rnn_size, batch_size, seq_length, learning_rate):
    """Build, train and TensorBoard-log one model for the given
    hyper-parameters, so different runs can be compared side by side."""
    graph = tf.Graph()
    with tf.Session(graph=graph) as sess:
        # One log directory per hyper-parameter combination.
        logFile = './logs/rs={}_bs={}_sl={}_lr={}'.format(
            rnn_size, batch_size, seq_length, learning_rate)
        writer = tf.summary.FileWriter(logFile)

        vocab_size = len(int_to_vocab)
        input_text, targets, lr = get_inputs()
        input_data_shape = tf.shape(input_text)
        cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
        logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)

        # Probabilities for generating words
        probs = tf.nn.softmax(logits, name='probs')

        # Loss function
        cost = seq2seq.sequence_loss(
            logits,
            targets,
            tf.ones([input_data_shape[0], input_data_shape[1]]))

        # Save Loss For TensorBoard
        cost_buf = tf.summary.scalar('cost', cost)

        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)

        # Gradient Clipping
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
        train_op = optimizer.apply_gradients(capped_gradients)

        batches = get_batches(int_text, batch_size, seq_length)
        sess.run(tf.global_variables_initializer())

        for epoch_i in range(num_epochs):
            # Reset the RNN state at the start of every epoch.
            state = sess.run(initial_state, {input_text: batches[0][0]})
            for batch_i, (x, y) in enumerate(batches):
                feed = {
                    input_text: x,
                    targets: y,
                    initial_state: state,
                    lr: learning_rate}
                summary, train_loss, state, _ = sess.run(
                    [cost_buf, cost, final_state, train_op], feed)
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss), end='\r')
                # Global step index used as the TensorBoard x-axis.
                iteration = epoch_i * len(batches) + batch_i
                writer.add_summary(summary, iteration)
        print(logFile, ' train_loss = {:.3f}'.format(train_loss))
### Find the right params by TensorBoard
num_epochs = 100
# Grid over every hyperparameter combination; each run would write its own
# TensorBoard log directory (see train() above).
for rnn_size in (128, 256, 512, 1024):
    for batch_size in (64, 128, 256, 512):
        for seq_length in (5, 10, 20):
            for learning_rate in (0.1, 0.01, 0.001, 0.0001):
                # Disabled: each call is expensive.
                # train(num_epochs, rnn_size, batch_size, seq_length, learning_rate)
                pass
### Final Parameters
# Hyperparameters retained after the TensorBoard sweep above.
num_epochs = 100           # Number of Epochs
batch_size = 512           # Batch Size
rnn_size = 1024            # RNN Size
seq_length = 20            # Sequence Length
learning_rate = 0.001      # Learning Rate
show_every_n_batches = 12  # Print progress every this many batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Directory where the trained model checkpoint is written.
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
%%time
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
    """Fetch the tensors needed for generation from a restored graph.

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    tensor_names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
    """Pick the next word in the generated text.

    Restricts the choice to the ten most probable words, then keeps each
    candidate with probability equal to its softmax score; the first
    surviving candidate is returned.

    :param probabilities: Probabilities of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    top_k = 10
    # Pair each probability with its (id, word) entry and rank descending.
    # NOTE(review): assumes int_to_vocab iterates in word-id order so each
    # probability lines up with the right word -- confirm upstream.
    ranked = sorted(zip(probabilities, int_to_vocab.items()), reverse=True)
    candidates = ranked[:top_k]

    survivors = []
    while not survivors:
        # Retry until at least one candidate passes its dice roll.
        survivors = [word for prob, (_, word) in candidates
                     if prob >= np.random.rand()]
    return survivors[0]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
# Seed speaker for the script: homer_simpson, moe_szyslak, or Barney_Gumble.
prime_word = 'moe_szyslak'
# Number of words to generate.
gen_length = 200
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
jovanbrakus/data-analysis-tools | week-2-assignment.ipynb | gpl-3.0 | import pandas
import numpy
import scipy.stats
import seaborn
import matplotlib.pyplot as plt
data = pandas.read_csv('nesarc_pds.csv', low_memory=False)
# MAJORDEPLIFE - Diagnosed major depressions in lifetime
# S2AQ5A - Drink beer in last 12 months
# S2AQ5B - How often drank a beer in last year
data['MAJORDEPLIFE'] = pandas.to_numeric(data['TAB12MDX'], errors='coerce')
data['S2AQ5A'] = pandas.to_numeric(data['S2AQ5A'], errors='coerce')
data['S2AQ5B'] = pandas.to_numeric(data['S2AQ5B'], errors='coerce')
data['AGE'] = pandas.to_numeric(data['AGE'], errors='coerce')
"""
Explanation: Data analysis tools
For Week 2 assignment I'm testing:
Null hypothesis - there is no difference in beer consumption between people with/without major depression diagnosis
Alternate hypothesis - there is difference in beer consumption between people with/without major depression diagnosis
End of explanation
"""
# Subset: young adults (age 18 to 25) who drank beer in the past 12 months.
is_young = (data['AGE'] >= 18) & (data['AGE'] <= 25)
drank_beer = data['S2AQ5A'] == 1
sub1 = data[is_young & drank_beer]
sub2 = sub1.copy()

# Recode the "unknown" code (99) to a proper missing value (NaN).
sub2['S2AQ5B'] = sub2['S2AQ5B'].replace({99: numpy.nan})

# Contingency table of observed counts: diagnosis x drinking frequency.
ct1 = pandas.crosstab(sub2['MAJORDEPLIFE'], sub2['S2AQ5B'])
print (ct1)
"""
Explanation: Following block prepares data we will use and prints contingency table with number of people we have within each category:
End of explanation
"""
# Convert the counts to column percentages (share within each frequency
# category).
colsum = ct1.sum(axis=0)
colpct = ct1.div(colsum)
print(colpct)
"""
Explanation: Next block prints the same contingency table but in percentages instead of absolute values:
End of explanation
"""
# chi-square test of independence on the observed contingency table.
# chi2_contingency returns (statistic, p value, dof, expected counts).
cs1= scipy.stats.chi2_contingency(ct1)
# Python 2 print statements (this notebook predates print()).
print 'Chi-square value: ', cs1[0]
print 'p value: ', cs1[1]
print 'Expected counts:', cs1[3]
"""
Explanation: Chi-square test
End of explanation
"""
# Bonferroni correction: 0.05 divided by the 45 pairwise comparisons
# (10 frequency categories -> C(10, 2) = 45 category pairs).
bp_adjusted = 0.05 / 45
print 'adjusted p value: ', bp_adjusted
"""
Explanation: Test value is significant, p value is much smaller than 0.05, so we can reject null hypothesis and accept alternate one.
Bonferroni adjusted p - post-hoc test
End of explanation
"""
# Post-hoc: rerun the chi-square test for every pair of drinking-frequency
# categories (1..10).  map() keeps only the two categories of interest and
# turns every other value into NaN, which crosstab then drops.
for idx1 in range(1,11):
    for idx2 in range (idx1+1, 11):
        map_filter = {idx1:idx1, idx2:idx2}
        sub2['COMP1v2']= sub2['S2AQ5B'].map(map_filter)
        # contingency table of observed counts
        ct2 = pandas.crosstab(sub2['MAJORDEPLIFE'], sub2['COMP1v2'])
        # chi-square test
        cs2 = scipy.stats.chi2_contingency(ct2)
        # NOTE(review): the comparison below uses the unadjusted 0.05
        # threshold, not bp_adjusted computed above -- confirm which is
        # intended for the post-hoc decision.
        print 'Category1:', idx1, 'Category2:',idx2, 'P-value:',cs2[1],'Rejected:', cs2[1]<0.05
"""
Explanation: Chi-square test p value is still smaller than bonferroni adjusted p value. This test indicated we were right to reject null hypothesis.
Individual category pairs post-hoc test
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.